python urllib module

In Python 3, urllib and urllib2 were merged into a single urllib package: the contents of urllib and urllib2 moved into urllib.request, and urlparse moved into urllib.parse.
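For reference, the common Python 2 imports map to their Python 3 locations roughly like this (only the names used below are listed):

# Python 2                                 # Python 3
# import urllib2                     ->    import urllib.request, urllib.error
# import urllib (urlencode, quote)   ->    import urllib.parse
# from urlparse import urlparse      ->    from urllib.parse import urlparse, urljoin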

urlparse

Parses a URL string into its component parts.

# -*- coding:utf-8 -*-
import urllib.parse
url = "http://www.baidu.com"
parsed = urllib.parse.urlparse(url)
print(parsed)
# Output: ParseResult(scheme='http', netloc='www.baidu.com', path='', params='', query='', fragment='')

The six parts of the URL can be accessed by tuple index or by attribute name: scheme, network location (netloc), path, path-segment parameters (params, separated from the path by a semicolon), query string, and fragment.
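A minimal sketch showing both access styles, using a made-up URL that fills in every component:

import urllib.parse
parsed = urllib.parse.urlparse('http://www.example.com/path;type=a?q=1#top')
print(parsed[0], parsed[1])           # index access: http www.example.com
print(parsed.path, parsed.params)     # attribute access: /path type=a
print(parsed.query, parsed.fragment)  # q=1 top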

urljoin

urljoin(base, url, allow_fragments=True)  resolves a new (possibly relative) URL against a base URL to produce a complete URL

import urllib.parse
url = "http://www.baidu.com"
new_path = urllib.parse.urljoin(url,"index.html")
print(new_path)
# Output: http://www.baidu.com/index.html
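urljoin resolves the second argument against the base the way a browser resolves a link, so the result depends on whether the new URL is relative, root-relative, or absolute:

import urllib.parse
base = 'http://www.baidu.com/a/b.html'
print(urllib.parse.urljoin(base, 'c.html'))    # http://www.baidu.com/a/c.html
print(urllib.parse.urljoin(base, '/c.html'))   # http://www.baidu.com/c.html
print(urllib.parse.urljoin(base, 'http://cn.bing.com/c.html'))  # http://cn.bing.com/c.html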

urlopen

urlopen(url, data=None, timeout=...)  opens a URL and returns a file-like response object that supports file-style operations

import urllib.request
req = urllib.request.urlopen('http://www.baidu.com')
print(req.read())

The response object supports the usual file-like methods: read(), readline(), readlines(), fileno(), close(). In addition:

info(): returns an http.client.HTTPMessage object containing the headers sent by the remote server.

getcode(): returns the HTTP status code; for an HTTP request, 200 means the request completed successfully and 404 means the page was not found.

geturl(): returns the URL of the resource actually retrieved (useful for detecting redirects).
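A short sketch exercising these accessors:

import urllib.request
req = urllib.request.urlopen('http://www.baidu.com')
print(req.getcode())  # 200
print(req.geturl())   # http://www.baidu.com
print(req.info())     # the response headers as an http.client.HTTPMessage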

urlretrieve

urlretrieve(url, filename=None, reporthook=None, data=None)  downloads the resource at url to a local file. If filename is omitted, the data is saved to a temporary file; urllib.request.urlcleanup() can be called afterwards to clean up.

import os
import urllib.request

BASE_DIR = os.path.dirname(os.path.abspath(__file__))  # directory to save into
# urlretrieve returns a (filename, headers) tuple
file_name, headers = urllib.request.urlretrieve('http://www.baidu.com', '%s/baidu.html' % BASE_DIR)
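The reporthook argument, if supplied, is called with (block_count, block_size, total_size) as data arrives, which allows a simple progress display; a minimal sketch:

import urllib.request

def progress(block_count, block_size, total_size):
    # total_size is -1 if the server sends no Content-Length header
    if total_size > 0:
        percent = min(100, block_count * block_size * 100 // total_size)
        print('downloaded %d%%' % percent)

urllib.request.urlretrieve('http://www.baidu.com', 'baidu.html', reporthook=progress)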

urlencode

urlencode()   joins the key/value pairs of a dict into a query string with & as the separator

import urllib.parse
dic = {'name':'melon','age':18}
data = urllib.parse.urlencode(dic)

print(data)     # name=melon&age=18 (key order follows dict insertion order on Python 3.7+)
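The encoded string is typically appended to a URL after a ? to build a GET request; a small sketch against the httpbin.org echo service:

import urllib.parse
import urllib.request

params = urllib.parse.urlencode({'name': 'melon', 'age': 18})
response = urllib.request.urlopen('http://httpbin.org/get?' + params)
print(response.read().decode('utf-8'))  # the "args" field echoes name and age back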

Usage

# request: GET
import urllib.request
response = urllib.request.urlopen('http://www.baidu.com')
print(response.read().decode('utf-8'))

# request: POST
# HTTP test service: http://httpbin.org/
import urllib.parse
import urllib.request
data = bytes(urllib.parse.urlencode({'word':'hello'}),encoding='utf8')
response = urllib.request.urlopen('http://httpbin.org/post',data=data)
print(response.read())

# Timeout setting
import urllib.request
response = urllib.request.urlopen('http://httpbin.org/get',timeout=1)
print(response.read())

import socket
import urllib.request
import urllib.error

try:
    response = urllib.request.urlopen('http://httpbin.org/get',timeout=0.1)
except urllib.error.URLError as e:
    if isinstance(e.reason,socket.timeout):
        print('TIME OUT')

Response

# Response type
import urllib.request
response = urllib.request.urlopen('https://www.python.org')
print(type(response))
# Status code and response headers
import urllib.request
response = urllib.request.urlopen('https://www.python.org')
print(response.status)
print(response.getheaders())
print(response.getheader('Server'))

Request

# Simple example
import urllib.request
request = urllib.request.Request('https://python.org')
response = urllib.request.urlopen(request)
print(response.read().decode('utf-8'))

# Add request headers
from urllib import request, parse
url = 'http://httpbin.org/post'
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
    'Host':'httpbin.org'
}
# Build the POST form
form = {
    'name':'Germey'
}
data = bytes(parse.urlencode(form),encoding='utf8')
req = request.Request(url=url,data=data,headers=headers,method='POST')
response = request.urlopen(req)
print(response.read().decode('utf-8'))

Proxies

import urllib.request
proxy_handler = urllib.request.ProxyHandler({
    'http':'http://127.0.0.1:9743',
    'https':'https://127.0.0.1:9743'
})
opener = urllib.request.build_opener(proxy_handler)
response = opener.open('http://www.baidu.com')
print(response.read())

Cookie

import http.cookiejar, urllib.request

cookie = http.cookiejar.CookieJar()
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open("http://www.baidu.com")
for item in cookie:
    print(item.name+"="+item.value)

# Save cookies to a text file
import http.cookiejar, urllib.request
filename = "cookie.txt"
# Several on-disk formats are available
## Option 1: Mozilla format
cookie = http.cookiejar.MozillaCookieJar(filename)
## Option 2: LWP format
cookie = http.cookiejar.LWPCookieJar(filename)

handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open("http://www.baidu.com")
cookie.save(ignore_discard=True, ignore_expires=True)  # actually write the file

# Load the file back with the matching CookieJar class
import http.cookiejar, urllib.request
cookie = http.cookiejar.LWPCookieJar()
cookie.load('cookie.txt',ignore_discard=True,ignore_expires=True)
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open("http://www.baidu.com")

Exception handling

# Request a page that does not exist
from urllib import request, error
try:
    response = request.urlopen('http://cuiqingcai.com/index.htm')
except error.URLError as e:
    print(e.reason)

# Catch the subclass (HTTPError) before the base class (URLError)
from urllib import request, error
try:
    response = request.urlopen('http://cuiqingcai.com/index.htm')
except error.HTTPError as e:
    print(e.reason, e.code, e.headers, sep='\n')
except error.URLError as e:
    print(e.reason)
else:
    print("Request Successfully')
# 判断原因
import socket
import urllib.request
import urllib.error

try:
    response = urllib.request.urlopen('http://httpbin.org/get',timeout=0.1)
except urllib.error.URLError as e:
    if isinstance(e.reason,socket.timeout):
        print('TIME OUT')

Submitting JSON data

import json
import urllib.request

raw = {
    "data": {
        "code": "xxxxx",
        "password": "xxxxx",
        "err": ""
    }
}

data = json.dumps(raw)
data = bytes(data, 'utf8')
# set the Content-Type header so the body is sent as JSON, not form data
request = urllib.request.Request('http://xxxxxxxxxxxxxx/login/xxxx', data=data,
                                 headers={'Content-Type': 'application/json'})
response = urllib.request.urlopen(request)
html = response.read().decode('utf-8')
print(html)
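If the endpoint replies with JSON as well (an assumption here, since the URL is a placeholder), the body can be parsed back into a dict:

result = json.loads(html)  # reuses the json import above
print(result)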