代码:
import requests
import os
from hashlib import md5
from urllib.parse import urlencode
from multiprocessing.pool import Pool

# Pages GROUP_START*20 .. GROUP_END*20 (inclusive) are fetched.
GROUP_START = 1
GROUP_END = 5


def get_page(offset):
    """Fetch one page of Toutiao '街拍' (street-photo) search results.

    Args:
        offset: paging offset forwarded verbatim to the search API.

    Returns:
        The decoded JSON payload (a dict) on HTTP 200, otherwise None
        (network error, timeout, or non-200 status).
    """
    params = {
        'offset': offset,
        'format': 'json',
        'keyword': '街拍',
        'autoload': 'true',
        'count': '',
        'cur_tab': '',
        'form': 'search_tab',
    }
    url = 'https://www.toutiao.com/search_content/?' + urlencode(params)
    try:
        # timeout prevents a worker process from hanging forever on a stalled
        # connection; RequestException also covers ReadTimeout, which the
        # original ConnectionError-only clause would have let propagate.
        response = requests.get(url, timeout=10)
        if response.status_code == 200:
            return response.json()
    except requests.RequestException:
        return None
    return None  # non-200 response: make the implicit fall-through explicit


def get_images(json):
    """Yield ``{'image': url, 'title': title}`` dicts from a search payload.

    Tolerates a None/empty payload (e.g. when get_page failed) and items
    that carry no 'image_list'; such inputs simply yield nothing.
    """
    # Guard: get_page returns None on failure; the original crashed here
    # with AttributeError inside the worker process.
    if not json:
        return
    data = json.get('data')
    if data:
        for item in data:
            image_list = item.get('image_list')
            title = item.get('title')
            if image_list:
                for image in image_list:
                    # Generator: pair each image URL with its post title.
                    yield {
                        'image': image.get('url'),
                        'title': title
                    }
def save_image(item):
    """Download one image and store it in a folder named after its title.

    Args:
        item: dict with keys 'image' (a protocol-relative thumbnail URL,
              e.g. '//p3.pstatp.com/list/...') and 'title'.

    Files are named by the MD5 of their content, so re-running the script
    skips images that were already downloaded.
    """
    # Guard: items occasionally lack a title; the original crashed in
    # os.path.exists(None).
    title = item.get('title') or 'untitled'
    # exist_ok avoids the check-then-create race of exists() + mkdir(),
    # which matters because several worker processes share these folders.
    os.makedirs(title, exist_ok=True)
    local_image_url = item.get('image')
    if not local_image_url:
        return
    # Thumbnails live under /list/; swap in /large/ for the full-size image.
    new_image_url = local_image_url.replace('list', 'large')
    try:
        # NOTE(review): assumes the URL is protocol-relative ('//host/...'),
        # hence the 'http:' prefix — confirm against the API response.
        response = requests.get('http:' + new_image_url, timeout=10)
        if response.status_code == 200:
            file_path = '{0}/{1}.{2}'.format(
                title, md5(response.content).hexdigest(), 'jpg')
            if not os.path.exists(file_path):
                with open(file_path, 'wb') as f:
                    f.write(response.content)
            else:
                print('Already Download', file_path)
    except (requests.RequestException, OSError):
        # Best-effort: a failed download must not kill the worker process,
        # but catch only network/filesystem errors, not every exception.
        print('Failed to save image')
def main(offset):
    """Fetch the result page at *offset* and download every image in it."""
    json = get_page(offset)
    if not json:
        return  # network failure or non-200: nothing to download
    for item in get_images(json):
        print(item)
        save_image(item)


if __name__ == '__main__':
    pool = Pool()  # one worker process per CPU core by default
    # Offsets 20, 40, ..., 100 — the pages to scrape.
    groups = [x * 20 for x in range(GROUP_START, GROUP_END + 1)]
    # map() feeds each offset to main() in a separate worker process.
    pool.map(main, groups)
    pool.close()  # stop accepting new tasks
    pool.join()   # wait for all workers to finish before exiting
结果:
此时可以看到文件夹里:
随便打开一个:
Successful!