Analyzing Ajax Requests to Scrape Toutiao Image Galleries by Keyword

# Goal: scrape Toutiao (今日头条) image galleries for a given keyword.
# Approach:
# 1. Analyze the target site.
# 2. Construct the Ajax request, fetch the index page with requests, and extract the detail-page URLs with a regex + BeautifulSoup.
# 3. Request each detail URL, extract the image URLs and the title, download the images, and save the records to a database (MongoDB this time).
# 4. Loop over the pages with a process pool to crawl them all.

# Question 1: why construct the request?
# Take the first screen of results as an example. The actual URL requested is:
# http://www.toutiao.com/search_content/?offset=20&format=json&keyword=%E8%A1%97%E6%8B%8D&autoload=true&count=20&cur_tab=1
# The long string of parameters after the '?' is the request's "settings": the keyword, the page offset to load, and so on, in dictionary form.
# Typing these out by hand would clearly be tedious, so we encode the dictionary into a query string and pass it to the request function.
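A minimal sketch of that encoding step, using urllib.parse.urlencode (街拍 is simply the keyword from the example URL above):

from urllib.parse import urlencode

params = {
    'offset': 20,
    'format': 'json',
    'keyword': '街拍',
    'autoload': 'true',
    'count': 20,
    'cur_tab': 1
}
# urlencode() turns the dict into a percent-encoded query string.
url = 'http://www.toutiao.com/search_content/?' + urlencode(params)
print(url)
# http://www.toutiao.com/search_content/?offset=20&format=json&keyword=%E8%A1%97%E6%8B%8D&autoload=true&count=20&cur_tab=1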
import os
import json
import re
from multiprocessing.pool import Pool
from urllib.parse import urlencode

import requests
import pymongo
from bs4 import BeautifulSoup
from requests.exceptions import RequestException

from config import *

client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]


def get_index_page(offset, keyword):
    # Build the query-string parameters seen in the example URL above.
    data = {
        'offset': offset,
        'format': 'json',
        'keyword': keyword,
        'autoload': 'true',
        'count': 20,
        'cur_tab': 1
    }
    url = 'http://www.toutiao.com/search_content/?' + urlencode(data)
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('Failed to fetch the index page!')
        return None


def parse_index_page(html):
    # json.dumps() serializes a Python object into a JSON string;
    # json.loads() does the reverse, which is what we need here.
    data = json.loads(html)
    if data and 'data' in data.keys():
        for item in data.get('data'):
            yield item.get('article_url')


def get_detail_page(url):
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None


def save_to_mongo(result):
    if db[MONGO_TABLE].insert(result):
        print('Saved to MongoDB:', result)
        return True
    return False


def parse_detail_page(html, url):
    soup = BeautifulSoup(html, 'lxml')
    title = soup.title.string
    # The gallery data is embedded in the page as 'var gallery = {...};'.
    pattern = re.compile(r'var gallery = (.*?);', re.S)
    result = re.findall(pattern, html)
    if result:
        images = []
        for i in result:
            i = json.loads(i)
            for k in i.get('sub_images'):
                images.append(k.get('url'))
        return {
            'title': title,
            'url': url,
            'images': images
        }


def download_image(result):
    image_list = result.get('images')
    image_title = result.get('title')
    print('Downloading: %s' % image_title)
    # One directory per gallery, named after the article title.
    if image_title not in os.listdir(path='.'):
        os.mkdir(image_title)
    os.chdir(image_title)
    for image in image_list:
        try:
            response = requests.get(image)
            if response.status_code == 200:
                filename = image.split('/')[-1] + '.jpg'
                with open(filename, 'wb') as f:
                    f.write(response.content)
                print('Downloading: %s' % image)
            else:
                return None
        except RequestException:
            return None
    os.chdir(os.pardir)  # go back up to the parent directory


def main(offset):
    html = get_index_page(offset, KEYWORDS)
    for url in parse_index_page(html):
        html = get_detail_page(url)
        if html:
            result = parse_detail_page(html, url)
            if result:
                # save_to_mongo(result)
                download_image(result)


if __name__ == '__main__':
    groups = [i * 20 for i in range(GROUP_START, GROUP_END + 1)]
    pool = Pool()
    pool.map(main, groups)
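For reference, the two pieces of JSON the parsers above rely on look roughly like this. The field values below are made up for illustration, and only the fields the code actually reads are shown:

# Index response: parse_index_page() walks data['data'] and yields each 'article_url'.
index_response = {
    'data': [
        {'article_url': 'http://toutiao.com/group/1234567890/'},
        # ...one entry per gallery in the result page
    ]
}

# Detail page: parse_detail_page() captures the 'var gallery = {...};' script
# variable and reads the 'url' of each entry in 'sub_images'.
gallery = {
    'sub_images': [
        {'url': 'http://p3.pstatp.com/origin/aaaa'},
        {'url': 'http://p3.pstatp.com/origin/bbbb'},
    ]
}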
# For comparison: the same crawler as written by a more experienced developer
import json
import os
import re
from urllib.parse import urlencode
from multiprocessing import Pool
from hashlib import md5
from json.decoder import JSONDecodeError

import pymongo
import requests
from bs4 import BeautifulSoup
from requests.exceptions import ConnectionError

from config import *

client = pymongo.MongoClient(MONGO_URL, connect=False)
db = client[MONGO_DB]


def get_page_index(offset, keyword):
    data = {
        'autoload': 'true',
        'count': 20,
        'cur_tab': 3,
        'format': 'json',
        'keyword': keyword,
        'offset': offset,
    }
    params = urlencode(data)
    base = 'http://www.toutiao.com/search_content/'
    url = base + '?' + params
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        return None
    except ConnectionError:
        print('Error occurred')
        return None


def download_image(url):
    print('Downloading', url)
    try:
        response = requests.get(url)
        if response.status_code == 200:
            save_image(response.content)
        return None
    except ConnectionError:
        return None


def save_image(content):
    # Name each file after the MD5 of its content, which also deduplicates images.
    file_path = '{0}/{1}.{2}'.format(os.getcwd(), md5(content).hexdigest(), 'jpg')
    print(file_path)
    if not os.path.exists(file_path):
        with open(file_path, 'wb') as f:
            f.write(content)


def parse_page_index(text):
    try:
        data = json.loads(text)
        if data and 'data' in data.keys():
            for item in data.get('data'):
                yield item.get('article_url')
    except JSONDecodeError:
        pass


def get_page_detail(url):
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        return None
    except ConnectionError:
        print('Error occurred')
        return None


def parse_page_detail(html, url):
    soup = BeautifulSoup(html, 'lxml')
    result = soup.select('title')
    title = result[0].get_text() if result else ''
    images_pattern = re.compile('var gallery = (.*?);', re.S)
    result = re.search(images_pattern, html)
    if result:
        data = json.loads(result.group(1))
        if data and 'sub_images' in data.keys():
            sub_images = data.get('sub_images')
            images = [item.get('url') for item in sub_images]
            for image in images:
                download_image(image)
            return {
                'title': title,
                'url': url,
                'images': images
            }


def save_to_mongo(result):
    if db[MONGO_TABLE].insert(result):
        print('Successfully Saved to Mongo', result)
        return True
    return False


def main(offset):
    text = get_page_index(offset, KEYWORD)
    urls = parse_page_index(text)
    for url in urls:
        html = get_page_detail(url)
        result = parse_page_detail(html, url)
        if result:
            save_to_mongo(result)


if __name__ == '__main__':
    pool = Pool()
    groups = [x * 20 for x in range(GROUP_START, GROUP_END + 1)]
    pool.map(main, groups)
    pool.close()
    pool.join()
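Both scripts pull their settings in with "from config import *". A minimal config.py sketch, with placeholder values (note that the first script reads KEYWORDS while the second reads KEYWORD):

# config.py -- placeholder values, adjust as needed
MONGO_URL = 'localhost'    # MongoDB connection address
MONGO_DB = 'toutiao'       # database name
MONGO_TABLE = 'jiepai'     # collection name
KEYWORDS = '街拍'          # search keyword (first script)
KEYWORD = '街拍'           # search keyword (second script)
GROUP_START = 1            # first result page (offset = GROUP_START * 20)
GROUP_END = 20             # last result page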