The Three Steps of Web Scraping

A crawler boils down to three steps: send the request, parse the data, and save the data. The script below implements each step and ties them together in a test run against www.xiaohuar.com.

import re
import time
import uuid

import requests


# The three steps of web scraping
# 1. Send the request
def get_page(url):
    response = requests.get(url)
    return response


# 2. Parse the data
def parse_index(html):
    # re.findall(pattern, text, flags) returns every non-overlapping match;
    # re.S lets '.' match newlines so the whole document is searched
    detail_urls = re.findall(
        '<div class="items"><a class="imglink" href="(.*?)"',
        html, re.S
    )
    return detail_urls


# Parse a detail page and extract the video URL
def parse_detail(html):
    movie_url = re.findall('<source src="(.*?)">', html, re.S)
    if movie_url:
        return movie_url[0]


# 3. Save the data
# uuid.uuid4() generates a random, practically unique string,
# so every downloaded file gets its own name
def save_video(content):
    with open(f'{uuid.uuid4()}.mp4', 'wb') as f:
        f.write(content)
        print('video saved')


# Typing "main" + Enter in PyCharm expands to the block below
# Test run:
if __name__ == '__main__':
    for page in range(6):
        url = f'http://www.xiaohuar.com/list-3-{page}.html'
        # Send a request for the index page
        response = get_page(url)
        # response.status_code and response.text hold the status code and body

        # Parse the index page for detail-page URLs
        detail_urls = parse_index(response.text)

        # Loop over the detail-page URLs
        for detail_url in detail_urls:
            # Request each detail page
            detail_res = get_page(detail_url)
            # Parse the detail page to get the video URL
            movie_url = parse_detail(detail_res.text)
            # If a video URL was found, print it and download the video
            if movie_url:
                print(movie_url)
                # Request the video URL to get the binary video stream
                movie_res = get_page(movie_url)
                save_video(movie_res.content)
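
To sanity-check the two regular expressions without touching the network, here is a minimal offline sketch. It reuses the same patterns that parse_index and parse_detail rely on; the HTML snippets and the example.com URLs are invented purely for illustration and only mimic the tag structure the patterns expect.

import re

# Invented snippets that imitate the markup the patterns expect (illustration only)
index_html = '<div class="items"><a class="imglink" href="http://example.com/detail-1.html">'
detail_html = '<video><source src="http://example.com/video-1.mp4"></video>'

# Same pattern as parse_index: pull the detail-page link out of the list item
print(re.findall('<div class="items"><a class="imglink" href="(.*?)"', index_html, re.S))
# -> ['http://example.com/detail-1.html']

# Same pattern as parse_detail: pull the video URL out of the <source> tag
print(re.findall('<source src="(.*?)">', detail_html, re.S))
# -> ['http://example.com/video-1.mp4']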