python抓取wallhaven首页壁纸
"""
下载wallheaven首页图片,保存在同级文件夹image中
"""
from bs4 import BeautifulSoup
import requests
import re
import os
"""
图片下载函数
Pic_url为图片的url
save_path为图片保存路径
Headers为请求头,有默认参数
"""
# noinspection PyDefaultArgument
def DownloadImage(Pic_url, Save_path, Headers={
"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/83.0.4103.61 Safari/537.36 '
}):
if not os.path.exists(Save_path):
os.mkdir(Save_path)
pic_name = pic_url[-20:]
pic = requests.get(url=Pic_url, headers=Headers)
with open(Save_path + pic_name, 'wb') as f:
for chunk in pic.iter_content():
f.write(chunk)
print("图片已保存")
if __name__ == '__main__':
# wallhaven的网址
url = "https://wallhaven.cc/"
# 请求头,用于通过wallhaven的检查,wallhaven不支持爬虫爬取
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/83.0.4103.61 Safari/537.36 '
}
# 获取html文件
response = requests.get(url=url, headers=headers)
html = BeautifulSoup(response.text, 'html5lib')
# 获取每一张图片的高清图片链接,存入列表。由于从上一个页面获取到的链接是打开图片详情页的链接,所以在这再次发起请求。
pic_page_url_list = []
for item in html.find_all(name='a', href=re.compile("https://wallhaven.cc/w/")):
pic_page_url_list.append(item.get("href"))
# 逐一下载列表里面的图片
for pic_page_url in pic_page_url_list:
pic_response = requests.get(url=pic_page_url, headers=headers)
pic_html = BeautifulSoup(pic_response.text, 'html5lib')
pic_url = pic_html.select('#wallpaper')[0]["src"]
if pic_url:
print(pic_url)
DownloadImage(pic_url, 'image/', headers)
else:
print("No url")