import sys
from bs4 import BeautifulSoup  # parse the web page and pull out the data
import re  # regular expressions, for text matching
import urllib  # build the URL and fetch the page data
import urllib.request
import urllib.error
import xlwt  # Excel operations
import sqlite3  # database operations


def main():
    # Crawl the pages, parse the data, and save the result.
    baseurl = "https://movie.douban.com/top250?start="
    # Save in the current directory. A Windows path can be written three ways:
    # 1. a single forward slash "./"; 2. "\\", where the first backslash escapes
    # the second; 3. a raw string such as r".\xxx".
    savepath = ".\\豆瓣电影top250.xls"
    datalist = getdata(baseurl)
    # Save the data
    savedata(datalist, savepath)
    # html = askurl(baseurl)
    # return html


# Link to the film's detail page
findlink = re.compile(r'<a href="(.*?)">')  # compile the rule (a string pattern) into a regex object
# Poster image
findimgsrc = re.compile(r'<img.*src="(.*?)"', re.S)  # re.S lets "." also match newlines
# Title
findtitle = re.compile(r'<span class="title">(.*)</span>')
# Rating
findrating = re.compile(r'<span class="rating_num" property="v:average">(.*)</span>')
# Number of ratings
findjudge = re.compile(r'<span>(\d*)人评价</span>')
# One-line summary
finddes = re.compile(r'<span class="inq">(.*)</span>')
# Other details of the film
findcon = re.compile(r'<p class="">(.*?)</p>', re.S)
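# Illustration only (not part of the crawl flow): re.findall(findtitle,
# '<span class="title">Some Film</span>') would return ['Some Film'], i.e. only
# the text captured by the group, which is what getdata() stores for each row.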


# Crawl the pages
def getdata(baseurl):
    datalist = []
    for i in range(0, 10):  # number of pages to crawl: 10 pages, 25 films each
        url = baseurl + str(i * 25)
        html = askurl(url)
        # Parse one page at a time
        soup = BeautifulSoup(html, 'html.parser')
        for item in soup.find_all('div', class_='item'):  # collect every matching <div class="item"> block
            # print(item)
            # break
            data = []  # holds all the fields of one film
            item = str(item)
            Link = re.findall(findlink, item)[0]  # apply the compiled pattern to the item's HTML
            data.append(Link)
            Imgsrc = re.findall(findimgsrc, item)[0]
            data.append(Imgsrc)
            Title = re.findall(findtitle, item)
            if len(Title) == 2:
                ctitle = Title[0]  # Chinese title
                data.append(ctitle)
                etitle = Title[1].replace('/', '')  # foreign title
                data.append(etitle)
            else:
                data.append(Title[0])
                data.append(' ')  # no foreign title, leave the cell blank
            Rating = re.findall(findrating, item)[0]
            data.append(Rating)
            Judge = re.findall(findjudge, item)[0]
            data.append(Judge)
            Des = re.findall(finddes, item)
            if len(Des) != 0:
                Des = Des[0].replace('。', '')  # drop the trailing full stop
                data.append(Des)
            else:
                Des = ' '
                data.append(Des)
            Con = re.findall(findcon, item)[0]
            Con = re.sub(r'<br(\s+)?/>(\s+)?', ' ', Con)  # replace <br/> tags
            Con = re.sub('/', ' ', Con)  # replace "/"
            data.append(Con.strip())  # strip leading/trailing whitespace
            print(data)
            datalist.append(data)
            # print(Link)
    print(datalist)
    return datalist


# Save the data to an Excel file
def savedata(datalist, savepath):
    book = xlwt.Workbook(encoding='utf-8', style_compression=0)  # create a workbook object
    sheet = book.add_sheet('豆瓣电影250', cell_overwrite_ok=True)  # create a worksheet
    col = ("影片链接", "影片图片", "影片中文名字", "影片英文名字", "影片评分", "影片评价人数", "影片概况", "影片相关内容")
    for i in range(0, 8):
        sheet.write(0, i, col[i])  # header row
    for i in range(len(datalist)):  # one row per film (normally 250)
        data = datalist[i]
        for j in range(0, 8):
            sheet.write(i + 1, j, data[j])
    book.save(savepath)
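

# sqlite3 is imported above but never used. The sketch below shows one possible
# way to write the same rows into a SQLite database instead of Excel; the
# function name, table name and default dbpath are assumptions made for
# illustration, and nothing calls this function from main().
def savedata_db(datalist, dbpath="movies.db"):
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()
    cur.execute('''
        CREATE TABLE IF NOT EXISTS top250 (
            link TEXT, img TEXT, ctitle TEXT, etitle TEXT,
            rating TEXT, judge TEXT, inq TEXT, info TEXT
        )
    ''')
    # each row produced by getdata() already has exactly 8 fields,
    # in the same order as the Excel columns above
    cur.executemany("INSERT INTO top250 VALUES (?, ?, ?, ?, ?, ?, ?, ?)", datalist)
    conn.commit()
    conn.close()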


# Fetch the HTML content of one given url
def askurl(url):
    # headers: the User-Agent copied from the request headers shown in the browser's F12 dev tools
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36 Edg/96.0.1054.62"
    }  # tells the site we are a browser, not a crawler; adding more request-header key/value pairs makes the disguise more convincing
    html = ""
    req = urllib.request.Request(url=url, headers=headers)
    try:
        # read the response
        response = urllib.request.urlopen(req)
        html = response.read().decode("utf-8")
        # print(html)
    except urllib.error.URLError as e:
        print("Request failed")
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html


if __name__ == "__main__":
    main()  # main() no longer returns the html, so there is nothing to capture here
    # bs = BeautifulSoup(html, "html.parser")
    # print(type(bs.title.string))