实战案例:抽屉自动点赞与爬取汽车之家新闻

05.实战案例:抽屉自动点赞与爬取汽车之家新闻

文章目录

一、抽屉自动点赞

# Post a comment to dig.chouti.com by replaying a logged-in browser session.
# The Cookie header below carries the authenticated session token; without it
# the endpoint rejects the request.
import requests

# Form payload for the comment-creation endpoint.
# (The vote endpoint at /link/vote only needs a 'linkId' field.)
comment_payload = {
    'content': '其实一般',
    'linkId': '31008563',
    'parentId': '0',
    'pictureUrl': ''
}

# Headers copied from a real browser session: Referer + User-Agent make the
# request look like it came from the site itself; Cookie authenticates it.
request_headers = {
    'Referer': 'https://dig.chouti.com/',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
    'Cookie': 'deviceId=web.eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJqaWQiOiIwZmVlMjk5OS1iMDgzLTRmYzctOTM4MC03YjIzZmVmY2U5YmYiLCJleHBpcmUiOiIxNjIzOTA0ODk5MzM5In0.7cadtBYznS6OgnLwEF8aH0AmtDOoYB1WKDgdU4eYYS0; __snaker__id=VbChmBUEZIVY3FPa; _9755xjdesxxd_=32; YD00000980905869%3AWM_TID=%2FazmF9%2FrClJFEVFBVRN70z7msH6De39Y; YD00000980905869%3AWM_NI=fmln0UTLoOM0bJxRYMet9SoHoQFrKUG7angbfEmftGxseQnkMmbwsdEPNwgtVpQ9K0fqli5fhP6nKsZ15bIt%2BQYBdpjdM8x19UJqjf6LSi%2FmhSgQW%2F3SYGNWEwJPPlYGRWM%3D; YD00000980905869%3AWM_NIKE=9ca17ae2e6ffcda170e2e6eeb5d567838e8fa6f94dbaef8eb7d54a938e8b85f83bf88a97a2e464a98689afaa2af0fea7c3b92aa6b3a48fb35f9894a1b0d03ca296b8b3dc47a7acf7b4ee44ad8f8a93ca5f85e9af8fe66aa69ba387f74dbcadabb2ed618fb3ae98f27087908298e68096b09fdaca3ca6afa48ab86eac90fa8fca799aeffb83cc80e98f97a3e77caabc83d9fb3bfb8b8692e96ef6949d8aae67ac8da9b2d625f18d97a8cd5d87a986b1d3689b999eb8d037e2a3; Hm_lvt_03b2668f8e8699e91d479d62bc7630f1=1621312902,1621392225; gdxidpyhxdE=weRAWhzVrJfrCGllI4mwY8LxZOiO4D79t%2Fkf8j8qcJUsTDrjyVh05GQiaf6uL8dwsXpkShI%2B2uGHa9Vj5b1QilxdgI%2BoDUr%5C0VN4kMrnVLUmzGb56lwmZRoAmUq%2FToGtCRjYKAaANejzA%5CQcWg4LwkrdXzwqNISMTfwQUaMw4puru4fM%3A1621393127138; token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJqaWQiOiJjZHVfNTMyMDcwNzg0NjAiLCJleHBpcmUiOiIxNjIzOTg0MjcwNDA0In0.4Q4uQAd4LkbVVcu37t0SjRFE4CSIidduRspeQ08-iYE; Hm_lpvt_03b2668f8e8699e91d479d62bc7630f1=1621392374'
}

# Upvote variant (same headers, linkId-only payload):
# res = requests.post('https://dig.chouti.com/link/vote', data=comment_payload, headers=request_headers)
response = requests.post(
    'https://dig.chouti.com/comments/create',
    data=comment_payload,
    headers=request_headers,
)
print(response.text)

二、爬取汽车之家

# Scrape the Autohome news listing page.
# Uses bs4 (beautifulsoup4), a library dedicated to parsing HTML.
import requests
from bs4 import BeautifulSoup

res = requests.get('https://www.autohome.com.cn/news/1/#liststart')
# print(res.text)

# BeautifulSoup(document, parser):
#   first argument  - the document to parse
#   second argument - which parser backend to use:
#     html.parser  slower, but built in (no extra install needed)
#     lxml         faster and more fault-tolerant, requires installing lxml
soup = BeautifulSoup(res.text, 'html.parser')

# Find every <ul class="article"> in the document, then gather their <li> items.
ul_list = soup.find_all(name='ul', class_='article')
li_list = []
for ul in ul_list:
    li_list += ul.find_all(name='li')

for li in li_list:
    h3 = li.find(name='h3')
    # Some <li> entries are ads/placeholders without a headline — skip those.
    if h3:
        title = h3.text  # headline text

        # Guard each lookup: a <li> may lack <a>, <p> or <img>, and calling
        # .text / indexing on None would crash the whole loop mid-scrape.
        a_tag = li.find(name='a')
        url = 'http:' + a_tag['href'] if a_tag and a_tag.get('href') else ''

        p_tag = li.find(name='p')
        desc = p_tag.text if p_tag else ''

        img_tag = li.find(name='img')
        img = 'http:' + img_tag['src'] if img_tag and img_tag.get('src') else ''

        print('''
        文章标题:%s
        文章地址:%s
        文章图片:%s
        文章摘要:%s
        ''' % (title, url, img, desc))
        # Possible persistence options:
        # store in MySQL, or
        # store in Redis  articles=[{json string},{}]
        #                 article ={'1':{json string},}

上一篇:window api requestAnimation


下一篇:JS-jQuery