Python Web Scraper: Scraping Resource Links from the "追新番" Website

The "追新番" Website

追新番 (zhuixinfan) provides download links for the latest Japanese dramas and films, and it is updated fairly quickly.

I personally enjoy watching Japanese dramas, so I wanted to scrape the site and build a "resource map" that shows which dramas it carries and lets me download any of them at any time.

Resource Map

The scraped resource map looks like this:

On Linux, a quick ls | grep keywords makes it easy to find the resource you want (on Windows, the built-in file search works just as well).

(Screenshot: directory listing of the scraped resource files)

Developing the Scraping Script

1. Decide on a crawling strategy

Visiting several dramas shows that every drama page has a URL of the following form:

(Screenshot: example drama page URLs)

As you can see, each drama page corresponds to a number.

So we can crawl the site simply by iterating over these numbers, as sketched below.
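
As a quick sketch of that strategy: the viewtvplay-&lt;num&gt;.html pattern is the same one the full script in the appendix uses, while the tv_page_url helper below is just for illustration.

    # sketch: build drama page URLs from sequential numbers
    domain = 'http://www.zhuixinfan.com/'

    def tv_page_url(num):
        ''' return the page URL for drama number <num> (illustrative helper) '''
        return domain + 'viewtvplay-{}.html'.format(num)

    for num in range(1, 4):
        print(tv_page_url(num))
    # -> http://www.zhuixinfan.com/viewtvplay-1.html, -2.html, -3.html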

2. Get the drama title

Open one of the drama pages and look at the source of the title:

(Screenshot: HTML source of the page title)

As you can see, the title element has the ID "pdtname"; to get the drama's name we only need that element's text.

Using the BeautifulSoup API, grab the element's content (and strip the extra characters from the name):

     # try get tv name
     tag_name = soup.find(id='pdtname')
     if tag_name is None:
         print('tv_{:0>4d}: not exist.'.format(num))
         return None

     # strip unwanted marks from the name
     name = tag_name.get_text().replace(' ', '')
     name = re.sub(r'【.*】', '', name)
     name = re.sub(r'\(.*\)', '', name)
     name = name.replace('《', '').replace('》', '').replace('/', '')
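
For example, applied to a made-up title (the string below is purely illustrative, not taken from the site), the cleanup works like this:

    import re

    # made-up title, just to show what each step removes
    name = '《示例日剧》【2018冬】(第1-10话)'.replace(' ', '')
    name = re.sub(r'【.*】', '', name)   # drop 【...】 blocks
    name = re.sub(r'\(.*\)', '', name)   # drop (...) blocks
    name = name.replace('《', '').replace('》', '').replace('/', '')
    print(name)   # -> 示例日剧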

3. Get the resource links

Each drama page also contains the addresses of the resource links; the relevant source looks like this:

(Screenshot: HTML source of the resource table)

As you can see, the resource links live in a table whose ID is "ajax_tbody".

Each episode is a row of that table, and each row contains several columns with the resource's details.

We walk the table's rows to get each episode's resource link:

    # try get tv resources list
    tag_resources = soup.find(id='ajax_tbody')
    if tag_resources is None:
        print('tv_{:0>4d}: has no resources.'.format(num))
        return None

    # walk resources
    for res in tag_resources.find_all('tr'):

        # get link tag
        tag_a = res.find('a')
        info = res.find_all('td')
        print('resource: ', tag_a.get_text())

        # get download link
        downlink = get_resources_link(session, tag_a.get('href'))

        # record resources
        tv.resources.append([tag_a.get_text(), info[2].get_text(), downlink, ''])
        delay(1)
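
To make the row layout concrete, here is a small parsing sketch; the HTML fragment is made up purely to illustrate the structure the loop above assumes (an &lt;a&gt; tag holding the episode link plus several &lt;td&gt; columns), not copied from the real page:

    from bs4 import BeautifulSoup

    # hypothetical table row, only to show the structure the loop relies on
    row_html = '''
    <table><tr>
      <td>01</td>
      <td><a href="viewresource-0001.html">EP01 720p</a></td>
      <td>MP4</td>
      <td>2018-01-01</td>
    </tr></table>
    '''
    row = BeautifulSoup(row_html, 'html.parser').find('tr')
    tag_a = row.find('a')
    info = row.find_all('td')
    print(tag_a.get_text(), tag_a.get('href'), info[2].get_text())
    # -> EP01 720p viewresource-0001.html MP4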

4. Get the download link

Clicking one of the resources opens the download-link page, whose source looks like this:

(Screenshot: HTML source of the download-link page)

As you can see, the eMule download link sits in an element whose ID is "emule_url", so we only need that element's text (magnet links can be handled much the same way; a sketch follows the function below).

But first we have to fetch the download page itself; the whole operation looks like this:

def get_resources_link(session, url):
    ''' get tv resources download link  '''

    global domain
    res_url = domain + url

    # open resources page
    resp = session.get(res_url, timeout = 10)
    resp.raise_for_status()

    soup = page_decode(resp.content, resp.encoding)

    tag_emule = soup.find(id='emule_url')
    return tag_emule.get_text() if tag_emule is not None else ''
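
As mentioned above, magnet links can be grabbed in the same way. Here is a hedged sketch: the magnet_url element ID below is an assumption by analogy with emule_url, not something verified against the site.

    def get_resources_links(session, url):
        ''' sketch: fetch both the eMule and the magnet link from one resource page '''

        resp = session.get(domain + url, timeout=10)
        resp.raise_for_status()
        soup = page_decode(resp.content, resp.encoding)

        tag_emule = soup.find(id='emule_url')
        tag_magnet = soup.find(id='magnet_url')   # assumed ID, analogous to 'emule_url'
        return (tag_emule.get_text() if tag_emule is not None else '',
                tag_magnet.get_text() if tag_magnet is not None else '')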

5. Save the download links to disk

Because crawling the download links for every drama is time-consuming, an earlier check (the only_catalog flag) lets the script crawl just the titles first; the download links for a given number can then be crawled later.

def save_tv(tv):
    ''' save tv information on disk '''

    filename = os.path.join(os.path.abspath(save_dir), '{:0>4d}_{}.txt'.format(tv.num, tv.name))

    global only_catalog
    if only_catalog:
        with open(filename, 'a+') as f:
            pass
    else:
        with open(filename, 'w') as f:
            for info in tv.resources:
                f.write(os.linesep.join(info))
                f.write('========' + os.linesep)
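
Putting it together, each drama is saved as a file named like 0012_&lt;name&gt;.txt, and when the links are crawled, every episode becomes a small block ending with a '========' separator. A sketch with made-up values:

    import os

    # made-up values, just to show the filename and on-disk layout save_tv produces
    tv_num, tv_name = 12, '示例日剧'
    print('{:0>4d}_{}.txt'.format(tv_num, tv_name))   # -> 0012_示例日剧.txt

    resource = ['EP01 720p', 'MP4', 'ed2k://|file|...', '']
    print(os.linesep.join(resource) + '========')
    # EP01 720p
    # MP4
    # ed2k://|file|...
    # ========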

That covers the whole development process of the scraping script.

Feel free to follow my code repository: https://gitee.com/github-18274965/Python-Spider

I plan to develop scraping scripts for other sites later on.

Appendix

Full code:

 #!/usr/bin/python3
 # -*- coding:utf-8 -*-

 import os
 import sys
 import re
 import requests
 from bs4 import BeautifulSoup
 from time import sleep

 # website domain
 domain = 'http://www.zhuixinfan.com/'

 # spider information save directory
 save_dir = './tvinfo/'

 # only tv catalog
 only_catalog = False

 class TVInfo:
     ''' TV information class '''

     def __init__(self, num, name):
         self.num = num
         self.name = name
         self.resources = []

 def delay(seconds):
     ''' sleep for the given number of seconds '''

     while seconds > 0:
         sleep(1)
         seconds = seconds - 1

 def page_decode(content, encoding):
     ''' decode page '''

     # lxml may fail; fall back to html.parser
     try:
         soup = BeautifulSoup(content, 'lxml', from_encoding=encoding)
     except:
         soup = BeautifulSoup(content, 'html.parser', from_encoding=encoding)

     return soup

 def open_home_page(session):
     ''' open the home page first, like a human visitor '''

     global domain
     home_url = domain + 'main.php'

     # open home page
     resp = session.get(home_url, timeout = 10)
     resp.raise_for_status()

     # do nothing

 def get_resources_link(session, url):
     ''' get tv resources download link  '''

     global domain
     res_url = domain + url

     # open resources page
     resp = session.get(res_url, timeout = 10)
     resp.raise_for_status()

     soup = page_decode(resp.content, resp.encoding)

     tag_emule = soup.find(id='emule_url')
     return tag_emule.get_text() if tag_emule is not None else ''

 def spider_tv(session, num):
     ''' fetch tv information '''

     global domain
     tv_url = domain + 'viewtvplay-{}.html'.format(num)

     # open tv infomation page
     resp = session.get(tv_url, timeout = 10)
     resp.raise_for_status()

     soup = page_decode(resp.content, resp.encoding)

     # try get tv name
     tag_name = soup.find(id='pdtname')
     if tag_name is None:
         print('tv_{:0>4d}: not exist.'.format(num))
         return None

     # try get tv resources list
     tag_resources = soup.find(id='ajax_tbody')
     if tag_resources is None:
         print('tv_{:0>4d}: has no resources.'.format(num))
         return None

     # strip unwanted marks from the name
     name = tag_name.get_text().replace(' ', '')
     name = re.sub(r'【.*】', '', name)
     name = re.sub(r'\(.*\)', '', name)
     name = name.replace('《', '').replace('》', '').replace('/', '')

     print('tv_{:0>4d}: {}'.format(num, name))

     tv = TVInfo(num, name)

     global only_catalog
     if only_catalog:
         return tv

     # walk resources
     for res in tag_resources.find_all('tr'):

         # get link tag
         tag_a = res.find('a')
         info = res.find_all('td')
         print('resource: ', tag_a.get_text())

         # get download link
         downlink = get_resources_link(session, tag_a.get('href'))

         # record resources
         tv.resources.append([tag_a.get_text(), info[2].get_text(), downlink, ''])
         delay(1)

     return tv

 def save_tv(tv):
     ''' save tv information on disk '''

     filename = os.path.join(os.path.abspath(save_dir), '{:0>4d}_{}.txt'.format(tv.num, tv.name)) 

     global only_catalog
     if only_catalog:
         with open(filename, 'a+') as f:
             pass
     else:
         with open(filename, 'w') as f:
             for info in tv.resources:
                 f.write(os.linesep.join(info))
                 f.write('========' + os.linesep)

 def main():

     start = 1
     end = 999

     if len(sys.argv) > 1:
         start = int(sys.argv[1])

     if len(sys.argv) > 2:
         end = int(sys.argv[2])

     global only_catalog
     answer = input("Only catalog ?[y/N] ")
     if answer == 'y' or answer == 'Y':
         only_catalog = True

     # headers: firefox_58 on ubuntu
     headers = {
         'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0)'
                 + ' Gecko/20100101 Firefox/58.0',
         'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
         'Accept-Language': 'zh-CN,en-US;q=0.7,en;q=0.3',
         'Accept-Encoding': 'gzip, deflate',
         }

     # create spider session
     with requests.Session() as s:

         try:
             s.headers.update(headers)
             open_home_page(s)
             for num in range(start, end+1):
                 delay(3)
                 tv = spider_tv(s, num)
                 if tv is not None:
                     save_tv(tv)

         except Exception as err:
             print(err)
             sys.exit(-1)

 if __name__ == '__main__':
     main()