aiohttp、asyncio使用协程增加爬虫效率

import aiohttp
import asyncio
import time





async def get_requests(url):
    """Fetch *url* asynchronously and return the response body as text.

    Args:
        url: The URL to GET.

    Returns:
        The decoded response body (str).

    Note: a new ClientSession is created per call, which is fine for a
    demo but defeats connection pooling; for many requests, share one
    session across calls.
    """
    async with aiohttp.ClientSession() as session:
        # session.get() returns an async context manager directly;
        # the original `async with await session.get(...)` awaited it
        # redundantly before entering the context.
        async with session.get(url) as response:
            page_text = await response.text()
            return page_text


def paser(task):
    """Done-callback for a fetch task: extract and return the page text.

    Args:
        task: A finished asyncio Task/Future whose result is the page
            text produced by ``get_requests``.

    Returns:
        The task's result, so the parsed value is available to callers
        invoking this directly. (``add_done_callback`` ignores the
        return value, so this stays backward-compatible.)

    Raises:
        Whatever exception the task stored, re-raised by ``result()``.
    """
    # NOTE(review): name is a typo for "parser", kept because the
    # callback registration below references `paser`.
    result = task.result()
    return result


if __name__ == '__main__':
    async def main():
        """Fan out one fetch task per URL and wait for all to finish."""
        urls = [
            "http://www.baidu.com",
            "http://www.baidu.com",
            "http://www.baidu.com"
        ]
        tasks = []
        for url in urls:
            # Tasks must be created while a loop is running; inside
            # main() ensure_future schedules them on the running loop.
            task = asyncio.ensure_future(get_requests(url))
            task.add_done_callback(paser)
            tasks.append(task)
        await asyncio.wait(tasks)

    start = time.time()
    # asyncio.run() replaces the deprecated get_event_loop() /
    # run_until_complete() pattern (DeprecationWarning since 3.10,
    # removed behavior in 3.12) and closes the loop cleanly.
    asyncio.run(main())
    print("总耗时:",time.time()-start)

 

上一篇:aiohttp


下一篇:异步async;httpx、aiohttp