Python

# -*- coding: utf-8 -*-
"""
Created on Tue Jun 8 09:55:53 2021

@author: Administrator
"""

from bs4 import BeautifulSoup
import re
import urllib.request,urllib.error
import xlwt
import sqlite3

def main():
    baseurl = "https://www.sheffield.ac.uk/FRAX/tool.aspx?country=2"
    # 1. Crawl the web page
    datalist = getData(baseurl)
    # savepath = '.\\FRAX.xls'
    # 2. Parse the data
    # 3. Save the data
    # saveData(datalist, savepath)

    # Quick check: fetch the FRAX page and print every result block
    html = askURL("https://www.sheffield.ac.uk/FRAX/tool.aspx?country=2")
    soup = BeautifulSoup(html, "html.parser")
    for item in soup.find_all('div', class_="result-content"):
        print(item)

# 1. Crawl the web page

def getData(baseurl):
    datalist = []
    return datalist

# Get the page content of the given URL

def askURL(url):
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36"
    }
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
        print(html)
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html

# 3. Save the data


if __name__ == "__main__":
    main()
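The getData stub above leaves steps 1 and 2 empty. A minimal sketch of how it might combine the askURL helper defined above with BeautifulSoup to collect the result-content blocks into datalist is shown below; the use of get_text and the flat-list shape of datalist are assumptions, not part of the original script.

Python

# Sketch only: assumes askURL from the script above is in scope.
from bs4 import BeautifulSoup


def getData(baseurl):
    datalist = []
    html = askURL(baseurl)                     # step 1: crawl the page
    soup = BeautifulSoup(html, "html.parser")  # step 2: parse the HTML
    for item in soup.find_all('div', class_="result-content"):
        # keep the plain text of each result block
        datalist.append(item.get_text(strip=True))
    return datalist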

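The commented-out saveData(datalist, savepath) call and the xlwt import suggest that step 3 was meant to write the scraped rows to an .xls file. A minimal sketch of such a saveData, assuming datalist is a flat list of strings (one per result block), could look like this; the sheet name and single-column layout are assumptions.

Python

import xlwt


# Sketch of step 3 (saving): one datalist entry per row in an .xls workbook.
def saveData(datalist, savepath):
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)
    sheet = book.add_sheet("FRAX", cell_overwrite_ok=True)
    for row, value in enumerate(datalist):
        sheet.write(row, 0, value)  # column 0 holds the scraped text
    book.save(savepath)             # e.g. savepath = '.\\FRAX.xls'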