So, my goal is to parse the data from a website and store it in a text file formatted so that it can be opened in Excel. Below is the code: the data is parsed with BeautifulSoup and stored with pandas' DataFrame.to_csv.
from bs4 import BeautifulSoup
import requests
import pprint
import re
import pyperclip
import json
import pandas as pd
import csv
pag = range(2, 126)
out_file = open('bestumbrellasoffi.txt', 'w', encoding='utf-8')
with open('bestumbrellasoffi.txt', 'w', encoding='utf-8') as file:
    for x in pag:
        # iterate pages
        url = 'https://www.paginegialle.it/ricerca/lidi%20balneari/italia/p-' + str(x) + '?mr=50'
        response = requests.get(url)
        soup = BeautifulSoup(response.content, "html.parser")
        # parse data
        for i, j, k, p, z in zip(soup.find_all('span', attrs={'itemprop': 'name'}),
                                 soup.find_all('span', attrs={'itemprop': 'longitude'}),
                                 soup.find_all('span', attrs={'itemprop': 'latitude'}),
                                 soup.find_all('span', attrs={'class': 'street-address'}),
                                 soup.find_all('div', attrs={'class': 'tel elementPhone'})):
            info = i.text, j.text, k.text, p.text, z.text
            # check if data is good
            print(url)
            print(info)
            # create dataframe
            raw_data = {'nome': [i], 'longitudine': [j], 'latitudine': [k],
                        'indirizzo': [p], 'telefono': [z]}
            print(raw_data)
            df = pd.DataFrame(raw_data,
                              columns=['nome', 'longitudine', 'latitudine', 'indirizzo', 'telefono'])
            df.to_csv('bestumbrellasoffi.txt')
out_file.close()
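Two things in the code above are likely culprits: the same file is opened for writing twice (once as out_file, once in the with block), and df.to_csv('bestumbrellasoffi.txt') runs inside the inner loop with pandas' default write mode, so every row overwrites the previous one. A minimal sketch of one way around this, collecting all rows first and writing a single CSV at the end; the sep=';' choice is only an assumption about what the local Excel expects as a column separator:

from bs4 import BeautifulSoup
import requests
import pandas as pd

rows = []
for x in range(2, 126):
    url = 'https://www.paginegialle.it/ricerca/lidi%20balneari/italia/p-' + str(x) + '?mr=50'
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    for i, j, k, p, z in zip(soup.find_all('span', attrs={'itemprop': 'name'}),
                             soup.find_all('span', attrs={'itemprop': 'longitude'}),
                             soup.find_all('span', attrs={'itemprop': 'latitude'}),
                             soup.find_all('span', attrs={'class': 'street-address'}),
                             soup.find_all('div', attrs={'class': 'tel elementPhone'})):
        # keep the text of each tag, not the Tag object itself
        rows.append({'nome': i.text.strip(),
                     'longitudine': j.text,
                     'latitudine': k.text,
                     'indirizzo': p.text.strip(),
                     'telefono': z.text.strip()})

# write once, after scraping, instead of overwriting the file on every row
df = pd.DataFrame(rows, columns=['nome', 'longitudine', 'latitudine', 'indirizzo', 'telefono'])
df.to_csv('bestumbrellasoffi.txt', sep=';', index=False, encoding='utf-8')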
I have all these modules because I made a lot of attempts. So the output of print(info) is:
[screenshot of the print(info) output]
and the output of print(raw_data) is:
[screenshot of the print(raw_data) output]
EDIT
Here is the reviewed and fully working code.
Thanks everyone for the patience!
from bs4 import BeautifulSoup
import requests
import pprint
import re
import pyperclip
import json
import pandas as pd
import csv

pag = range(2, 126)
with open('bestumbrellasoffia.txt', 'a', encoding='utf-8') as file:
    for x in pag:
        # iterate pages
        url = 'https://www.paginegialle.it/ricerca/lidi%20balneari/italia/p-' + str(x) + '?mr=50'
        response = requests.get(url)
        soup = BeautifulSoup(response.content, "html.parser")
        raw_data = {'nome': [], 'longitudine': [], 'latitudine': [], 'indirizzo': [], 'telefono': []}
        df = pd.DataFrame(raw_data, columns=['nome', 'longitudine', 'latitudine', 'indirizzo', 'telefono'])
        # parse data
        for i, j, k, p, z in zip(soup.find_all('span', attrs={'itemprop': 'name'}),
                                 soup.find_all('span', attrs={'itemprop': 'longitude'}),
                                 soup.find_all('span', attrs={'itemprop': 'latitude'}),
                                 soup.find_all('span', attrs={'class': 'street-address'}),
                                 soup.find_all('div', attrs={'class': 'tel elementPhone'})):
            # strip whitespace around the name
            inno = i.text.lstrip()
            ye = inno.rstrip()
            info = ye, j.text, k.text, p.text, z.text
            # check if data is good
            print(info)
            # create dataframe
            raw_data = {'nome': [i], 'longitudine': [j], 'latitudine': [k], 'indirizzo': [p], 'telefono': [z]}
            # try dataframe
            # print(raw_data)
            file.write(str(info) + "\n")
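One caveat about file.write(str(info) + "\n"): it writes the Python tuple repr, parentheses and quotes included, which Excel will not split into columns. A minimal sketch using the already-imported csv module instead; the output filename and the ';' delimiter are assumptions, and the sample row is only a placeholder:

import csv

# placeholder row standing in for one scraped `info` tuple (hypothetical values)
info = ('Lido Esempio', '12.34', '45.67', 'Via Roma 1', '0000 000000')

with open('bestumbrellasoffia.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f, delimiter=';')
    # write the header once, then one writerow() per scraped info tuple
    writer.writerow(['nome', 'longitudine', 'latitudine', 'indirizzo', 'telefono'])
    writer.writerow(info)

Unlike manual string concatenation, csv.writer also quotes fields that happen to contain the delimiter, so addresses with semicolons would not break the column layout.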
Welcome to SO. What is the question? Please take the time to read [ask] and the links it contains. – wwii
Thanks @wwii, sorry I wasn't clear –