首页 > 其他 > 详细

爬取豌豆荚

时间:2019-07-03 21:41:03      阅读:106      评论:0      收藏:0      [点我收藏+]
from bs4 import BeautifulSoup
import requests


# 请求url https://www.wandoujia.com/category/6001
# 请求方式: get

def have_title(tag):
    """BeautifulSoup filter: match <span> tags that carry a "title" attribute.

    Passed to soup.find(); bs4 calls it once per tag and treats a truthy
    return as a match. Returns the tag itself when it matches, otherwise
    None (falsy).
    """
    # Original source used curly quotes (‘span‘) — a SyntaxError; fixed here.
    if tag.name == 'span' and tag.has_attr("title"):
        return tag
    return None


# 获取网页
def get_page(url):
index_res = requests.get(url)
return index_res


# 解析网页
def parse_detail(html):
soup = BeautifulSoup(html, ‘lxml‘)
list = soup.find_all(name=‘li‘, class_=‘card‘)

data = ""
for i in list:
app_name = i.a.img.attrs[‘alt‘]
detail_url = i.a.attrs[‘href‘]
download_num = i.find(name=‘div‘, class_=‘meta‘).find(class_=‘install-count‘).text
app_size = i.find(name=‘div‘, class_=‘meta‘).find(have_title).text
data += f"""
名称 : {app_name}
详情页url : {detail_url}
下载人数 : {download_num}
app大小 : {app_size}

"""
return data


# 保存数据
def save_games(data):
    """Write the scraped app listing to games.txt (UTF-8), overwriting
    any previous run's output."""
    with open('games.txt', 'w', encoding='utf-8') as f:
        f.write(data)


if __name__ == '__main__':
    url = 'https://www.wandoujia.com/category/6001'
    # Use the get_page helper (previously defined but never called)
    # instead of duplicating the requests.get call here.
    index_res = get_page(url)
    index_detail = index_res.text
    data = parse_detail(index_detail)
    save_games(data)

爬取豌豆荚

原文:https://www.cnblogs.com/ywlhxr/p/11129202.html

(0)
(0)
   
举报
评论 一句话评论(0)
关于我们 - 联系我们 - 留言反馈 - 联系我们:wmxa8@hotmail.com
© 2014 bubuko.com 版权所有
打开技术之扣,分享程序人生!