#Get the response content
import requests
from bs4 import BeautifulSoup
# r=requests.get("http://www.santostang.com/")
# print("Text encoding:",r.encoding)
# print("Response status code:",r.status_code)
# print("Response body as a string:",r.text) #prints the whole HTML page

#Response timeout
# link="http://www.santostang.com/"
# r=requests.get(link,timeout=0.001)   #0.001s is far too short, so a Timeout exception is raised
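# Added note (a sketch, not in the original code): in a real script the timeout
# is usually caught with requests.exceptions.Timeout instead of letting the
# program crash:
# try:
#     r=requests.get("http://www.santostang.com/",timeout=0.001)
# except requests.exceptions.Timeout:
#     print("The request timed out")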

#Customizing requests
# key_dict={"key1":"value1","key2":"value2"}
# req=requests.get("http://httpbin.org/get",params=key_dict)
# print("URL correctly encoded:",req.url)
# print("Response body as a string:\n",req.text)
# req=requests.post("http://httpbin.org/post",data=key_dict)   #form data is POSTed to the /post endpoint
# print("URL correctly encoded:",req.url)
# print("Response body as a string:\n",req.text)

# headers={"User-Agent":"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0",
# "Host":"www.santostang.com"
# }
# r=requests.get("http://www.santostang.com/",headers=headers)
# print("响应状态码:",r.status_code)
def get_movies():
    #Scrape the movie titles from the 10 pages of the Douban Top 250 list
    headers={
        "User-Agent":"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0",
        "Host":"movie.douban.com"
    }
    # r=requests.get("https://movie.douban.com/top250",headers=headers)
    movie_list=[]
    for i in range(10):
        link="https://movie.douban.com/top250?start="+str(i*25)   #each page lists 25 movies
        r=requests.get(link,headers=headers,timeout=10)
        print("Page",str(i+1),"response status code:",r.status_code)
        soup=BeautifulSoup(r.text,"lxml")
        div_list=soup.find_all("div",class_="hd")   #each "hd" div contains one title block
        for each in div_list:
            movie=each.a.span.text.strip()   #the first <span> inside the link holds the title
            movie_list.append(movie)
    return movie_list
movies=get_movies()
print(movies)
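As a small follow-up sketch (not part of the original post), the list returned by get_movies() can be written to a text file for later use; the file name top250.txt here is just an illustrative choice.

with open("top250.txt","w",encoding="utf-8") as f:   #one title per line
    for name in movies:
        f.write(name+"\n")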

Source: https://www.cnblogs.com/momingzhong/p/11901853.html
