0. Get the click count from a news URL, wrapped as a function
# Get the click count from the count API
def clickCount(url):
    newsId = re.search(r'/(\d+)\.html', url).group(1)
    timeUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    clickTime = re.findall(r'\d+', requests.get(timeUrl).text.split(';')[3])[0]
    return clickTime

# Get the publication time of a news article
def newsDateTime(head):
    date = head[0][5:]                 # drop the leading '发布时间:' label
    timeStr = head[1]
    fmt = '%Y-%m-%d %H:%M:%S'
    return datetime.strptime(date + ' ' + timeStr, fmt)
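A quick sanity check, assuming the imports from the full code in section 6 (the article URL below is made up for illustration; any real article URL from the site has this shape):

# Hypothetical article URL, used only to illustrate the call.
testUrl = 'http://news.gzcc.cn/html/2019/xiaoyuanxinwen_0404/11086.html'
print(clickCount(testUrl))   # the count comes back as a string, e.g. '123'
# newsDateTime works on the whitespace-split '.show-info' line:
print(newsDateTime(['发布时间:2019-04-04', '09:00:00']))   # 2019-04-04 09:00:00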
1. Get the news details from a news URL: a dict, anews
# Get the news details
def anews(url):
    newsDetail = {}
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsDetail['title'] = soup.select('.show-title')[0].text      # news title
    head = soup.select('.show-info')[0].text.split()
    newsDetail['datetime'] = newsDateTime(head)                   # publication time
    newsDetail['clickTime'] = clickCount(url)                     # click count
    newsDetail['content'] = soup.select('.show-content')[0].text  # news body
    newsDetail['url'] = url
    return newsDetail
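For one article the returned dict looks roughly like this (the URL is hypothetical and the values illustrative, not real data):

# Illustrative call; the keys are fixed, the values depend on the article.
detail = anews('http://news.gzcc.cn/html/2019/xiaoyuanxinwen_0404/11086.html')
# {'title': '...', 'datetime': datetime(2019, 4, 4, 9, 0), 'clickTime': '123',
#  'content': '...', 'url': 'http://news.gzcc.cn/html/...'}
print(detail['title'], detail['clickTime'])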
2. Get the news URLs from a list-page URL: list append(dict), alist
# Get the news URLs from a news list page
def alist(listUrl):
    res = requests.get(listUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsList = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsUrl = news.select('a')[0]['href']
            newsList.append(newsUrl)
    return newsList
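Calling it on a single list page (the page number here is arbitrary) returns one URL per .news-list-title entry:

# Fetch the article URLs on one (arbitrary) list page.
pageUrls = alist('http://news.gzcc.cn/html/xiaoyuanxinwen/64.html')
print(len(pageUrls))   # one URL per news item on the page
print(pageUrls[:3])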
3. Generate the URLs of all the list pages and fetch all the news: list extend(list), allnews
*Each student crawls the 10 list pages starting from the last digits of their student ID
# Crawl list pages 64 through 73 (10 pages)
url = []
for i in range(64, 74):
    url.extend(alist('http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)))

allnews = []
for i in url:
    allnews.append(anews(i))
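Since the starting page is keyed to the student ID, a small parameterized variant makes the loop reusable (a sketch; crawlRange is a helper name introduced here, not part of the original code):

# Sketch: crawl `count` list pages starting at `start` (e.g. the last
# two digits of a student ID) and return a news dict for every article.
def crawlRange(start, count=10):
    pageUrls = []
    for i in range(start, start + count):
        pageUrls.extend(alist('http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)))
    return [anews(u) for u in pageUrls]

# crawlRange(64) reproduces the loop above.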
4. Set a reasonable crawl interval
import time
import random
time.sleep(random.random()*3)
# Set a reasonable crawl interval
for i in range(5):
    time.sleep(random.random() * 3)
    print(newsdf)
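Note that sleeping after the crawl has finished does not actually space out the requests; a gentler variant (a sketch, not the original author's code) puts the delay inside the fetch loop from step 3:

# Sketch: pause between article requests so the pacing actually applies.
allnews = []
for u in url:
    allnews.append(anews(u))
    time.sleep(random.random() * 3)   # 0-3 s pause between requests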
5. Do simple data processing with pandas and save it
Save to a csv or excel file
newsdf.to_csv(r'F:\duym\爬虫\gzccnews.csv')
# Save to a file
newsdf = pd.DataFrame(allnews)
newsdf.to_csv('news.csv', encoding='utf-8')
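The heading also mentions excel: to_excel writes the same frame to .xlsx (pandas needs the openpyxl package for this), and read_csv is a quick way to verify the saved file:

# Excel output (requires openpyxl); then read the csv back as a check.
newsdf.to_excel('news.xlsx')
print(pd.read_csv('news.csv').head())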
Run screenshot:

6. Full code
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas as pd
import time
import random

# Get the click count from the count API
def clickCount(url):
    newsId = re.search(r'/(\d+)\.html', url).group(1)
    timeUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    clickTime = re.findall(r'\d+', requests.get(timeUrl).text.split(';')[3])[0]
    return clickTime

# Get the publication time of a news article
def newsDateTime(head):
    date = head[0][5:]                 # drop the leading '发布时间:' label
    timeStr = head[1]
    fmt = '%Y-%m-%d %H:%M:%S'
    return datetime.strptime(date + ' ' + timeStr, fmt)

# Get the news details
def anews(url):
    newsDetail = {}
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsDetail['title'] = soup.select('.show-title')[0].text      # news title
    head = soup.select('.show-info')[0].text.split()
    newsDetail['datetime'] = newsDateTime(head)                   # publication time
    newsDetail['clickTime'] = clickCount(url)                     # click count
    newsDetail['content'] = soup.select('.show-content')[0].text  # news body
    newsDetail['url'] = url
    return newsDetail

# Get the news URLs from a news list page
def alist(listUrl):
    res = requests.get(listUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsList = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsUrl = news.select('a')[0]['href']
            newsList.append(newsUrl)
    return newsList

# Crawl list pages 64 through 73 (10 pages)
url = []
for i in range(64, 74):
    url.extend(alist('http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)))

allnews = []
for i in url:
    allnews.append(anews(i))

# Save to a file
newsdf = pd.DataFrame(allnews)
newsdf.to_csv('news.csv', encoding='utf-8')

# Set a reasonable crawl interval
for i in range(5):
    time.sleep(random.random() * 3)
    print(newsdf)
Original: https://www.cnblogs.com/GMUK/p/10713523.html