首页 > 其他 > 详细

一个可以获取知乎timeline的爬虫

时间:2017-08-29 19:21:55      阅读:352      评论:0      收藏:0      [点我收藏+]
# -*- coding: utf-8 -*-
import requests
import lxml
import os,time
from bs4 import BeautifulSoup as sb
try:
    import cookielib

except:
    import http.cookiejar as cookielib
import json

# HTTP headers mimicking a mobile Chrome browser. The "authorization" bearer
# token is account-specific and expires — replace it with your own.
headers = {
        "Host": "www.zhihu.com",
        "Accept-Language": "zh-CN,zh;q=0.8",
        "accept": "application/json, text/plain, */*",
        "Referer": "https://www.zhihu.com/",
        "Connection": "keep-alive",
        # FIX: this key/value pair was unquoted, which is a Python syntax
        # error ("(Linux; Android..." cannot parse) — restored the literals.
        "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Mobile Safari/537.36",
        "authorization": "Bearer Mi4xUXJGd0FBQUFBQUFBa0VKNTBfbnVDeGNBQUFCaEFsVk5OQmZMV1FCVnQ3aEhfeUVsUElGN1Zrd3RSSWpMdHI0ZG5B|1503889972|a235d0e24d646c5df6b1f667abc005381c273870"
    }

def get_session():
    """Create a requests session backed by an LWP cookie jar.

    Tries to load previously saved cookies from the file "cookies" in the
    current directory; falls back to an empty jar when the file is missing
    or unreadable.

    Returns:
        requests.Session: session with ``cookies`` set to an LWPCookieJar.
    """
    session = requests.session()
    session.cookies = cookielib.LWPCookieJar(filename="cookies")
    try:
        session.cookies.load()
        print("cookie 加载成功!")
    # FIX: was a bare ``except:`` that swallowed every error (including
    # KeyboardInterrupt); only a missing/corrupt cookie file is expected here.
    except (OSError, cookielib.LoadError):
        print("cookie 无法加载...")
    return session

# Module-level session shared by every request in this script.
session = get_session()

# Query parameters for the /api/v3/feed/topstory endpoint.
# FIX: "action" appeared twice ("True" then "down"); Python dict literals
# keep only the last duplicate, so the dead first entry is removed.
data = {"action":"down",
        "limit":"10",
        "session_token":"c9c3581148b6d633275ba5d4412d3bd8",
        "after_id":"0",         # paging cursor, overwritten by the driver loop
        "desktop":"true"
        }

def get_data():
    """Fetch one page of the Zhihu topstory feed and print each entry.

    Uses the module-level ``session``, ``headers`` and ``data`` (query
    parameters), and increments the module-level ``count`` once per item.
    Prints the question title and answer content of every feed item, or a
    fallback message when a field is absent.
    """
    # FIX: GET query parameters belong in ``params=``; ``data=`` would be
    # sent as a request body and ignored by the API.
    res = session.get("https://www.zhihu.com/api/v3/feed/topstory",
                      params=data, headers=headers)
    # FIX: the local was named ``json``, shadowing the imported json module.
    payload = res.json()
    global count
    # FIX: all dict keys below were quote-stripped bare names (they raised
    # NameError/TypeError at runtime); restored the intended string keys.
    for item in payload["data"]:
        try:
            print(item["target"]["question"]["title"])
        except (KeyError, TypeError):
            print("没有问题了" + str(item))
        try:
            print(item["target"]["content"])
        except (KeyError, TypeError):
            print("找不到答案了" + str(item))
        count += 1
        print()
count = 0  # running total of feed items printed across all pages

# Walk five pages of the feed (ten items per page), pausing between
# requests to avoid hammering the API.
for page in range(5):
    data["after_id"] = page * 10
    get_data()
    time.sleep(3)


print(count)

 

一个可以获取知乎timeline的爬虫

原文:http://www.cnblogs.com/peter1994/p/7449751.html

(0)
(0)
   
举报
评论 一句话评论(0)
关于我们 - 联系我们 - 留言反馈 - 联系我们:wmxa8@hotmail.com
© 2014 bubuko.com 版权所有
打开技术之扣,分享程序人生!