
Proxy Operations



Concept:

  A proxy is a proxy server: a machine that sits between the client and the target site.

What a proxy does

  Forwards requests and responses (i.e., it intercepts both).

How do proxies relate to crawlers?

A proxy lets the crawler change the IP address its requests appear to come from.

Websites offering proxy IPs

Xici (西祠)
Kuaidaili (快代理)
www.goubanjia.com
Daili Jingling (代理精灵): http://http.zhiliandaili.cn/

Proxy anonymity levels

  Elite (高匿), anonymous (匿名), transparent (透明)

Types

  http and https
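
The type matters because the proxies dict passed to requests is keyed by URL scheme. A minimal sketch (the proxy address is a placeholder, not a working proxy):

import requests

# Hypothetical proxy address; the key must match the scheme of the target URL,
# so an 'http' entry is not used for an https:// request and vice versa.
proxies = {
    'http': '1.2.3.4:8888',
    'https': '1.2.3.4:8888',
}
# requests.get('http://httpbin.org/ip', proxies=proxies)  # would route through the proxy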


Proxy IP example 1

Both requests.get and requests.post accept a proxies argument:

proxies = {'protocol': 'ip:port'}

import requests

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
    'Connection': 'close'
}
url = 'https://www.baidu.com/s?ie=UTF-8&wd=ip'
# Route the request through the proxy; the 'https' key matches the URL scheme
page_text = requests.get(url, headers=headers, proxies={'https': '125.87.99.237:22007'}).text
with open('./ip.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)

 

Proxy IP example 2

'Connection': 'close'   # prevents stalls from lingering connections

random.choice picks a random proxy from the pool for each request.

The tbody tag must not appear in XPath expressions: browsers inject it into the DOM they display, but it is usually absent from the raw HTML that lxml parses. A small illustration follows.
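
A minimal sketch of the pitfall (hypothetical one-row table):

from lxml import etree

html = '<table id="t"><tr><td>row</td></tr></table>'  # raw HTML, no tbody
tree = etree.HTML(html)
print(tree.xpath('//table[@id="t"]/tbody/tr'))  # [] -- tbody is not in the parsed tree
print(tree.xpath('//table[@id="t"]//tr'))       # matches the row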

import requests
from lxml import etree
import random

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
    'Connection': 'close'   # prevents stalls from lingering connections
}

# Build a pool of paid proxies from the provider's API
ips_pool = []
url = 'http://ip.11jsq.com/index.php/api/entry?method=proxyServer.generate_api_url&packid=1&fa=0&fetch_key=&groupid=0&qty=103&time=1&pro=&city=&port=1&format=html&ss=5&css=&dt=1&specialTxt=3&specialJson=&usertype=2'
page_text = requests.get(url, headers=headers).text
tree = etree.HTML(page_text)
ip_list = tree.xpath('//body//text()')
for ip in ip_list:
    dic = {'https': ip}
    ips_pool.append(dic)

url = 'https://www.xicidaili.com/nn/%d'  # generic URL template (do not modify)
all_ips = []
for page in range(1, 5):
    new_url = format(url % page)
    # Pick a random proxy from the pool for each request
    page_text = requests.get(new_url, headers=headers, proxies=random.choice(ips_pool)).text
    tree = etree.HTML(page_text)
    # The tbody tag must not appear in XPath expressions
    tr_list = tree.xpath('//*[@id="ip_list"]//tr')[1:]
    for tr in tr_list:
        ip = tr.xpath('./td[2]/text()')[0]
        port = tr.xpath('./td[3]/text()')[0]
        type_ip = tr.xpath('./td[6]/text()')[0]
        dic = {
            'ip': ip,
            'port': port,
            'type': type_ip
        }
        all_ips.append(dic)

print(len(all_ips))

 

Cookie operations

Handling cookies in a crawler:

  Manual: write the cookie into headers (a minimal sketch follows below).

  Automatic: the session object.

    Getting a session object: requests.Session()

    Purpose

      A session object can send requests to a URL just like the requests module can. The difference is that any cookie produced while a session sends requests is automatically stored in the session object and carried on subsequent requests.
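
For the manual approach, a minimal sketch; the URL and the cookie string are placeholders you would copy from the browser's developer tools, not values from this article:

import requests

headers = {
    'User-Agent': 'Mozilla/5.0',
    # Hypothetical cookie string; copy the real Cookie header from the
    # Network panel of a logged-in browser session.
    'Cookie': 'sessionid=abc123; token=xyz789',
}
page_text = requests.get('https://example.com/protected', headers=headers).text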

 

import requests

# Cookie-aware version of the request
session = requests.Session()  # get a session object
url = 'https://xueqiu.com/v4/statuses/public_timeline_by_category.json?since_id=-1&max_id=20352188&count=15&category=-1'
# Visit the page that sets the cookie first, so it is captured and stored in the session
session.get('https://xueqiu.com/', headers=headers)  # a page that sets cookies
# This request only succeeds because it now carries the corresponding cookie
news_json = session.get(url, headers=headers).json()
news_json

 

CAPTCHA recognition

Use an online CAPTCHA-solving platform for automatic recognition:
  Yundama (云打码)
  Chaojiying (超级鹰)
Register an account under the "User Center" identity
Log in
  Create a software entry (this generates a software ID)
  Download the sample code ("Developer Docs")
import requests
from hashlib import md5

class Chaojiying_Client(object):

    def __init__(self, username, password, soft_id):
        self.username = username
        password = password.encode('utf8')
        self.password = md5(password).hexdigest()
        self.soft_id = soft_id
        self.base_params = {
            'user': self.username,
            'pass2': self.password,
            'softid': self.soft_id,
        }
        self.headers = {
            'Connection': 'Keep-Alive',
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)',
        }

    def PostPic(self, im, codetype):
        """
        im: image bytes
        codetype: CAPTCHA type, see http://www.chaojiying.com/price.html
        """
        params = {
            'codetype': codetype,
        }
        params.update(self.base_params)
        files = {'userfile': ('ccc.jpg', im)}
        r = requests.post('http://upload.chaojiying.net/Upload/Processing.php', data=params, files=files, headers=self.headers)
        return r.json()

    def ReportError(self, im_id):
        """
        im_id: image ID of a misrecognized CAPTCHA
        """
        params = {
            'id': im_id,
        }
        params.update(self.base_params)
        r = requests.post('http://upload.chaojiying.net/Upload/ReportError.php', data=params, headers=self.headers)
        return r.json()


chaojiying = Chaojiying_Client('bobo328410948', 'bobo328410948', '899370')  # User Center >> software ID; replace 96001 with your own
im = open('a.jpg', 'rb').read()  # path to a local image file, replacing a.jpg; Windows paths sometimes need //
print(chaojiying.PostPic(im, 1004)['pic_str'])

# Wrap CAPTCHA recognition in a reusable function
def transformCode(imgPath, imgType):
    chaojiying = Chaojiying_Client('bobo328410948', 'bobo328410948', '899370')
    im = open(imgPath, 'rb').read()
    return chaojiying.PostPic(im, imgType)['pic_str']

# Simulated login
from urllib import request

# CAPTCHA recognition: download the CAPTCHA locally, then submit it to the platform
main_url = 'https://so.gushiwen.org/user/login.aspx?from=http://so.gushiwen.org/user/collect.aspx'
page_text = requests.get(main_url, headers=headers).text
tree = etree.HTML(page_text)
code_src = 'https://so.gushiwen.org' + tree.xpath('//*[@id="imgCode"]/@src')[0]
request.urlretrieve(code_src, './code.jpg')

# Recognize the CAPTCHA
code_text = transformCode('./code.jpg', 1004)


login_url = 'https://so.gushiwen.org/user/login.aspx?from=http%3a%2f%2fso.gushiwen.org%2fuser%2fcollect.aspx'
data = {
    '__VIEWSTATE': '8/BKAQBaZHn7+GP+Kl2Gx43fFO1NI32RMyVae0RyrtFQue3IAhzQKvkml41cIT42Y//OcQccA8AqGYkvB+NFkU43uaHqU69Y0Z1WT3ZRrr4vR+CF7JlBG29POXM=',
    '__VIEWSTATEGENERATOR': 'C93BE1AE',
    'from': 'http://so.gushiwen.org/user/collect.aspx',
    'email': 'www.zhangbowudi@qq.com',
    'pwd': 'bobo328410948',
    'code': code_text,
    'denglu': '登录',
}
print(code_text)
page_text = requests.post(login_url, headers=headers, data=data).text

with open('./login.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)

# How to capture dynamically changing request parameters:
# they are usually hidden in the page's HTML source

# CAPTCHA recognition: download the CAPTCHA locally, then submit it to the platform
main_url = 'https://so.gushiwen.org/user/login.aspx?from=http://so.gushiwen.org/user/collect.aspx'
page_text = requests.get(main_url, headers=headers).text
tree = etree.HTML(page_text)
code_src = 'https://so.gushiwen.org' + tree.xpath('//*[@id="imgCode"]/@src')[0]
request.urlretrieve(code_src, './code.jpg')

# Parse out the dynamically changing request parameters
__VIEWSTATE = tree.xpath('//*[@id="__VIEWSTATE"]/@value')[0]
__VIEWSTATEGENERATOR = tree.xpath('//*[@id="__VIEWSTATEGENERATOR"]/@value')[0]

# Recognize the CAPTCHA
code_text = transformCode('./code.jpg', 1004)


login_url = 'https://so.gushiwen.org/user/login.aspx?from=http%3a%2f%2fso.gushiwen.org%2fuser%2fcollect.aspx'
data = {
    '__VIEWSTATE': __VIEWSTATE,
    '__VIEWSTATEGENERATOR': __VIEWSTATEGENERATOR,
    'from': 'http://so.gushiwen.org/user/collect.aspx',
    'email': 'www.zhangbowudi@qq.com',
    'pwd': 'bobo328410948',
    'code': code_text,
    'denglu': '登录',
}
print(code_text)
page_text = requests.post(login_url, headers=headers, data=data).text

with open('./login.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)


# Handle the cookie as well
s = requests.Session()

# CAPTCHA recognition: download the CAPTCHA locally, then submit it to the platform
main_url = 'https://so.gushiwen.org/user/login.aspx?from=http://so.gushiwen.org/user/collect.aspx'
page_text = s.get(main_url, headers=headers).text
tree = etree.HTML(page_text)
code_src = 'https://so.gushiwen.org' + tree.xpath('//*[@id="imgCode"]/@src')[0]

# request.urlretrieve(code_src, './code.jpg')  # would fetch the image without the session cookie
code_data = s.get(code_src, headers=headers).content  # fetch via the session so the cookie matches
with open('./code.jpg', 'wb') as fp:
    fp.write(code_data)

# Parse out the dynamically changing request parameters
__VIEWSTATE = tree.xpath('//*[@id="__VIEWSTATE"]/@value')[0]
__VIEWSTATEGENERATOR = tree.xpath('//*[@id="__VIEWSTATEGENERATOR"]/@value')[0]

# Recognize the CAPTCHA
code_text = transformCode('./code.jpg', 1004)


login_url = 'https://so.gushiwen.org/user/login.aspx?from=http%3a%2f%2fso.gushiwen.org%2fuser%2fcollect.aspx'
data = {
    '__VIEWSTATE': __VIEWSTATE,
    '__VIEWSTATEGENERATOR': __VIEWSTATEGENERATOR,
    'from': 'http://so.gushiwen.org/user/collect.aspx',
    'email': 'www.zhangbowudi@qq.com',
    'pwd': 'bobo328410948',
    'code': code_text,
    'denglu': '登录',
}
print(code_text)
page_text = s.post(login_url, headers=headers, data=data).text

with open('./login.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)

 

 

 

Anti-scraping mechanisms (a consolidated countermeasure sketch follows the list)

  robots protocol

  User-Agent detection

  image lazy loading

  proxies

  cookies

  CAPTCHAs

  dynamically changing request parameters

  dynamically loaded data
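
As a consolidated illustration of the countermeasures covered above (UA spoofing, a proxy, and cookies via a session), here is a minimal sketch; the URL and proxy address are placeholders:

import requests

url = 'https://example.com/data'        # placeholder target
proxies = {'https': '1.2.3.4:8888'}     # hypothetical proxy

session = requests.Session()            # stores cookies automatically
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',  # defeats naive UA detection
    'Connection': 'close',
}
page_text = session.get(url, headers=headers, proxies=proxies).text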


Thread pool + data crawling

import time
from multiprocessing.dummy import Pool  # thread pool
import requests
from lxml import etree

start = time.time()
urls = [
    'https://www.huya.com/',
    'https://www.huya.com/428354',
]

def get_request(url):
    page_text = requests.get(url).text
    return page_text

def parse(page_text):
    tree = etree.HTML(page_text)
    print(tree.xpath('//div[1]//text()'))

pool = Pool(2)
# map applies the function to every URL concurrently and collects the results in order
page_text_list = pool.map(get_request, urls)

pool.map(parse, page_text_list)
print(len(page_text_list))

print('Total time:', time.time() - start)
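
For comparison, a serial baseline reusing the urls, get_request, and parse defined above shows what the pool saves:

# Serial baseline: each URL is fetched and parsed one after another,
# so the total time is roughly the sum of the individual requests.
start = time.time()
for url in urls:
    parse(get_request(url))
print('Serial total time:', time.time() - start)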

 


Original: https://www.cnblogs.com/Pythonzrq/p/11650087.html
