首页 > 其他 > 详细

爬虫初识(爬取dytt电影列表及下载地址)

时间:2018-10-12 20:09:29      阅读:124      评论:0      收藏:0      [点我收藏+]
import re
from  urllib.request import urlopen
def getPage(url):
    """Fetch *url* and return its body decoded as GBK.

    dytt8.net serves GBK-encoded pages; undecodable bytes are dropped
    (errors="ignore") rather than raising UnicodeDecodeError.
    The response object is closed after reading to avoid a socket leak.
    """
    response = urlopen(url)
    try:
        return response.read().decode("gbk", errors="ignore")
    finally:
        response.close()
def parsePage(s):
    """Return the absolute URL of the first movie detail page found in
    list-page HTML *s*, or None when no entry matches.

    The pattern targets the dytt8 list-page markup: each movie row is a
    <td height="26"> cell containing an <a class="ulink"> detail link.
    """
    com = re.compile(
        r'<td height="26">.*?<b>.*?<a href="(?P<url_name>.*?)" class="ulink">.*?',
        re.S,
    )
    for i in com.finditer(s):
        # Only the first match is needed: return immediately.
        return "http://www.dytt8.net" + i.group("url_name")
def parsePage1(s):
    """Yield one dict per movie record parsed from detail-page HTML *s*.

    Keys (pinyin, kept for compatibility with existing consumers):
      yiming      - translated title  (译名)
      pianming    - original title    (片名)
      daoyan      - director          (导演)
      zhuyan      - lead cast         (主演)
      xiazaidizhi - download address  (下载地址)

    Ideographic spaces (U+3000) used as padding on the page are stripped
    from every field.
    """
    com = re.compile(
        r'<div id="Zoom">.*?译.*?名(?P<name>.*?)<br />◎片.*?名(?P<pianname>.*?)'
        r'<br />.*?◎导.*?演(?P<daoyan>.*?)<br />'
        r'◎主.*?演(?P<zhuyan>.*?)<br /><br />◎简.*?介.*?'
        r'<td.*?><a href="(?P<xiazaidizhi>.*?)">',
        re.S,
    )
    for i in com.finditer(s):
        yield {
            "yiming": re.sub("[\u3000]", "", i.group("name")),
            "pianming": re.sub("[\u3000]", "", i.group("pianname")),
            "daoyan": re.sub("[\u3000]", "", i.group("daoyan")),
            "zhuyan": re.sub("[\u3000]", "", i.group("zhuyan")),
            "xiazaidizhi": re.sub("[\u3000]", "", i.group("xiazaidizhi")),
        }
def main(num):
    """Scrape list page *num*: follow its first movie detail link and append
    each parsed record (one dict per line) to the ``move_list`` file.

    Does nothing when the list page yields no matching entry, instead of
    crashing on a None URL.
    """
    url = "http://www.dytt8.net/html/gndy/dyzz/list_23_%s.html" % num
    response_html = getPage(url)
    xiangqing = parsePage(response_html)
    if xiangqing is None:
        # List page had no recognizable movie row; skip this page.
        return
    response1_html = getPage(xiangqing)
    ret = parsePage1(response1_html)
    # "with" guarantees the file is closed even if parsing raises.
    with open("move_list", "a", encoding="utf8") as f:
        for obj in ret:
            print(obj)
            f.write(str(obj) + "\n")
if __name__ == "__main__":
    # Crawl list pages 1..180; guard so importing this module does not
    # trigger a full crawl as a side effect.
    for page_num in range(1, 181):
        main(page_num)

 

爬虫初识(爬取dytt电影列表及下载地址)

原文:https://www.cnblogs.com/zhoushibin-1/p/9780285.html

(0)
(0)
   
举报
评论 一句话评论(0)
关于我们 - 联系我们 - 留言反馈 - 联系我们:wmxa8@hotmail.com
© 2014 bubuko.com 版权所有
打开技术之扣,分享程序人生!