首页 > 其他 > 详细

scrapy

时间:2015-01-14 09:37:35      阅读:317      评论:0      收藏:0      [点我收藏+]

# -*- coding: utf-8 -*-
import scrapy
import chardet
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.utils.url import urljoin_rfc
from scrapy.http import Request

class Greasemonkey1Spider(scrapy.Spider):
    """Recursively crawl wiki.greasespot.net, following every anchor link.

    For each page, extracts the text and href of every ``<a>`` element,
    prints them, and schedules a follow-up request for each resolved URL.
    ``allowed_domains`` keeps the crawl confined to the wiki.
    """
    name = "greasemonkey1"
    allowed_domains = ["wiki.greasespot.net"]
    start_urls = (
        'http://wiki.greasespot.net/',
    )

    def parse(self, response):
        """Yield a Request for every usable link on *response*.

        :param response: the downloaded page (scrapy ``Response``).
        :returns: generator of ``scrapy.Request`` objects, each re-using
            this method as its callback (depth-unbounded crawl).
        """
        baseurl = response.url
        print('baseurl  = ', baseurl)

        for anchor in response.xpath('//a'):
            titles = anchor.xpath('text()').extract()
            urls = anchor.xpath('@href').extract()
            # Skip anchors that lack visible text or an href attribute.
            if not titles or not urls:
                continue
            title = titles[0]
            url = urls[0]
            # Skip empty strings and in-page fragment links ("#...").
            if not title or not url or url.startswith('#'):
                continue
            print('2222', title, url)
            # Response.urljoin replaces the removed
            # scrapy.utils.url.urljoin_rfc helper and resolves relative
            # hrefs against the page's base URL.
            url2 = response.urljoin(url)
            print('=== ', url2)
            yield scrapy.Request(url2, callback=self.parse)

scrapy

原文:http://www.cnblogs.com/zhang-pengcheng/p/4223074.html

(0)
(0)
   
举报
评论 一句话评论(0)
关于我们 - 联系我们 - 留言反馈 - 联系我们:wmxa8@hotmail.com
© 2014 bubuko.com 版权所有
打开技术之扣,分享程序人生!