The CrawlSpider is probably too limited for your purposes here. If you need a lot of custom logic, you are usually better off inheriting from Spider.
Scrapy provides the CloseSpider exception, which you can raise when you need to stop parsing under certain conditions. The page you are crawling returns the message "There are no Focus articles on your stocks" when you go past the maximum page, so you can check for that message and stop iterating as soon as it appears.
In your case you can go with something like this:
from urlparse import urljoin

from scrapy import log
from scrapy.spider import Spider
from scrapy.http import Request
from scrapy.selector import Selector
from scrapy.exceptions import CloseSpider

from myproject.items import NewsItem  # adjust to your project's items module


class ExampleSite(Spider):
    name = "so"
    download_delay = 0.1

    more_pages = True
    next_page = 1

    start_urls = ['http://example.com/account/ajax_headlines_content?type=in_focus_articles&page=0' +
                  '&slugs=tsla&is_symbol_page=true']

    allowed_domains = ['example.com']

    def create_ajax_request(self, page_number):
        """
        Helper function to create the ajax request for the next page.
        """
        ajax_template = ('http://example.com/account/ajax_headlines_content'
                         '?type=in_focus_articles&page={pagenum}'
                         '&slugs=tsla&is_symbol_page=true')
        url = ajax_template.format(pagenum=page_number)
        return Request(url, callback=self.parse)

    def parse(self, response):
        """
        Parsing of each listing page.
        """
        if "There are no Focus articles on your stocks." in response.body:
            self.log("about to close spider", level=log.WARNING)
            raise CloseSpider(reason="no more pages to parse")

        # there is some content: extract links to articles
        sel = Selector(response)
        # NOTE: the class name below is a placeholder; the attribute value was
        # lost in the original post, so substitute the real class of the
        # article divs on your page
        links_xpath = "//div[@class='article']/a/@href"
        links = sel.xpath(links_xpath).extract()
        for link in links:
            url = urljoin(response.url, link)
            # follow link to article
            # commented out to see how pagination works
            #yield Request(url, callback=self.parse_item)

        # generate request for next page
        self.next_page += 1
        yield self.create_ajax_request(self.next_page)

    def parse_item(self, response):
        """
        Parsing of each article page.
        """
        self.log("Scraping: %s" % response.url, level=log.INFO)

        hxs = Selector(response)

        item = NewsItem()
        item['url'] = response.url
        item['source'] = 'example'
        item['title'] = hxs.xpath('//title/text()').extract()
        # NOTE: placeholder class name again; the attribute value was lost in
        # the original post
        item['date'] = hxs.xpath("//div[@class='date']/span/text()").extract()

        yield item
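The spider above references a NewsItem class that isn't shown. Here is a minimal sketch of it, assuming only the four fields assigned in parse_item(); put it in your project's items.py:

from scrapy.item import Item, Field


class NewsItem(Item):
    # fields match the assignments made in ExampleSite.parse_item()
    url = Field()
    source = Field()
    title = Field()
    date = Field()

With that in place you can run the spider with scrapy crawl so (matching its name attribute), and once you have confirmed that pagination stops at the "no Focus articles" message, uncomment the yield Request(url, callback=self.parse_item) line in parse() to actually follow the article links.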


