Scraping different product information with Scrapy

Below is the code I use to scrape product information. There are many products on each page; I scrape all of them and then move on to the next page. The problem is that Scrapy only picks up the first product on the page instead of iterating over all the products on it. Where am I going wrong?

import re 
import time 
import sys 
from scrapy.spider import BaseSpider 
from scrapy.selector import Selector 
from scrapy.http import Request 
import parsedatetime 
from datetime import datetime 
from airline_sentiment.items import * 
from airline_sentiment.spiders.crawlerhelper import * 

class TripAdvisorRestaurantBaseSpider(BaseSpider): 
    name = "shoebuy" 

    allowed_domains = ["shoebuy.com"] 
    base_uri = "http://www.shoebuy.com" 
    start_urls = [ 
       base_uri + "/womens-leather-boots/category_2493?cm_sp=cat-_-d_womensboots_tiles_b1_leather-_-092216" 
       ] 


    def parse(self, response): 

     sel = Selector(response) 

     snode_airline = sel.xpath('//*[starts-with(@class, "pt_grid")]/div[starts-with(@class, "pt_product")]') 

     for snode_restaurant in snode_airline: 
      tripadvisor_item = AirlineSentimentItem() 

      tripadvisor_item['url'] = self.base_uri + clean_parsed_string(get_parsed_string(snode_restaurant, '//div[starts-with(@class, "pt_info")]/a/@href')) 

      tripadvisor_item['name'] = clean_parsed_string(get_parsed_string(snode_restaurant, '//div[starts-with(@class, "pt_info")]/a/span[@class="pt_title"]/text()')) 
      tripadvisor_item['price'] = clean_parsed_string(get_parsed_string(snode_restaurant, '//div[starts-with(@class, "pt_prices")]/span[@class="pt_price"]/text()')) 
      tripadvisor_item['discount'] = clean_parsed_string(get_parsed_string(snode_restaurant, '//div[starts-with(@class, "pt_prices")]/div[@class="pt_discount"]/span[@class="pt_percent_off"]/text()')) 
      tripadvisor_item['orig_price'] = clean_parsed_string(get_parsed_string(snode_restaurant, '//div[starts-with(@class, "pt_prices")]/div[@class="pt_discount"]/span[@class="pt_price_orig"]/text()')) 
      tripadvisor_item['stars'] = clean_parsed_string(get_parsed_string(snode_restaurant, '//*[@class="bv-rating-ratio"]/span/span[3]/text()')) 
      tripadvisor_item['reviews'] = clean_parsed_string(get_parsed_string(snode_restaurant, '//div[starts-with(@class, "bv-inline-rating-container")]/dl/dd[2]/span/text()')) 

      yield Request(url=tripadvisor_item['url'], meta={'tripadvisor_item': tripadvisor_item}, callback=self.parse_fetch_review) 


     next_page_url = clean_parsed_string(get_parsed_string(sel, '//div[@class="paging"]/a[@class="next"]/@href')) 
     if next_page_url and len(next_page_url) > 0: 
      yield Request(url=self.base_uri + next_page_url, meta={'tripadvisor_item': tripadvisor_item}, callback=self.parse_next_page) 

    def parse_next_page(self, response): 
     sel = Selector(response) 

     snode_airline = sel.xpath('//*[starts-with(@class, "pt_grid")]/div[starts-with(@class, "pt_product")]') 

     for snode_restaurant in snode_airline: 

      tripadvisor_item = AirlineSentimentItem() 

      tripadvisor_item['url'] = self.base_uri + clean_parsed_string(get_parsed_string(snode_restaurant, '//div[starts-with(@class, "pt_info")]/a/@href')) 
      tripadvisor_item['name'] = clean_parsed_string(get_parsed_string(snode_restaurant, '//div[starts-with(@class, "pt_info")]/a/span[@class="pt_title"]/text()')) 
      tripadvisor_item['price'] = clean_parsed_string(get_parsed_string(snode_restaurant, '//div[starts-with(@class, "pt_prices")]/span[@class="pt_price"]/text()')) 
      tripadvisor_item['discount'] = clean_parsed_string(get_parsed_string(snode_restaurant, '//div[starts-with(@class, "pt_prices")]/div[@class="pt_discount"]/span[@class="pt_percent_off"]/text()')) 
      tripadvisor_item['orig_price'] = clean_parsed_string(get_parsed_string(snode_restaurant, '//div[starts-with(@class, "pt_prices")]/div[@class="pt_discount"]/span[@class="pt_price_orig"]/text()')) 
      tripadvisor_item['stars'] = clean_parsed_string(get_parsed_string(snode_restaurant, '//*[@class="bv-rating-ratio"]/span/span[3]/text()')) 
      tripadvisor_item['reviews'] = clean_parsed_string(get_parsed_string(snode_restaurant, '//div[starts-with(@class, "bv-inline-rating-container")]/dl/dd[2]/span/text()')) 

      yield Request(url=tripadvisor_item['url'], meta={'tripadvisor_item': tripadvisor_item}, callback=self.parse_fetch_review) 

     next_page_url = clean_parsed_string(get_parsed_string(sel, '//div[@class="paging"]/a[@class="next"]/@href')) 
     if next_page_url and len(next_page_url) > 0: 
      yield Request(url=self.base_uri + next_page_url, meta={'tripadvisor_item': tripadvisor_item}, callback=self.parse_next_page) 

    def parse_fetch_review(self, response): 

     tripadvisor_item = response.meta['tripadvisor_item'] 
     sel = Selector(response) 

     snode_reviews = sel.xpath('//*[starts-with(@class, "product_info_wrapper")]') 

     for snode_review in snode_reviews: 

      tripadvisor_item['img'] = self.base_uri + clean_parsed_string(get_parsed_string(snode_review, '//div[starts-with(@class,"large_thumb")]/img/@src')) 

      tripadvisor_item['desc'] = clean_parsed_string(get_parsed_string(snode_review, '//*[starts-with(@class,"product_information")]/div[1]/span/text()')) 

      tripadvisor_item['brand'] = clean_parsed_string(get_parsed_string(snode_review, '//div[starts-with(@class,"seo_module")]/h3/text()')) 

     yield tripadvisor_item 

Answer

This is the offending line:

 tripadvisor_item['url'] = self.base_uri + clean_parsed_string(get_parsed_string(snode_restaurant, '//div[starts-with(@class, "pt_info")]/a/@href')) 

An XPath that starts with .//div selects nodes relative to the current node:

'.//div[starts-with(@class, "pt_info")]/a/@href' 

Because your XPaths are not relative to the current node (no leading '.' notation), you always get the first product's link on the page as every item's URL. Scrapy also has an automatic duplicate-URL filter, so what happens is that all of your review-fetch requests get filtered out later on, and you end up with nothing but the first item.
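As a quick check (not a fix), Scrapy's Request accepts a dont_filter flag that bypasses that duplicate filter; with it, every product would emit a review request again, but each one would still point at the first product's URL, which confirms the dupefilter is only hiding the real problem:

 yield Request(url=tripadvisor_item['url'],
               meta={'tripadvisor_item': tripadvisor_item},
               callback=self.parse_fetch_review,
               dont_filter=True)  # diagnostic only: masks the symptom, not the cause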

tl;dr: just add a . before the // in your relative XPaths.
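For concreteness, a minimal sketch of the parse loop with the leading dots added (field extractions shortened; AirlineSentimentItem, clean_parsed_string and get_parsed_string are the helpers from the question):

 def parse(self, response):
     sel = Selector(response)
     products = sel.xpath('//*[starts-with(@class, "pt_grid")]/div[starts-with(@class, "pt_product")]')

     for product in products:
         item = AirlineSentimentItem()
         # The leading '.' anchors each query to this product node instead of
         # searching the whole document from the root again.
         item['url'] = self.base_uri + clean_parsed_string(
             get_parsed_string(product, './/div[starts-with(@class, "pt_info")]/a/@href'))
         item['name'] = clean_parsed_string(
             get_parsed_string(product, './/div[starts-with(@class, "pt_info")]/a/span[@class="pt_title"]/text()'))
         # ... apply the same './/' prefix to price, discount, orig_price, stars and reviews ...
         yield Request(url=item['url'],
                       meta={'tripadvisor_item': item},
                       callback=self.parse_fetch_review)

     # Pagination can simply re-enter parse; parse_next_page duplicates it line for line.
     next_page_url = clean_parsed_string(
         get_parsed_string(sel, '//div[@class="paging"]/a[@class="next"]/@href'))
     if next_page_url:
         yield Request(url=self.base_uri + next_page_url, callback=self.parse)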

Works perfectly, thanks. However, I am not getting the stars and reviews values (I get None), and I don't know why the XPaths I used don't work. It would be great if I could find a solution for that as well. –

@NeelShah That happens because the stars and reviews are generated by a JavaScript (AJAX) call, and Scrapy does not execute any JavaScript. You should probably open a new question for that, since it is unrelated to this one. – Granitosaurus
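One quick way to confirm that from the spider itself, as a sketch (it just checks the downloaded HTML for the bv-rating-ratio class used in the question's XPath):

 def parse_fetch_review(self, response):
     # If the rating markup never appears in the raw HTML that Scrapy downloaded,
     # it is injected later by JavaScript, which Scrapy alone will not execute.
     if b'bv-rating-ratio' not in response.body:
         self.log('Rating widget not present in static HTML: %s' % response.url)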