What should I change in my code to stop Scrapy from retrieving the same items during a deep crawl into multiple pages?
Right now, Scrapy crawls and scrapes like this:
Visit Page-A >> scrape Item1 & extract link to Page-B >> visit Page-B >> scrape Item2 & extract links to Pages C-D-E >> scrape Items 2-3-4-5 from Pages C-D-E
The code looks like this:
def category_page(self, response):
    # Scrape the items on this listing page first
    for item in self.parse_attr(response):
        yield item
    # extract_first() belongs on the selector, not on an already-extracted list
    next_page = response.xpath('').extract_first()
    if next_page:
        nextpage = response.urljoin(next_page)
        # The callback must be referenced through self
        yield scrapy.Request(nextpage, callback=self.category_page)
def parse_attr(self, response):
    item = TradeItem()
    item['NameOfCompany'] = response.xpath('').extract_first().strip()
    item['Country'] = response.xpath('').extract_first().strip()
    item['TrustPt'] = response.xpath('').extract_first().strip()
    company_page = response.xpath('').extract_first()
    if company_page:
        company_page = response.urljoin(company_page)
        request = scrapy.Request(company_page, callback=self.company_data)
        request.meta['item'] = item
        yield request
    else:
        yield item
def company_data(self, response):
    item = response.meta['item']
    item['Address'] = response.xpath('').extract()[1]
    product_page = response.xpath('').extract()[1]
    sell_page = response.xpath('').extract()[2]
    trust_page = response.xpath('').extract()[4]
    # The same item is handed to up to three follow-up requests
    if sell_page:
        sell_page = response.urljoin(sell_page)
        request = scrapy.Request(sell_page, callback=self.sell_data)
        request.meta['item3'] = item
        yield request
    if product_page:
        product_page = response.urljoin(product_page)
        request = scrapy.Request(product_page, callback=self.product_data)
        request.meta['item2'] = item
        yield request
    if trust_page:
        trust_page = response.urljoin(trust_page)
        request = scrapy.Request(trust_page, callback=self.trust_data)
        request.meta['item4'] = item
        yield request
    # ...and the partially filled item is also yielded here
    yield item
def product_data(self, response):
    item = response.meta['item2']
    item['SoldProducts'] = response.xpath('').extract()
    yield item

def sell_data(self, response):
    item = response.meta['item3']
    item['SellOffers'] = response.xpath('').extract()
    yield item

def trust_data(self, response):
    item = response.meta['item4']
    item['TrustData'] = response.xpath('').extract()
    yield item
The problem is that items are repeated, because each callback yields its own partially filled copy of the item. So I get entries like this:
Step1:
{'Address': u'',
 'Country': u'',
 'NameOfCompany': u'',
 'TrustPoints': u''}
Step2:
{'Address': u'',
 'Country': u'',
 'NameOfCompany': u'',
 'SellOffers': [],
 'TrustPoints': u''}
Step3:
{'Address': u'',
 'Country': u'',
 'NameOfCompany': u'',
 'SellOffers': [],
 'SoldProducts': [u' '],
 'TrustData': [u''],
 'TrustPoints': u''}
Each step repeats the values from the previous one. I know this is caused by Scrapy visiting the URLs multiple times. There is some error in my logic which I cannot fully grasp.
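Would chaining the follow-up requests sequentially, so the item is yielded only once after the last detail page, be the right direction? Here is a minimal sketch of what I have in mind, assuming the three detail pages can be visited one after another; next_detail and detail_page are hypothetical helper names and the XPaths remain placeholders, so I am not sure this is idiomatic Scrapy:

def company_data(self, response):
    item = response.meta['item']
    item['Address'] = response.xpath('').extract()[1]
    # Collect the follow-up pages once, then walk them one by one
    pages = [(response.xpath('').extract()[2], 'SellOffers'),
             (response.xpath('').extract()[1], 'SoldProducts'),
             (response.xpath('').extract()[4], 'TrustData')]
    # Keep only the pages that actually exist, like the if-checks above
    pages = [(path, field) for path, field in pages if path]
    return self.next_detail(item, pages, response)

def next_detail(self, item, pages, response):
    # Yield the item exactly once, after the last detail page is parsed
    if not pages:
        yield item
        return
    path, field = pages[0]
    request = scrapy.Request(response.urljoin(path), callback=self.detail_page)
    request.meta.update(item=item, pages=pages[1:], field=field)
    yield request

def detail_page(self, response):
    item = response.meta['item']
    item[response.meta['field']] = response.xpath('').extract()
    # Hand off to the next page in the chain (or yield the finished item)
    for result in self.next_detail(item, response.meta['pages'], response):
        yield result

With this pattern each detail request carries the item and the list of remaining pages in meta, so nothing is emitted until the last field is filled in, but I am not sure whether this is how request chaining is normally done in Scrapy.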
