I have this code
def parse(self, response):
    hxs = HtmlXPathSelector(response)
    sites = hxs.select('//div[@class="headline_area"]')
    items = []
    for site in sites[:5]:
        item = StackItem()
        log.msg(' LOOP' +str(ivar)+ '', level=log.ERROR)
        item['title'] ="yoo ma"
        request =  Request("blabla",  callback=self.test1)
        request.meta['item'] = item
        page_number = nextlink.split("&")[-1].split("=")[-1]
        if int(page_number) > 500:
           raise CloseSpider('Search Exceeded 500')
        ivar = ivar + 1
        yield request
        mylinks= soup.find_all('a')
        if mylinks:
            nextlink = mylinks[0].get('href')
            page_number = nextlink.split("&")[-3].split("=")[-1]
            request =  Request(urljoin(response.url, nextlink), callback=self.parse)
            request.meta['page'] = page_number
            yield request
Now my problem is that suppose i want to stop at page_number  = 5
now scrappy goes to that page before the all items from page 1 , page 2 etc are downloaded and stops when it first reaches there.
How can get rid of that porblem that it prcess all links before going to page = 5
 
     
    