Now I can see that Scrapy downloads all pages concurrently, but what I need is to chain the people and extract_person methods: when I get the list of person URLs in the people method, I want to follow all of them and scrape all the info I need, and only after that continue with the next page of people URLs. How can I do that?
def people(self, response):
    """Parse a listing page and crawl the person pages sequentially.

    Instead of yielding every person Request at once (which Scrapy would
    download concurrently), only the FIRST person URL is requested here.
    The remaining URLs travel along in ``Request.meta`` so that
    ``extract_person`` can chain the next request after each page is
    scraped — giving strictly sequential person scraping per listing page.
    """
    sel = Selector(response)
    urls = sel.xpath(XPATHS.URLS).extract()
    if urls:
        yield Request(
            url=BASE_URL + urls[0],
            callback=self.extract_person,
            # Queue of person URLs still to visit for this listing page.
            meta={'remaining_urls': urls[1:]},
        )

def extract_person(self, response):
    """Scrape one person page, then chain the next queued person URL.

    Yields a ``PersonItem`` for the current page (the original code yielded
    the undefined name ``student`` — fixed to the item actually built),
    followed by a Request for the next URL in the ``remaining_urls`` queue,
    if any. ``meta.get`` keeps this backward-compatible with callers that
    issued the request without a queue.
    """
    sel = Selector(response)
    name = sel.xpath(XPATHS.NAME).extract()[0]
    person = PersonItem(name=name)
    yield person
    remaining = response.meta.get('remaining_urls', [])
    if remaining:
        yield Request(
            url=BASE_URL + remaining[0],
            callback=self.extract_person,
            meta={'remaining_urls': remaining[1:]},
        )
 
     
    