I have X initial URLs that are paginated: to get the next page of data, I have to grab the next URL from the response's Link header and keep following it until there is no next URL. I'm having trouble getting this working, so I'm trying a queue-based approach that I found here.
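What I mean by "grab the next URL from the response header" is roughly this, for a single start URL (just a sketch; follow_pages and start_url are placeholder names):
async def follow_pages(session, start_url):
    # Sketch of the pagination contract: each response carries a Link header,
    # which aiohttp exposes as response.links; follow rel="next" until it's gone.
    url = start_url
    while url:
        async with session.get(url) as response:
            data = await response.json()
            # ... work with data ...
            next_link = response.links.get("next")
            url = str(next_link["url"]) if next_link else None
Here is the queue version I'm trying: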
import asyncio
from aiohttp import ClientSession, TCPConnector
async def get(session, url):
    headers = {
        'Authorization': 'Bearer KEY',
    }
    async with session.get(url, headers=headers) as response:
        json = await response.json()
        return json, response
async def process(session, url, q):
    try:
        try:
            views, response = await get(session, url)
            scode = response.status
            if scode == 404:
                return
        except Exception as e:
            print(e)
            return
        try:
            # Queue the next page if the Link header advertises one.
            await q.put(str(response.links["next"]["url"]))
        except KeyError:
            # No rel="next" link: this was the last page.
            pass
        <do something with views>
    except Exception as e:
        print(e)
async def fetch_worker(session, q):
    # Pull URLs off the queue until cancelled; process() may enqueue more pages.
    while True:
        url = await q.get()
        try:
            await process(session, url, q)
        except Exception as e:
            print(e)
        finally:
            q.task_done()
async def d():
    <code to query and put data into stdrows>
    url_queue = asyncio.Queue()
    tasks = []
    connector = TCPConnector(limit=500)
    async with ClientSession(connector=connector) as session:
        url = '<some base url>'
        for i in range(500):
            tasks.append(asyncio.create_task(fetch_worker(session, url_queue)))
        for row in stdrows:
            await url_queue.put(url.format(row[1]))
        # Wait until every queued URL (including "next" pages the workers add)
        # has been processed, then shut the workers down.
        await url_queue.join()
        for task in tasks:
            task.cancel()
        await asyncio.gather(*tasks, return_exceptions=True)
asyncio.run(d())
This doesn't appear to be getting anywhere near 500 requests/sec. Is it even possible to reach that rate without knowing all the URLs ahead of time? I'm hoping to fetch the next URL from whatever initial URL (or from its paginated successor) while I work with views.
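To make that last point concrete, this is the kind of split I'm imagining, where fetching never waits on the processing step (rough sketch only; handle_views is a placeholder for my real work, and get() is the helper from above):
async def fetch_worker(session, url_queue, views_queue):
    # Fetch pages, enqueue any "next" URL immediately, and hand the payload
    # off to a second queue so processing never blocks fetching.
    while True:
        url = await url_queue.get()
        try:
            views, response = await get(session, url)
            next_link = response.links.get("next")
            if next_link:
                await url_queue.put(str(next_link["url"]))
            await views_queue.put(views)
        except Exception as e:
            print(e)
        finally:
            url_queue.task_done()

async def process_worker(views_queue):
    # Consume payloads independently of the HTTP workers.
    while True:
        views = await views_queue.get()
        try:
            handle_views(views)  # placeholder for <do something with views>
        except Exception as e:
            print(e)
        finally:
            views_queue.task_done()
Is that the right direction, or is there a better pattern for this?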
