I am trying to collect all the unique URLs of a website by calling the all_pages function recursively, but the function is not returning all of the site's URLs. All I want to do is gather every unique URL on the site using BeautifulSoup. My code looks like this:
base_url = "http://www.readings.com.pk/"
unique_urls=[]
def all_pages(base_url,unique_urls=[]):
    response = requests.get(base_url)
    soup = BeautifulSoup(response.content, "html.parser")
    for link in soup.find_all("a"):
        url = link["href"]
        absolute_url = urljoin(base_url, url)
        if absolute_url not in unique_urls:
            if base_url in absolute_url:
                unique_urls.append(absolute_url)
                print (absolute_url)
                all_pages(absolute_url,unique_urls,book_urls)
all_pages(base_url,unique_urls)
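
For comparison, here is a minimal non-recursive sketch of the same crawl (the function name crawl_site and the timeout value are my own choices, not part of the original code). It replaces the recursion with an explicit queue, which sidesteps Python's recursion limit on sites with many pages, and uses a set instead of a list so membership checks stay fast; the startswith test is one simple assumption about what counts as an internal link:

import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
from collections import deque

def crawl_site(base_url):
    unique_urls = set()        # visited pages; set membership checks are O(1)
    queue = deque([base_url])  # pages still to fetch
    while queue:
        url = queue.popleft()
        if url in unique_urls:
            continue
        unique_urls.add(url)
        try:
            response = requests.get(url, timeout=10)
        except requests.RequestException:
            continue  # skip pages that fail to load
        soup = BeautifulSoup(response.content, "html.parser")
        for link in soup.find_all("a", href=True):
            absolute_url = urljoin(url, link["href"])
            # follow only links that stay on the same site
            if absolute_url.startswith(base_url) and absolute_url not in unique_urls:
                queue.append(absolute_url)
    return unique_urls

all_urls = crawl_site("http://www.readings.com.pk/")
print(len(all_urls), "unique URLs found")

Note that URLs differing only in a query string or #fragment are counted as distinct here; depending on what "unique" should mean, you may want to normalize URLs before adding them to the set.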