I am trying to extract information about MTG cards from a webpage with the program below, but I repeatedly retrieve information about the initial page (InitUrl) only; the crawler never advances to the following pages. I have started to believe that I am not using the correct URLs, or that there is some restriction on using urllib that has slipped my attention. Here is the code I have been struggling with for weeks now:
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
InitUrl = "https://mtgsingles.gr/search?q=dragon"
NumOfCrawledPages = 0
URL_Next = ""
NumOfPages = 4   # number of result pages to crawl
for i in range(NumOfPages):
    # The first iteration fetches InitUrl; later ones follow URL_Next.
    if i == 0:
        Url = InitUrl
    else:
        Url = URL_Next
    print(Url)
    UClient = uReq(Url)  # downloading the url
    page_html = UClient.read()
    UClient.close()
    page_soup = soup(page_html, "html.parser")
    cards = page_soup.findAll("div", {"class": ["iso-item", "item-row-view"]})
    for card in cards:
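        # The card name sits several levels deep; these chained attribute
        # lookups and contents[] indices depend on the page's exact markup.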
        card_name = card.div.div.strong.span.contents[3].contents[0].replace("\xa0 ", "")
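        # Cards without a power/toughness block get a placeholder value.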
        if len(card.div.contents) > 3:
            cardP_T = card.div.contents[3].contents[1].text.replace("\n", "").strip()
        else:
            cardP_T = "Does not exist"
        cardType = card.contents[3].text
        print(card_name + "\n" + cardP_T + "\n" + cardType + "\n")
    # Build the URL of the next results page (?q=dragon&page=2, 3, ...).
    # Plain string concatenation can never raise IndexError, so the
    # try/except around this step was dead code and has been dropped.
    URL_Next = InitUrl + "&page=" + str(i + 2)
    print("The next URL is: " + URL_Next + "\n")
    NumOfCrawledPages += 1
    print("Moving to page : " + str(NumOfCrawledPages + 1) + "\n")
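Since the loop above does request a different URL on every iteration (the print(Url) line confirms it), a useful next step is to check whether the server actually returns different content for different page values. Below is a minimal sketch of such a check; the ?q=dragon&page=N pattern is taken from the crawler above, and everything else (including the assumption that identical responses mean the parameter is ignored) is my guess about how the site behaves, not something confirmed:

import hashlib
from urllib.request import urlopen

def page_fingerprint(url):
    # Fetch a URL and return an MD5 hash of the raw response body.
    with urlopen(url) as resp:
        return hashlib.md5(resp.read()).hexdigest()

base = "https://mtgsingles.gr/search?q=dragon"   # same query as InitUrl
for page in range(1, 4):
    url = base + "&page=" + str(page)
    print(url, page_fingerprint(url))

If all the fingerprints come out identical, the server is ignoring the page parameter and the card list is most likely filled in client-side by JavaScript; in that case urllib never sees anything beyond the initial page, and a browser-driving tool such as Selenium would be needed in front of BeautifulSoup. If the fingerprints differ, the pagination URLs are fine and the problem lies in the parsing instead.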