#Import Needed Libraries    
import requests
from bs4 import BeautifulSoup 
import pprint
# Fetch the Hacker News front page (first 30 stories only).
res = requests.get('https://news.ycombinator.com/news')
# Parse the HTML with the stdlib parser (no lxml dependency needed).
soup = BeautifulSoup(res.text, 'html.parser')
# NOTE(review): these selectors depend on HN's current markup — the story
# title class has changed over time ('.storylink' / '.titlelink' /
# '.titleline'); verify '.titlelink' still matches before relying on it.
links = soup.select('.titlelink')
# One '.subtext' row per story: holds the score, author, and age.
subtext = soup.select('.subtext')
def sort_stories_by_votes(hnlist):
    """Return the stories ordered from most to fewest votes.

    Each element of *hnlist* is a dict with a 'votes' key; the input
    list is not modified (a new sorted list is returned).
    """
    def vote_count(story):
        return story['votes']

    return sorted(hnlist, key=vote_count, reverse=True)
def create_custom_hn(links, subtext, min_points=100):
    """Build a vote-sorted list of stories with at least *min_points* votes.

    Args:
        links: title anchor tags (one per story), as returned by
            ``soup.select('.titlelink')``.
        subtext: metadata rows paired positionally with *links*, as
            returned by ``soup.select('.subtext')``.
        min_points: minimum score a story needs to be included
            (default 100, matching the original ``points > 99`` filter).

    Returns:
        A list of ``{'title', 'link', 'votes'}`` dicts, most votes first.
    """
    hn = []
    # zip pairs each title with its metadata row and stops at the shorter
    # list, so a length mismatch can no longer raise IndexError.
    for link_tag, sub_tag in zip(links, subtext):
        score = sub_tag.select('.score')
        if not score:
            # Job posts / ads have no score element — skip them.
            continue
        # split()[0] grabs the number from "N points" and also handles
        # the singular "1 point", which .replace(' points', '') would not.
        points = int(score[0].getText().split()[0])
        if points >= min_points:
            hn.append({
                'title': link_tag.getText(),
                'link': link_tag.get('href', None),
                'votes': points,
            })
    # Same ordering as sort_stories_by_votes: most-voted stories first.
    return sorted(hn, key=lambda story: story['votes'], reverse=True)
pprint.pprint(create_custom_hn(links, subtext))
My question: this only scrapes the first page, which has just 30 stories.
How would I extend this scraping approach to cover multiple pages — say, the next 10 — while keeping the formatting of the code above?