I'm trying to scrape some data from a website that updates frequently (here I'm pulling the AAPL stock price from Yahoo Finance). This code doesn't run: I've tested every part individually (a standalone version of the lookup is included below the script), but when it's all put together I get an error saying "RuntimeError: maximum recursion depth exceeded".
Thanks in advance for your help troubleshooting!
import time
import lxml, requests
from bs4 import BeautifulSoup

url = "http://finance.yahoo.com/q?uhb=uh3_finance_vert&fr=&type=2button&s=aapl"

def PrintPriceAAPL():
    r = requests.get(url)
    soup = BeautifulSoup(r.content, "lxml")
    print (soup.find(id="yfs_l84_aapl").string)  # ID of current stock price
    time.sleep(60)

while True:
    PrintPriceAAPL()
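For reference, this is roughly the standalone check I used when testing the request and the find() call on their own (same URL and element ID as above, just no loop):

import requests
from bs4 import BeautifulSoup

url = "http://finance.yahoo.com/q?uhb=uh3_finance_vert&fr=&type=2button&s=aapl"
r = requests.get(url)
soup = BeautifulSoup(r.content, "lxml")
print (soup.find(id="yfs_l84_aapl").string)  # one-off lookup of the price element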
EDIT: This is the full error:
Traceback (most recent call last):
  File "C:/Users/sjung/Documents/PrintPriceAAPL.py", line 15, in <module>
    PrintPriceAAPL()
  File "C:/Users/sjung/Documents/PrintPriceAAPL.py", line 11, in PrintPriceAAPL
    print (soup.find_all(id="yfs_l84_aapl")[0].string) #ID of current stock price
  File "C:\Python27\lib\idlelib\rpc.py", line 595, in __call__
    value = self.sockio.remotecall(self.oid, self.name, args, kwargs)
  File "C:\Python27\lib\idlelib\rpc.py", line 210, in remotecall
    seq = self.asynccall(oid, methodname, args, kwargs)
  File "C:\Python27\lib\idlelib\rpc.py", line 225, in asynccall
    self.putmessage((seq, request))
  File "C:\Python27\lib\idlelib\rpc.py", line 324, in putmessage
    s = pickle.dumps(message)
  File "C:\Python27\lib\copy_reg.py", line 74, in _reduce_ex
    getstate = self.__getstate__
RuntimeError: maximum recursion depth exceeded