When I try to crawl paper information with multiple processes, the processes never terminate after fetching the information:
[screenshot: error]
When I comment out the code that fetches the information from the network, the processes end normally:
[screenshot: normal exit]
This error is troubling me and I have no idea what causes it. My network requests are made with requests, and I call response.close() on every response.
Can anyone help this confused person? Thanks!
This is the whole code (my Python version is 3.7):
from multiprocessing import Process, Queue, Pool,Manager,Value
import time, random
import requests
import re
from bs4 import BeautifulSoup
headers = {
    'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36,Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
    'Connection': 'close'
}
## Fetch the url and return the parsed html (a BeautifulSoup object)
def GetUrlInfo(url):
    response = requests.get(url=url, headers=headers)
    response.encoding = 'utf-8'
    SoupData = BeautifulSoup(response.text, 'lxml')
    response.close()  # release the connection
    return SoupData
def GetVolumeUrlfromUrl(url:str)->list:
    """Input is the journal's url; output is a list of (link, description) pairs, one for each issue of the journal"""
    url = re.sub('http:', 'https:', url)
    SoupDataTemp = GetUrlInfo(url+'index.html')
    SoupData = SoupDataTemp.find_all('li')
    UrlALL = []
    for i in SoupData:
        if i.find('a') != None:
            volumeUrlRule = '<a href=\"(.*?)\">(.*?)</a>'
            volumeUrlTemp = re.findall(volumeUrlRule,str(i),re.I)
            # u = i.find('a')['href']
            # # print(u)
            for u in volumeUrlTemp:
                if re.findall(url, u[0]):
                    # print(u)
                    UrlALL.append((u[0], u[1]), )
    # print(UrlALL)
    return UrlALL
def GetPaperBaseInfoFromUrlAll(url:str)->tuple:
    """The input is the url of one volume; the output is all the paper information obtained from the web page,
    including doi, title, author, and the date of this volume """
    soup = GetUrlInfo(url)
    temp1 = soup.find_all('li',class_='entry article')
    temp2= soup.find_all('h2')
    temp2=re.sub('\\n',' ',temp2[1].text)
    # print(temp2)
    volumeYear = re.split(' ',temp2)[-1]
    paper = []
    for i in temp1:
        if i.find('div',class_='head').find('a')== None:
            paperDoi = ''
        else:
            paperDoi = i.find('div',class_='head').find('a')['href']
        title = i.find('cite').find('span',class_='title').text[:-2]
        paper.append([paperDoi,title])
    return paper,volumeYear
# test start
url = 'http://dblp.uni-trier.de/db/journals/talg/'
UrlALL = GetVolumeUrlfromUrl(url)
UrlLen = len(UrlALL)
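# (note: this module-level code also runs again in every child process when it
#  re-imports this module under the default 'spawn' start method on Windows)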
# put the urls into the queue
def Write(query,value,num):
    for count in range(num):
        query.put(value[count][0],True)
        # time.sleep(random.random())
    print('write end')
# get a url from the queue and fetch the paper info for it
def Read(query,num,PaperInfo1,COUNT,i,paperNumber):
    while True:
        count = COUNT.get(True)
        # print("before enter" + str(i) + ' - ' + str(count)+' - '+str(num))
        COUNT.put(count, True)
        if not query.empty():
            value = query.get(True)
            count = COUNT.get(True)
            count = count + 1
            COUNT.put(count,True)
            paper, thisYear = GetPaperBaseInfoFromUrlAll(value) # just commented
            print("connected " + str(i) + ' - ' + str(count) + ' - ' + str(num))
            numb = paperNumber.get(True)
            numb = numb + len(paper)
            paperNumber.put(numb) # just commented
            # print(paper,thisYear)
            PaperInfo1.put((paper,thisYear),) # just commented
            print("the process "+str(i)+' - '+ str(count)+ ' : '+value)
        if not COUNT.empty():
            count = COUNT.get(True)
            # print("after enter" + str(i) + ' - ' + str(count) + ' - ' + str(num))
            COUNT.put(count,True)
            if int(count) == int(num):
                print("the process "+str(i)+" end ")
                break
    print('read end')
# print the paper info
def GetPaperInfo(PaperInfo1,paperNumber):
    for i in range(paperNumber.get(True)):
        value = PaperInfo1.get(True)
        print(value)
if __name__=='__main__':
    r_num = 10 # the number of reader processes
    w_num = 1 # the number of writer processes
    w_cnt = UrlLen # the write counter
    q = Queue(UrlLen) # the volume url queue
    paperNumber = Queue(1) # the total paper number
    COUNT = Queue(1) # the end tag
    COUNT.put(int(0)) # first is zero
    paperNumber.put(int(0)) # first is zero
    PaperInfo1 = Queue()
    r_list = [Process( target=Read, args=(q,w_cnt,PaperInfo1,COUNT,i,paperNumber) ) for i in range(r_num)]
    w_list = [Process( target=Write, args=(q,UrlALL,w_cnt) )]
    time_start = time.time()
    [task.start() for task in w_list]
    [task.start() for task in r_list]
    [task.join() for task in w_list]
    [task.join() for task in r_list]
    time_used = time.time() - time_start
    GetPaperInfo(PaperInfo1, paperNumber)
    print('time_used:{}s'.format(time_used))
I have no idea. When I debug, the process finally enters process.py -> line 297: try: self.run(), then line 300: util._exit_function(), and it just prints a "connected" message:
[screenshot: the debugger]
But I don't know why the network code can cause this error or how to solve it (see the sketch below for what I suspect).
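Perhaps related: the Python docs warn ("Joining processes that use queues" in the multiprocessing programming guidelines) that a process which has put items on a Queue will not terminate until all the buffered items have been flushed to the underlying pipe, so joining such a process before the queue has been drained can hang. My reader processes put results into PaperInfo1 before the main process joins them, so maybe this is what happens, but I am not sure. This is only a minimal sketch of that pattern, not my real code:
from multiprocessing import Process, Queue

def worker(q):
    # put a large item: it does not fit in the pipe buffer,
    # so the feeder thread keeps it buffered until someone reads it
    q.put('x' * 10_000_000)

if __name__ == '__main__':
    q = Queue()
    p = Process(target=worker, args=(q,))
    p.start()
    data = q.get()   # draining the queue before join avoids the hang
    p.join()         # joining before the get() can block forever
    print(len(data))
If that really is the cause here, then consuming PaperInfo1 before joining the reader processes would be the direction to look in, but I am not certain.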
That's all, thank you!