I have code like this:
def alpha():
    batch = []
    batchSize = 25
    for i in range(frameNumber):  # frameNumber is defined elsewhere
        frameData = doSomething()
        batch.append(frameData)
        if len(batch) == batchSize:
            result = beta(batch)  # function to be parallelized
            batch = []            # start collecting the next batch
    print(completeResult) # here I want to have all results joined
I need an instance of beta to start computing each batch on its own thread, every batchSize frames. At the end, I want all the results joined together, ideally in the original order.
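For illustration, here is a minimal sketch of the behaviour I want, assuming beta is thread-safe and that frameNumber, doSomething and beta are defined as above:

import concurrent.futures

def alpha():
    batchSize = 25
    batch = []
    futures = []
    with concurrent.futures.ThreadPoolExecutor() as executor:
        for i in range(frameNumber):
            batch.append(doSomething())
            if len(batch) == batchSize:
                # submit returns immediately; beta runs on a worker thread
                futures.append(executor.submit(beta, batch))
                batch = []
    # futures are in submission order, so the results stay ordered
    completeResult = [future.result() for future in futures]
    print(completeResult)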
I tried this:
import concurrent.futures
from threading import Thread
def alpha():
    counter = 0
    batch = list()
    batchSize = 25
    completeResult = list()
    for i in range(frameNumber):
        frameData = doSomething()
        batch.append(frameData)
        if len(batch) == batchSize:
        with concurrent.futures.ThreadPoolExecutor() as executor:
            counter = counter + 1
            print('start thread ' + str(counter))
            future = executor.submit(beta, batch)
            result = future.result()  # blocks until beta returns
            print('join thread ' + str(counter))
            completeResult.append(result)
            batch = []
    print(completeResult) # here I want to have all results be joined
but the execution is sequential, as these prints show:
start thread 1
join thread 1
start thread 2 
join thread 2 
...
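If I understand ThreadPoolExecutor correctly, two things serialize this: future.result() blocks until beta returns, and leaving the with block waits for the executor to shut down, so each batch finishes before the next one is even submitted. A self-contained toy demo of the difference (the sleeping beta is just a stand-in for the real one):

import concurrent.futures
import time

def beta(batch):
    time.sleep(1)  # pretend to crunch a batch
    return sum(batch)

with concurrent.futures.ThreadPoolExecutor() as executor:
    t0 = time.time()
    futures = [executor.submit(beta, [i]) for i in range(4)]
    print('submitted after %.2fs' % (time.time() - t0))  # ~0.00s: submit does not block
    results = [f.result() for f in futures]
    print('finished after %.2fs:' % (time.time() - t0), results)  # ~1s: batches ran overlapped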
Then I tried using apply_async from multiprocessing.Pool, but I ran into a problem. If I run:
import multiprocessing as mp
import os
import sys
import importlib.util
import main
results = []
def import_module_by_path(path):
    name = os.path.splitext(os.path.basename(path))[0]
    spec = importlib.util.spec_from_file_location(name, path)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod
def collect_result(result):
    global results
    results.append(result)
def doSomething(i, a, b, c, d):
    print('doSomething function')
    result = i
    return result
if __name__ == '__main__':
    pool = mp.Pool(mp.cpu_count())
  
    for i in range(0,4):
        pool.apply_async(doSomething, args=(i, 2, 3, 4, 5), callback=collect_result)
  
    pool.close()
    pool.join() 
    print(results) 
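As an aside on ordering: if I read the docs right, callbacks fire in completion order, so results is not guaranteed to keep the submission order. I believe keeping the AsyncResult handles and calling get() in submission order would preserve it; a sketch with a toy doSomething standing in for the real one:

import multiprocessing as mp

def doSomething(i, a, b, c, d):
    return i

if __name__ == '__main__':
    pool = mp.Pool(mp.cpu_count())
    # keep the handles instead of using a callback
    handles = [pool.apply_async(doSomething, args=(i, 2, 3, 4, 5)) for i in range(4)]
    pool.close()
    pool.join()
    # get() returns results (or re-raises worker exceptions) in submission order
    print([h.get() for h in handles])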
it works fine. But if I run apply_async with an externally imported function, as follows, it doesn't work anymore:
import multiprocessing as mp
import os
import sys
import importlib.util
import main
results = []
def import_module_by_path(path):
    name = os.path.splitext(os.path.basename(path))[0]
    spec = importlib.util.spec_from_file_location(name, path)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod
def collect_result(result):
    global results
    results.append(result)
if __name__ == '__main__':
    fn = import_module_by_path('./myPythonFile.py')
    pool = mp.Pool(mp.cpu_count())
  
    for i in range(0,4):
        pool.apply_async(fn.myFunction, args=(i, 2, 3, 4, 5, 6), callback=collect_result)
    pool.close()
    pool.join()
The problem is that it never enters myFunction, and the program finishes with exit code 0.
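I suspect an exception is being raised in the worker and silently swallowed, since apply_async only reports errors through an error_callback or when get() is called on the AsyncResult. A diagnostic sketch I would try, reusing import_module_by_path, collect_result and results from the script above (./myPythonFile.py and myFunction as in my code):

import multiprocessing as mp

if __name__ == '__main__':
    fn = import_module_by_path('./myPythonFile.py')
    pool = mp.Pool(mp.cpu_count())
    handles = []
    for i in range(0, 4):
        handles.append(pool.apply_async(fn.myFunction, args=(i, 2, 3, 4, 5, 6),
                                        callback=collect_result,
                                        error_callback=print))  # prints any worker exception
    pool.close()
    pool.join()
    for h in handles:
        h.get()  # re-raises whatever the worker raised, if anything
    print(results)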
