I have a function that takes a list and returns the list of lists of ngrams (here n = 2). How can I parallelize  this function so that running time can be reduced?
I'm trying this, but it's not working. The data_list is a list of strings. 
import multiprocessing
from multiprocessing.dummy import Pool
from collections import OrderedDict
grams_list = []
data_list = ["Hello, I am learning Python",
             "Python is a very Powerful language",
             "And Learning python is easy" ]
def ngrams(input, n):
    input = input.split(' ')
    output = []    
    for i in range(len(input) - n + 1):
        output.append(input[i:i + n])
    return output
def generating_grams_list(data_list):
    for j in range(0, len(data_list)):
        grams = [' '.join(x) for x in ngrams(data_list[j], 2)]  # Creating ngrams
        grams_list.append(list(OrderedDict.fromkeys(grams)))  # removing duplicates
        # print "Creating ngrams list for each data string ", j
    return grams_list
if __name__ == '__main__':
    pool = Pool(multiprocessing.cpu_count())
    results = pool.map(generating_grams_list, data_list)
    pool.close()
    pool.join()
    for result in results:
        print("result", result)
 
     
    