I have a custom DataGenerator that uses Python's multiprocessing module to generate the training data that is fed to the TensorFlow model.
The problem is that whenever a new DataGenerator process is started, it seems to initialize TensorFlow (which is imported at the top of the file) and allocate some GPU memory for itself.
I followed this question to limit each process's GPU memory fraction, and my code worked, but now I can only use a third of the available GPU memory.
The worker processes and the TensorFlow code are started from the same Python file. Is there a proper way to use multiprocessing while preventing the spawned processes from importing TensorFlow and allocating GPU memory for themselves? (A rough sketch of the only workaround I can think of is at the end of this post.)
Here is the relevant part of my code (it runs on Windows) for clarification:
from multiprocessing import Process, Queue
from multiprocessing.pool import Pool
import cv2
import numpy as np
import tensorflow as tf
from keras.models import load_model
def TrainQueueProcess(queue):
    # This function fills the queue for other consumers
    pass
def get_model(model_path=None):
    import tensorflow as tf
    import keras.backend.tensorflow_backend as ktf
    def get_session(gpu_fraction=0.333):
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction, allow_growth=True)
        return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    ktf.set_session(get_session())
    from keras import Input, Model
    from keras.applications.mobilenetv2 import MobileNetV2
    from keras.layers import Dense, Dropout
    from keras.optimizers import adam
    from keras.utils import plot_model
    input_tensor = Input(shape=(128, 128, 3))
    base_model = MobileNetV2(weights='imagenet', include_top=False, input_tensor=input_tensor, input_shape=(128, 128, 3), pooling='avg')
    for layer in base_model.layers:
        layer.trainable = True
    op = Dense(128, activation='relu')(base_model.output)
    op = Dropout(.25)(op)
    output_tensor = Dense(2, activation='softmax')(op)
    model = Model(inputs=input_tensor, outputs=output_tensor)
    model.compile(optimizer=adam(lr=0.0008), loss='binary_crossentropy', metrics=['accuracy'])
    return model
if __name__ == '__main__':
    TRAIN_QUEUE = Queue(maxsize=10)
    TRAIN_PROCESS = Process(target=TrainQueueProcess, args=(TRAIN_QUEUE,))
    TRAIN_PROCESS.start()
    model = get_model(model_path)
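For reference, the only workaround I can think of (and I am not sure it is the proper one, hence the question) is to hide the GPU from the worker process before TensorFlow gets a chance to initialize CUDA inside it. Below is a rough, unverified sketch of that idea; the train_queue_worker name and the CUDA_VISIBLE_DEVICES trick are only illustrative, and I don't know whether setting the variable inside the worker is early enough on Windows, where the spawned child re-imports the main module (and therefore TensorFlow) before the target function runs:
import os
from multiprocessing import Process, Queue

import numpy as np

def train_queue_worker(queue):
    # Hypothetical: hide the GPU in this process only, before anything here
    # touches CUDA, so the worker never allocates GPU memory.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    # Worker-only imports (cv2, etc.) would go here, after the GPU is hidden.
    for _ in range(10):
        queue.put(np.zeros((128, 128, 3), dtype=np.float32))  # dummy batch

if __name__ == '__main__':
    q = Queue(maxsize=10)
    worker = Process(target=train_queue_worker, args=(q,))
    worker.start()
    # The parent keeps full GPU access and would build/train the model here,
    # consuming batches from `q`.
    for _ in range(10):
        batch = q.get()
    worker.join()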