Training a single epoch of this image-processing pipeline uses more than 80% of my RAM.
I know the `del` statement can be used to release an object's memory, for example:
del dataframe
I want to use `del` to free memory after each epoch/fold,
but I am not sure which object is actually holding the memory in this code.
Here is the code:
import gc
import math
import os

# Cross-validation training loop: one (train, valid) index pair per fold.
# `j` (defined before this loop) selects which fold is actually trained.
for train_index, valid_index in zip(train_indexes, valid_indexes):
    print("cleanup memory")
    # Per-fold dataframes; rebuilt every iteration, so the previous fold's
    # copies must be released below or memory grows fold after fold.
    traindf = df_train.iloc[train_index, :].reset_index()
    validdf = df_train.iloc[valid_index, :].reset_index()
    # Original `j >= 1 and j <= 1` is just `j == 1`: only the second fold
    # is trained.  (Original code had this body at the same indent level as
    # the `if`, which is a SyntaxError — fixed here.)
    if j == 1:
        train_generator = train_datagen.flow_from_dataframe(
            dataframe=traindf,
            directory=TRAIN_CROPPED_PATH,
            x_col='img_file',
            y_col='class',
            target_size=(IMAGE_SIZE, IMAGE_SIZE),
            color_mode='rgb',
            class_mode='categorical',
            batch_size=BATCH_SIZE,
            seed=SEED,
            shuffle=True
            )
        valid_generator = valid_datagen.flow_from_dataframe(
            dataframe=validdf,
            directory=TRAIN_CROPPED_PATH,
            x_col='img_file',
            y_col='class',
            # BUG FIX: target_size was missing here, so validation images
            # defaulted to (256, 256) while training used IMAGE_SIZE.
            target_size=(IMAGE_SIZE, IMAGE_SIZE),
            color_mode='rgb',
            class_mode='categorical',
            batch_size=BATCH_SIZE,
            seed=SEED,
            shuffle=True
            )
        model_name = model_path + str(j) + '_' + modelName + "_Aug" + '.hdf5'
        model_names.append(model_name)
        print("TRAIN_CROPPED_PATH:", TRAIN_CROPPED_PATH)
        print("model_name:", model_name)
        model = get_model()
        # Resume from a previous checkpoint only if it exists; the original
        # bare `except: pass` silently swallowed *every* error (corrupt file,
        # shape mismatch, keyboard interrupt), hiding real failures.
        if os.path.exists(model_name):
            model.load_weights(model_name)
        print("model_path:", model_path)
        patient = 2
        callbacks = [
            EarlyStopping(monitor='val_loss', patience=patient, mode='min', verbose=1),
            ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=patient / 2,
                              min_lr=0.00001, verbose=1, mode='min'),
            ModelCheckpoint(filepath=model_name, monitor='val_loss', verbose=1,
                            save_best_only=True, mode='min'),
        ]
        # NOTE: fit_generator is deprecated in TF2 (use model.fit), kept to
        # match the Keras version this project targets.
        history = model.fit_generator(
            train_generator,
            # ceil, not float division: every sample is seen once per epoch
            # and Keras expects an integer step count.
            steps_per_epoch=math.ceil(len(traindf.index) / BATCH_SIZE),
            epochs=epochs,
            validation_data=valid_generator,
            validation_steps=math.ceil(len(validdf.index) / BATCH_SIZE),
            verbose=1,
            shuffle=False,
            callbacks=callbacks
            )
        # These are the objects that actually hold the memory: the generators
        # keep the dataframes (and image batches) alive, and the model /
        # history keep large weight and metric arrays alive.  Drop them
        # before the next fold and force a collection pass.
        del train_generator, valid_generator, history, model
        gc.collect()
    # Release this fold's dataframe copies before the next iteration.
    del traindf, validdf
    gc.collect()
    j += 1