I'm trying to get my TensorFlow model to train on 2 categories of images, but I'm running into a ValueError. Can somebody please help? Here is the relevant code:
import sys

import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Get image arrays and labels for all image files
images, labels = load_data(sys.argv[1])
# Split data into training and testing sets
x_train, x_test, y_train, y_test = train_test_split(
    images, labels, test_size=TEST_SIZE
)
# Get a compiled neural network
model = get_model()
model.summary()
# Fit model on training data
model.fit_generator(x_train, steps_per_epoch=128, epochs=EPOCHS,
                    validation_data=y_train, validation_steps=128)
def load_data(data_dir):
    image_generator = ImageDataGenerator(rescale=1. / 255)
    resized_imgs = image_generator.flow_from_directory(
        batch_size=128, directory=data_dir, shuffle=True,
        target_size=dimensions, class_mode='binary')
    images, labels = next(resized_imgs)
    plotImages(images[:15])
    return images, labels
def get_model():
    # create a convolutional neural network
    model = tf.keras.models.Sequential([
        # convolutional layer. Learn 32 filters using a 3x3 kernel
        tf.keras.layers.Conv2D(
            32, (3, 3), activation="relu", input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)
        ),
        tf.keras.layers.BatchNormalization(),
        # max-pooling layer, using 2x2 pool size
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
        # convolutional layer. Learn 32 filters using a 3x3 kernel
        tf.keras.layers.Conv2D(
            32, (3, 3), activation="relu", input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)
        ),
        tf.keras.layers.BatchNormalization(),
        # max-pooling layer, using 2x2 pool size
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
        # flatten units
        tf.keras.layers.Flatten(),
        # add a hidden layer with dropout
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dropout(0.5),
        # add an output layer with NUM_CATEGORIES (43) units
        # (changed activation from softmax to sigmoid, which is the proper
        # activation for binary data)
        tf.keras.layers.Dense(NUM_CATEGORIES, activation="sigmoid")
    ])
    # compile neural network
    model.compile(
        optimizer="adam",
        loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
        metrics=["accuracy"]
    )
    return model
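One thing I wasn't sure about while writing this: since the last layer already applies a sigmoid, should from_logits be False in the loss? My understanding is it should be one or the other, so I'd guess something like the following, though it doesn't seem related to the error below:

# (just my guess, not tested) if the output layer uses a sigmoid activation,
# I think the loss should treat the model output as probabilities, not logits
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),
    metrics=["accuracy"]
)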
I end up getting the following error:
ValueError: No gradients provided for any variable: ['conv2d/kernel:0', 'conv2d/bias:0', 'batch_normalization/gamma:0', 'batch_normalization/beta:0', 'conv2d_1/kernel:0', 'conv2d_1/bias:0', 'batch_normalization_1/gamma:0', 'batch_normalization_1/beta:0', 'dense/kernel:0', 'dense/bias:0', 'dense_1/kernel:0', 'dense_1/bias:0'].
The error is coming from the following line of code but not sure how to fix it:
model.fit_generator(x_train, steps_per_epoch=128, epochs=EPOCHS,
                        validation_data=y_train, validation_steps=128)
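From what I can tell, fit_generator expects a generator object, but x_train is a plain NumPy array coming out of train_test_split, and y_train (the training labels) is being passed as validation_data. My guess is the call should look more like the plain fit below, with (x_test, y_test) as the validation data, but I'm not sure whether that's really the cause of the error:

# (my guess, not tested) pass the arrays to fit() directly and use the
# held-out split as validation data instead of the training labels
model.fit(x_train, y_train, epochs=EPOCHS,
          validation_data=(x_test, y_test))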
Thanks