Conv layer: No gradients provided for any variable
I am trying to train on the MNIST dataset, but I get an error like this:
No gradients provided for any variable: ['module_wrapper/conv2d/kernel:0',
'module_wrapper/conv2d/bias:0', 'module_wrapper_2/conv2d_1/kernel:0',
'module_wrapper_2/conv2d_1/bias:0', 'module_wrapper_5/dense/kernel:0',
'module_wrapper_5/dense/bias:0', 'module_wrapper_6/dense_1/kernel:0',
'module_wrapper_6/dense_1/bias:0'].
My fit code:
self.model.fit(x=self.datas.trainImages, y=self.datas.trainLabels, batch_size=self.datas.batch_size, epochs=self.datas.epochs)
Here are the variables:
self.datas.trainImages = numpy.stack([cv2.imread(image1), cv2.imread(image2), cv2.imread(image3)]).astype(numpy.float64)
self.datas.trainLabels = numpy.array([0, 1, 2], dtype=numpy.int32)
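For context, here is a quick sanity check of those arrays (just a sketch; it assumes three 28×28 MNIST images on disk, and note that cv2.imread returns a 3-channel BGR array by default):

import numpy
import cv2

# image1, image2, image3 are placeholder paths to MNIST digit files
images = [cv2.imread(p) for p in (image1, image2, image3)]   # each (28, 28, 3)

trainImages = numpy.stack(images).astype(numpy.float64)      # (3, 28, 28, 3)
trainLabels = numpy.array([0, 1, 2], dtype=numpy.int32)      # (3,)

print(trainImages.shape, trainImages.dtype)  # (3, 28, 28, 3) float64
print(trainLabels.shape, trainLabels.dtype)  # (3,) int32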
Also, here is the output of model.summary() (it is a LeNet-style model):
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
module_wrapper (ModuleWrappe (None, 28, 28, 32) 320
_________________________________________________________________
module_wrapper_1 (ModuleWrap (None, 14, 14, 32) 0
_________________________________________________________________
module_wrapper_2 (ModuleWrap (None, 14, 14, 64) 18496
_________________________________________________________________
module_wrapper_3 (ModuleWrap (None, 7, 7, 64) 0
_________________________________________________________________
module_wrapper_4 (ModuleWrap (None, 3136) 0
_________________________________________________________________
module_wrapper_5 (ModuleWrap (None, 500) 1568500
_________________________________________________________________
module_wrapper_6 (ModuleWrap (None, 10) 5010
=================================================================
Total params: 1,592,326
Trainable params: 1,592,326
Non-trainable params: 0
_________________________________________________________________
There are no layers named Conv2D, even though I added them:
model.add(layers.Conv2D(filters=32, kernel_size=3, strides=1, activation='relu', padding='same'))
model.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(layers.Conv2D(filters=64, kernel_size=3, strides=1, activation='relu', padding='same'))
model.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(500))
model.add(layers.Dense(self.datas.classCount, activation='softmax'))
When I researched the problem, Google and Stack Overflow results say to pass the labels to the fit function, but I have already added them.
UPDATE 1
You can run this code to reproduce it:
import numpy
import cv2
import tensorflow
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential

classCount = 10  # MNIST has 10 classes

model = Sequential()
optimizer = tensorflow.keras.optimizers.SGD()
loss = tensorflow.keras.losses.CategoricalCrossentropy(name='CategoricalCrossentropy', from_logits=True)
metrics = tensorflow.keras.metrics.CategoricalAccuracy(name='CategoricalAccuracy')

model.add(layers.Conv2D(filters=32, kernel_size=3, strides=1, activation='relu', padding='same'))
model.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(layers.Conv2D(filters=64, kernel_size=3, strides=1, activation='relu', padding='same'))
model.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(500))
model.add(layers.Dense(classCount, activation='softmax'))

# All images belong to the MNIST dataset; I read them from a folder into a
# list and then stack the list into a single array
trainImages = numpy.stack([cv2.imread(image1), cv2.imread(image2), cv2.imread(image3)]).astype(numpy.float64)
trainLabels = numpy.array([0, 1, 2], dtype=numpy.int32)

model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
model.fit(x=trainImages, y=trainLabels, batch_size=2, epochs=1)
Here is a working example based on your code, but with the full MNIST dataset. Just in case you didn't know: you should either use a softmax activation on your output layer or set the from_logits parameter of your loss function to True, but not both.
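Concretely, the two valid combinations look like this (a small sketch; the layer and loss mirror your code, and you pick one option, not both):

import tensorflow as tf

# Option A: softmax in the model, loss consumes probabilities
output_layer = tf.keras.layers.Dense(10, activation='softmax')
loss = tf.keras.losses.CategoricalCrossentropy()

# Option B: linear (logits) output, loss applies the softmax internally
output_layer = tf.keras.layers.Dense(10)
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)

And here is the full example: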
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

model = tf.keras.Sequential()
optimizer = tf.keras.optimizers.SGD()
loss = tf.keras.losses.CategoricalCrossentropy(name='CategoricalCrossentropy')
metrics = tf.keras.metrics.CategoricalAccuracy(name='CategoricalAccuracy')

model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, strides=1, activation='relu', padding='same', input_shape=(28, 28, 1)))
model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, activation='relu', padding='same'))
model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(500, activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))

# CategoricalCrossentropy expects one-hot targets, so encode the integer labels
y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)

print(model.summary())
model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
model.fit(x_train, y_train, batch_size=64, epochs=1)
Model: "sequential_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_6 (Conv2D)              (None, 28, 28, 32)        320
max_pooling2d_6 (MaxPooling2D) (None, 14, 14, 32)        0
conv2d_7 (Conv2D)              (None, 14, 14, 64)        18496
max_pooling2d_7 (MaxPooling2D) (None, 7, 7, 64)          0
flatten_3 (Flatten)            (None, 3136)              0
dense_6 (Dense)                (None, 500)               1568500
dense_7 (Dense)                (None, 10)                5010
=================================================================
Total params: 1,592,326
Trainable params: 1,592,326
Non-trainable params: 0
_________________________________________________________________
None
938/938 [==============================] - 8s 8ms/step - loss: 24.1966 - CategoricalAccuracy: 0.1079
<keras.callbacks.History at 0x7f831b6d0a50>
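As a side note, the run above trains on raw pixel values in [0, 255]; scaling the inputs before calling fit usually improves that first-epoch accuracy considerably:

x_train = x_train / 255.0  # scale pixels to [0, 1] before calling model.fit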