Access accuracy in keras / tensorflow while learning

I want to access the accuracy (or the loss) of a neural net in order to make training dependent on it ("Curriculum Learning"). Is there a way to access the model's accuracy from inside a custom layer?


One way would be to create a custom callback that records the accuracy (or loss) after every epoch, and then read that record during training from your custom layer. One drawback is that you have to set run_eagerly=True in model.compile for this to work, since graph-mode execution would trace the layer only once and never see updates to the Python-side record:

import tensorflow as tf

# Shared record of per-epoch metrics; read by the custom layer below.
result_dic = {"epochs": []}


def _record_epoch(epoch, logs):
    # Store the (1-based) epoch number and its accuracy for later lookup.
    result_dic["epochs"].append({
        'epoch': epoch + 1,
        'acc': str(logs['acc'])
    })


logging_callback = tf.keras.callbacks.LambdaCallback(on_epoch_end=_record_epoch)

class Linear(tf.keras.layers.Layer):
    """Dense-style layer (x @ w + b) that also prints the latest epoch record."""

    def __init__(self, units=32, input_dim=32):
        super().__init__()
        # Trainable weight matrix, drawn from a normal distribution.
        self.w = tf.Variable(
            initial_value=tf.random_normal_initializer()(
                shape=(input_dim, units), dtype="float32"
            ),
            trainable=True,
        )
        # Trainable bias vector, initialized to zeros.
        self.b = tf.Variable(
            initial_value=tf.zeros_initializer()(shape=(units,), dtype="float32"),
            trainable=True,
        )

    def call(self, inputs):
        # Only print once the callback has recorded at least one epoch.
        if result_dic['epochs']:
            tf.print(result_dic['epochs'][-1])
        return tf.matmul(inputs, self.w) + self.b

# Wire a tiny functional model: input -> custom Linear layer -> sigmoid head.
inputs = tf.keras.layers.Input((16,))
x = Linear(32, 16)(inputs)
outputs = tf.keras.layers.Dense(1, 'sigmoid')(x)
model = tf.keras.Model(inputs, outputs)

# run_eagerly=True is required so the Python-side dict lookup in Linear.call
# executes on every step instead of being traced once into a graph.
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.BinaryCrossentropy(),
    metrics=['acc'],
    run_eagerly=True,
)

# Random toy data: 5 samples of 16 features, binary integer targets.
x = tf.random.normal((5,16))
y = tf.random.uniform((5,), maxval=2, dtype=tf.int32)

model.fit(x, y, batch_size=2, epochs=4, callbacks=[logging_callback])
Epoch 1/4
3/3 [==============================] - 0s 15ms/step - loss: 0.8525 - acc: 0.0000e+00
Epoch 2/4
{'acc': '0.0', 'epoch': 1}
1/3 [=========>....................] - ETA: 0s - loss: 0.7647 - acc: 0.5000{'acc': '0.0', 'epoch': 1}
{'acc': '0.0', 'epoch': 1}
3/3 [==============================] - 0s 19ms/step - loss: 0.7834 - acc: 0.2000
Epoch 3/4
{'acc': '0.20000000298023224', 'epoch': 2}
1/3 [=========>....................] - ETA: 0s - loss: 0.7253 - acc: 0.5000{'acc': '0.20000000298023224', 'epoch': 2}
{'acc': '0.20000000298023224', 'epoch': 2}
3/3 [==============================] - 0s 18ms/step - loss: 0.7239 - acc: 0.2000
Epoch 4/4
{'acc': '0.20000000298023224', 'epoch': 3}
1/3 [=========>....................] - ETA: 0s - loss: 0.7091 - acc: 0.5000{'acc': '0.20000000298023224', 'epoch': 3}
{'acc': '0.20000000298023224', 'epoch': 3}
3/3 [==============================] - 0s 19ms/step - loss: 0.6662 - acc: 0.6000
<keras.callbacks.History at 0x7f5319f6a910>

Instead of a dictionary, you could also use a simple list:

import tensorflow as tf

# Most recently observed accuracy; seeded with 0.0 so the layer has a value
# to print before the first epoch finishes.
results = [0.0]


class LossAccCallback(tf.keras.callbacks.Callback):
    """Appends each finished epoch's accuracy to the module-level `results` list."""

    def on_epoch_end(self, epoch, logs=None):
        # No `global` needed: we mutate the list in place, never rebind the name.
        results.append(logs['acc'])

class Linear(tf.keras.layers.Layer):
    """Dense-style layer (x @ w + b) that prints the latest recorded accuracy."""

    def __init__(self, units=32, input_dim=32):
        super().__init__()
        # Trainable weight matrix with normally-distributed initial values.
        self.w = tf.Variable(
            initial_value=tf.random_normal_initializer()(
                shape=(input_dim, units), dtype="float32"
            ),
            trainable=True,
        )
        # Trainable bias vector, zero-initialized.
        self.b = tf.Variable(
            initial_value=tf.zeros_initializer()(shape=(units,), dtype="float32"),
            trainable=True,
        )

    def call(self, inputs):
        # `results` always holds at least the seed value, so no guard is needed.
        tf.print(results[-1])
        return tf.matmul(inputs, self.w) + self.b

epochs = 4

# Wire the same tiny model: input -> custom Linear layer -> sigmoid head.
inputs = tf.keras.layers.Input((16,))
x = Linear(32, 16)(inputs)
outputs = tf.keras.layers.Dense(1, 'sigmoid')(x)
model = tf.keras.Model(inputs, outputs)

# Eager execution is mandatory so the layer can read the Python list each step.
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.BinaryCrossentropy(),
    metrics=['acc'],
    run_eagerly=True,
)

# Random toy data: 5 samples of 16 features, binary integer targets.
x = tf.random.normal((5,16))
y = tf.random.uniform((5,), maxval=2, dtype=tf.int32)

model.fit(x, y, batch_size=2, epochs=epochs, callbacks=[LossAccCallback()])