How to plot learning curves for each trial using keras-tuner

You just have to implement a custom Keras tuner and set verbose=0 in the search call so that the plot shown after each trial survives; otherwise the tuner clears the console output between trials and the plots are deleted along with it. Try something like this for complete flexibility:

Custom keras tuner:

import tensorflow as tf
import keras_tuner as kt
import numpy as np
from matplotlib import pyplot as plt

class CustomTuner(kt.Tuner):
    def run_trial(self, trial, train_ds, val_ds, *args, **kwargs):
        # Reproduce the hyperparameter table keras-tuner normally prints.
        self._display.show_hyperparameter_table(trial)
        self._display.trial_number += 1
        hp = trial.hyperparameters
        model = self.hypermodel.build(hp)
        
        optimizer = model.optimizer
        train_loss_metric = tf.keras.metrics.Mean()
        valid_loss_metric = tf.keras.metrics.Mean()

        loss_fn = tf.keras.losses.MeanSquaredError()
        # 224 samples with a batch size of 32 give 7 batches per epoch.
        train_ds = train_ds.batch(32)
        val_ds = val_ds.batch(32)

        # One gradient-descent step on a single batch.
        def run_train_step(data):
          x = data[0]
          y = data[1]
          with tf.GradientTape() as tape:
            logits = model(x)
            loss = loss_fn(y, logits)

          gradients = tape.gradient(loss, model.trainable_variables)
          optimizer.apply_gradients(zip(gradients, model.trainable_variables))
          
          train_loss_metric.update_state(loss)
          return loss
        
        # Forward pass only; accumulates the validation loss.
        def run_valid_step(data):
          x = data[0]
          y = data[1]
          logits = model(x)
          loss = loss_fn(y, logits)

          valid_loss_metric.update_state(loss)
          return loss

        val_losses = []
        train_losses = []
        for epoch in range(5):
          tf.print("Epoch: {}".format(epoch))
          self.on_epoch_begin(trial, model, epoch, logs={})
          for batch, data in enumerate(train_ds):
              self.on_batch_begin(trial, model, batch, logs={})
              batch_loss = float(run_train_step(data))
              self.on_batch_end(trial, model, batch, logs={"loss": batch_loss})
              if batch == 6:  # last of the 7 batches in the dummy dataset
                loss = train_loss_metric.result()
                tf.print("Batches: {}, Loss: {}".format(batch + 1, loss))
                break
                
          for batch, data in enumerate(val_ds):
              self.on_batch_begin(trial, model, batch, logs={})
              batch_loss = float(run_valid_step(data))
              self.on_batch_end(trial, model, batch, logs={"val_loss": batch_loss})
              if batch == 6:  # last of the 7 validation batches
                loss = valid_loss_metric.result()
                tf.print("Batches: {}, Val Loss: {}".format(batch + 1, loss))
                break

          # Report both metrics in a single on_epoch_end call so the tuner
          # records exactly one result per epoch.
          epoch_loss = float(train_loss_metric.result())
          val_epoch_loss = float(valid_loss_metric.result())
          self.on_epoch_end(trial, model, epoch,
                            logs={"loss": epoch_loss, "val_loss": val_epoch_loss})

          train_losses.append(epoch_loss)
          val_losses.append(val_epoch_loss)

          train_loss_metric.reset_states()
          valid_loss_metric.reset_states()

      
        # Plot the learning curves for this trial before the next one starts.
        plt.plot(train_losses)
        plt.plot(val_losses)
        plt.title('Model Loss For Trial {}'.format(self._display.trial_number))
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='upper left')
        plt.show()
        tf.print("Ending Trial {}".format(self._display.trial_number))
        # Delegate to the built-in run_trial so the oracle gets its val_loss.
        return super().run_trial(trial, train_ds, *args, validation_data=val_ds, **kwargs)
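
If you run the search outside a notebook, plt.show() blocks until each window is closed. As a minimal sketch (the helper name and file layout are my own assumptions, not part of keras-tuner), you could save each figure to disk instead:

import os

def save_trial_plot(trial_number, train_losses, val_losses, out_dir="trial_plots"):
    # Hypothetical helper: persist one trial's learning curve to disk so
    # long searches can run unattended instead of calling plt.show().
    os.makedirs(out_dir, exist_ok=True)
    plt.figure()
    plt.plot(train_losses)
    plt.plot(val_losses)
    plt.title('Model Loss For Trial {}'.format(trial_number))
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.savefig(os.path.join(out_dir, 'trial_{}.png'.format(trial_number)))
    plt.close()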

Tuner setup, dummy data, and search:

tuner = CustomTuner(
    oracle=kt.oracles.RandomSearch(
        objective=kt.Objective("val_loss", "min"), max_trials=5
    ),
    hypermodel=model_builder
)

# Dummy regression data: 224 samples, 2 input features, 2 targets per sample.
X_train = np.random.random((224, 2))
y_train = np.random.random((224, 2))
valx_train = np.random.random((224, 2))
valy_train = np.random.random((224, 2))
stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)
train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train))
val_ds = tf.data.Dataset.from_tensor_slices((valx_train, valy_train))
# verbose=0 keeps keras-tuner from clearing the per-trial output and plots.
tuner.search(train_ds, val_ds, callbacks=[stop_early], verbose=0)
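
Once the search finishes you can query the tuner for the winning configuration with the standard keras-tuner accessors:

# Best hyperparameters found across all trials.
best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]
print(best_hp.values)

# Rebuild the best model from those hyperparameters for a final training run.
best_model = tuner.hypermodel.build(best_hp)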

Output:

Hyperparameter    |Value             |Best Value So Far 
layers            |1                 |?                 
units             |128               |?                 
learning_rate     |0.0001            |?                 
Epoch: 0
Batches: 7, Loss: 0.6267251372337341
Batches: 7, Val Loss: 0.6261463165283203
Epoch: 1
Batches: 7, Loss: 0.6248489022254944
Batches: 7, Val Loss: 0.6242721676826477
Epoch: 2
Batches: 7, Loss: 0.6229791045188904
Batches: 7, Val Loss: 0.6224031448364258
Epoch: 3
Batches: 7, Loss: 0.6211144328117371
Batches: 7, Val Loss: 0.6205392479896545
Epoch: 4
Batches: 7, Loss: 0.619255006313324
Batches: 7, Val Loss: 0.6186805963516235

[Plot: learning curves (train and val loss) for trial 1]

Ending Trial 1
Hyperparameter    |Value             |Best Value So Far 
layers            |3                 |1                 
units             |1024              |128               
learning_rate     |0.0001            |0.0001            
Epoch: 0
Batches: 7, Loss: 0.2655337154865265
Batches: 7, Val Loss: 0.22062525153160095
Epoch: 1
Batches: 7, Loss: 0.1646299660205841
Batches: 7, Val Loss: 0.14632494747638702
Epoch: 2
Batches: 7, Loss: 0.11420594155788422
Batches: 7, Val Loss: 0.11366432905197144
Epoch: 3
Batches: 7, Loss: 0.09950900077819824
Batches: 7, Val Loss: 0.10782861709594727
Epoch: 4
Batches: 7, Loss: 0.10018070787191391
Batches: 7, Val Loss: 0.10787512362003326

[Plot: learning curves (train and val loss) for trial 2]

Ending Trial 2
...
...

The model builder, based on the code you posted in your question:

def model_builder(hp):
  model = tf.keras.Sequential()

  # Search space: network depth, layer width, and learning rate.
  layers = hp.Choice('layers', values=[1, 2, 3, 4, 5])
  units = hp.Choice('units', values=[1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024])
  hp_learning_rate = hp.Choice('learning_rate', values=[1e-1, 1e-2, 1e-3, 1e-4])

  # range(1, layers) would add only layers - 1 hidden layers (none at all
  # when layers == 1), so use range(layers) to match the hyperparameter.
  for _ in range(layers):
    model.add(tf.keras.layers.Dense(units=units, activation='relu'))

  model.add(tf.keras.layers.Dense(2, activation='linear'))
  model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=hp_learning_rate),
                loss='mse')

  return model

You can probably simplify the code, for example by letting model.fit drive the epochs as sketched below, but I think you get the point.
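
As a minimal sketch of that simplification, assuming you don't need batch-level control, you could let fit() run the epochs and plot from its History object. Like the longer version above, this uses keras-tuner's internal _display attribute the same way, and the model is trained once here and again inside super().run_trial:

class SimplerTuner(kt.Tuner):
    def run_trial(self, trial, train_ds, val_ds, *args, **kwargs):
        self._display.show_hyperparameter_table(trial)
        self._display.trial_number += 1
        model = self.hypermodel.build(trial.hyperparameters)

        # fit() records the per-epoch losses in its History object.
        history = model.fit(train_ds.batch(32),
                            validation_data=val_ds.batch(32),
                            epochs=5, verbose=0)

        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('Model Loss For Trial {}'.format(self._display.trial_number))
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='upper left')
        plt.show()

        # Delegate to the built-in run_trial for the tuner's bookkeeping,
        # exactly as the longer version does.
        return super().run_trial(trial, train_ds.batch(32), *args,
                                 validation_data=val_ds.batch(32), **kwargs)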