TensorFlow to learn an input-dependent and an input-independent variable
The problem is that you are creating the nu variable with a hard-coded batch size of 1, which is why the model only works when the batch contains exactly one sample. It is hard to say exactly what you want to do, but you can try something like this:
import tensorflow as tf
class NuLayer(tf.keras.layers.Layer):
    def __init__(self, batch_dim):
        super(NuLayer, self).__init__()
        self.batch_dim = batch_dim

    def build(self, input_shape):
        # One trainable value per sample; the shape is fixed at construction
        # time, so batch_dim must match the batch size used later on.
        self.nu = tf.Variable(initial_value=tf.ones((self.batch_dim, 1)), trainable=True)

    def call(self, inputs):
        # Ignore the inputs and return the trainable variable itself.
        return self.nu
inp_1 = tf.keras.layers.Input(shape=(2,))  # input layer: each sample is (x, t)
initial = 'he_uniform'
x = tf.keras.layers.Dense(20, kernel_initializer=initial, activation='tanh', bias_initializer=initial)(inp_1)
x = tf.keras.layers.Dense(20, kernel_initializer=initial, activation='tanh', bias_initializer=initial)(x)
x = tf.keras.layers.Dense(20, kernel_initializer=initial, activation='tanh', bias_initializer=initial)(x)
x = tf.keras.layers.Dense(20, kernel_initializer=initial, activation='tanh', bias_initializer=initial)(x)
x = tf.keras.layers.Dense(20, kernel_initializer=initial, activation='tanh', bias_initializer=initial)(x)
x = tf.keras.layers.Dense(20, kernel_initializer=initial, activation='tanh', bias_initializer=initial)(x)
x = tf.keras.layers.Dense(1, kernel_initializer=initial, activation='tanh', bias_initializer=initial)(x)
nu = NuLayer(batch_dim=2)  # batch_dim must equal the batch size fed to the model
nu = nu(inp_1)
out = tf.keras.layers.Concatenate(axis=1)([x, nu])  # model output per sample: [u, nu]
model = tf.keras.Model(inputs=inp_1, outputs=out)
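Because the layer assigns nu as a tf.Variable attribute, it is registered as a trainable variable of the model, so any optimizer working on model.trainable_variables will update it together with the dense weights. A quick optional check (my addition):

print([v.shape for v in model.trainable_variables])  # the (2, 1) entry is nu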
def residualValOfPDE(xt):
    x = xt[:, 0:1]  # x coordinate
    t = xt[:, 1:2]  # t coordinate
    with tf.GradientTape(persistent=True) as tape:
        tape.watch(x)
        tape.watch(t)
        # The model returns [u, nu]; [0] unpacks them for the first sample.
        u, nu = model(tf.stack([x[:, 0], t[:, 0]], axis=1))[0]
        u_x = tape.gradient(u, x)  # taken inside the tape so u_xx can be computed
    u_t = tape.gradient(u, t)
    u_xx = tape.gradient(u_x, x)
    return u_t + u*u_x - nu*u_xx
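In case it helps, here is a minimal eager training sketch (my assumption about how the residual is meant to be used): it drives the mean squared residual towards zero, updating the dense weights and nu together, since nu is one of the model's trainable variables. The optimizer and learning rate are placeholders.

optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)

def train_step(xt_batch):
    # xt_batch must contain exactly batch_dim samples (2 for the model above),
    # e.g. train_step(tf.random.normal((2, 2))).
    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(tf.square(residualValOfPDE(xt_batch)))
    grads = tape.gradient(loss, model.trainable_variables)  # includes nu
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss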
xt_f = tf.random.normal((10000, 2))  # collocation points (x, t)
print(residualValOfPDE(xt_f[1:3, :]))  # batch of 2 samples, matching batch_dim=2
tf.Tensor(
[[0.5751909]
[0. ]], shape=(2, 1), dtype=float32)
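With this model (batch_dim=2), feeding a batch of a different size fails at the Concatenate layer, which is the same kind of shape error the hard-coded batch size of 1 was causing. A quick way to see it (my addition):

try:
    residualValOfPDE(xt_f[1:5, :])  # 4 samples, but nu was built for 2
except Exception as err:  # typically an error about mismatched concat shapes
    print(type(err).__name__)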
If you want to evaluate a different batch size, change batch_dim when creating the NuLayer and rebuild the model in the same way as above:
nu = NuLayer(batch_dim=4)
nu = nu(inp_1)
out = tf.keras.layers.Concatenate(axis=1)([x, nu])
model = tf.keras.Model(inputs=inp_1, outputs=out)
print( residualValOfPDE(xt_f[1:5,:]))
tf.Tensor(
[[-0.51205623]
[ 0. ]
[ 0. ]
[ 0. ]], shape=(4, 1), dtype=float32)
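If you would rather not tie nu to a fixed batch size at all, one possible variant (a sketch of an alternative, not something from your code) is to keep a single trainable scalar and repeat it to the batch size at call time:

class ScalarNuLayer(tf.keras.layers.Layer):
    def build(self, input_shape):
        # One trainable scalar shared by every sample.
        self.nu = self.add_weight(name='nu', shape=(1, 1), initializer='ones', trainable=True)

    def call(self, inputs):
        # Repeat the scalar along the batch axis so it concatenates with u.
        return tf.repeat(self.nu, repeats=tf.shape(inputs)[0], axis=0)

Using ScalarNuLayer()(inp_1) in place of NuLayer(batch_dim=...)(inp_1) removes the need to rebuild the model whenever the batch size changes, while nu remains a single learned, input-independent value.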