Thanks for your comment, freedom521jin.
I will describe my solution and would welcome any comments that might correct or improve my code.
I first define some global variables for a Bayesian neural network with a single hidden layer of 14 nodes:
#create session
sess = ed.get_session()
#define the number of training iterations and of samples for prediction
nSamp = 10000
#define the number of nodes in the hidden layer
hL = 14
I then build and train the BNN on the training data (inputVars and yld):
tf.app.flags.DEFINE_string('f', '', 'kernel') #dummy flag so tf.flags works inside a Jupyter kernel
D = inputVars.shape[1] #the number of features (11 in my case)
N = inputVars.shape[0] #the number of data points
tf.flags.DEFINE_integer("N", default=N, help="Number of data points.")
tf.flags.DEFINE_integer("D", default=D, help="Number of features.")
FLAGS = tf.flags.FLAGS
def neural_network(x):
    h = tf.tanh(tf.matmul(x, W0) + B0)
    h = tf.matmul(h, W1) + B1
    return tf.reshape(h, [-1])
w0 = hL #width of the hidden layer
w1 = 1  #width of the output layer
b0 = w0
b1 = w1
W0 = Normal(loc=tf.zeros([D, w0]), scale=tf.ones([D, w0]))
W1 = Normal(loc=tf.zeros([w0, w1]), scale=tf.ones([w0, w1]))
B0 = Normal(loc=tf.zeros(b0), scale=tf.ones(b0))
B1 = Normal(loc=tf.zeros(b1), scale=tf.ones(b1))
x_data = tf.placeholder(shape=[None, D], dtype=tf.float32, name="x_data")
y = Normal(loc=neural_network(x_data), scale=0.1 * tf.ones(FLAGS.N), name="y")
output = Normal(loc=neural_network(x_data), scale=0.1 * tf.ones(1)) #predictive output variable, copied later
q_W1 = Normal(loc=tf.Variable(tf.zeros([w0, w1])), name="q_W1",
              scale=tf.nn.softplus(tf.Variable(tf.zeros([w0, w1]))))
q_b1 = Normal(loc=tf.Variable(tf.zeros(b1)), name="q_b1",
              scale=tf.nn.softplus(tf.Variable(tf.zeros(b1))))
q_W0 = Normal(loc=tf.Variable(tf.zeros([D, w0])), name="q_W0",
              scale=tf.nn.softplus(tf.Variable(tf.zeros([D, w0]))))
q_b0 = Normal(loc=tf.Variable(tf.zeros(b0)), name="q_b0",
              scale=tf.nn.softplus(tf.Variable(tf.zeros(b0))))
inference = ed.KLqp({W0: q_W0, B0: q_b0,
                     W1: q_W1, B1: q_b1}, data={x_data: inputVars, y: yld})
inference.run(n_iter=nSamp) #optionally pass logdir=logDir to log for TensorBoard
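Before saving, a quick in-sample check can confirm that the fit is sensible. This is only a sketch using Edward's evaluate; y_post_train is a name I introduce here:
#posterior copy of y, bound to the fitted variational parameters
y_post_train = ed.copy(y, {W0: q_W0, B0: q_b0, W1: q_W1, B1: q_b1})
#mean squared error of the posterior predictive against the training targets
print(ed.evaluate('mean_squared_error', data={x_data: inputVars, y_post_train: yld}))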
I then print the weights and biases so that I can check later that reloading has restored them correctly.
train_vars = tf.trainable_variables()
print(train_vars)
for vr in train_vars:
    print(vr.name)
    print(sess.run(vr))
I then save the BNN using TensorFlow's Saver:
saver = tf.train.Saver()
save_path = saver.save(sess, 'pathway to …/fileName.ckpt')
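It is worth printing the returned path: the saver writes several files next to it, including a .meta file, which is what import_meta_graph reads in the reload script below.
#confirm where the checkpoint was written
print('Model saved to:', save_path)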
To reload in a new script, I begin by creating the global variables:
#create session
sess = ed.get_session()
#define the number of nodes in the hidden layer
hL = 14
#define the number of samples for prediction
nSamp = 10000
I can then define a new BNN and load the saved parameters into it:
w1x = tf.placeholder("float", name="w1x")
w2x = tf.placeholder("float", name="w2x")
b1x = tf.placeholder("float", name="b1x")
b2x = tf.placeholder("float", name="b2x")
fname = "pathway/filename.ckpt"
loader = tf.train.import_meta_graph(fname + '.meta')
loader.restore(sess, fname)
graph = tf.get_default_graph()
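If the restore misbehaves, TensorFlow's checkpoint inspector can show exactly what was saved; this is an optional sanity check, not part of the reload itself:
from tensorflow.python.tools import inspect_checkpoint as chkp
#list every tensor stored in the checkpoint, together with its values
chkp.print_tensors_in_checkpoint_file(fname, tensor_name='', all_tensors=True)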
#create empty BNN
w0 = hL #width of the hidden layer
w1 = 1  #width of the output layer
b0 = w0
b1 = w1
D=inputVars.shape[1]
#a placeholder for the input data
x_data = tf.placeholder(shape=[None, D], dtype=tf.float32, name="x_data")
W0 = Normal(loc=tf.zeros([D, w0]), scale=tf.ones([D, w0]))
W1 = Normal(loc=tf.zeros([w0, w1]), scale=tf.ones([w0, w1]))
B0 = Normal(loc=tf.zeros(b0), scale=tf.ones(b0))
B1 = Normal(loc=tf.zeros(b1), scale=tf.ones(b1))
def neural_network(x):
    h = tf.tanh(tf.matmul(x, W0) + B0)
    h = tf.matmul(h, W1) + B1
    return tf.reshape(h, [-1])
Next I populate the BNN with the restored posterior parameters:
output = Normal(loc=neural_network(x_data), scale=0.1 * tf.ones(1))
#populate with the reloaded data; sess.run fetches the stored values as
#arrays, while get_tensor_by_name references the restored graph tensors
#directly (either works here)
q_W1 = Normal(loc=sess.run('q_W1/loc:0'),
              scale=sess.run('q_W1/scale:0'))
q_b1 = Normal(loc=graph.get_tensor_by_name("q_b1/loc:0"),
              scale=graph.get_tensor_by_name("q_b1/scale:0"))
q_W0 = Normal(loc=sess.run('q_W0/loc:0'),
              scale=sess.run('q_W0/scale:0'))
q_b0 = Normal(loc=sess.run('q_b0/loc:0'),
              scale=sess.run('q_b0/scale:0'))
#create a posterior-predictive copy of the output for use
y_post = ed.copy(output, {W0: q_W0, B0: q_b0, W1: q_W1, B1: q_b1})
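With y_post in place, predictions come from repeatedly evaluating it. A minimal sketch, where X_test stands in for whatever new inputs you have:
import numpy as np
#draw nSamp posterior-predictive samples for the (hypothetical) test inputs X_test
preds = np.stack([sess.run(y_post, feed_dict={x_data: X_test}) for _ in range(nSamp)])
print(preds.mean(axis=0)) #posterior-predictive mean for each test point
print(preds.std(axis=0))  #and a measure of its uncertainty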
I can reuse the trainable_variables check from above to confirm that the reloaded weights and biases are correct:
train_vars = tf.trainable_variables()
print(train_vars)
for vr in train_vars:
    print(vr.name)
    print(sess.run(vr))
I hope that this description is useful and would welcome any improvements.