Neural Network link function

Hi everyone,

I’m seeking suggestions on improving a simple Bernoulli model that uses a two-layer neural network as the link function for the Bernoulli distribution. Initially, I put Normal priors on the weight and bias terms in the first layer, and the second layer was a sigmoid that transformed the first layer’s outputs into probabilities. That worked well. I’d now like to do the same thing with the TensorFlow function tf.layers.dense. The code is below, but I haven’t had much success with the inference.
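For reference, the earlier working version built the link by hand, with explicit priors on the first-layer weight and bias. This is a from-memory sketch, so the exact prior parameters may differ; it assumes the same imports and the same z as in the script below:

w = Normal(loc=tf.zeros([1, 1]), scale=tf.ones([1, 1]), name="w")
b = Normal(loc=tf.zeros([1]), scale=tf.ones([1]), name="b")
p = tf.sigmoid(tf.matmul(z, w) + b)  # first layer: affine; second layer: sigmoid
x = Bernoulli(probs=p, name="x")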

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import warnings
warnings.filterwarnings('ignore')

import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Bernoulli, Normal

def sigmoid(x):
    return 1. / (1. + np.exp(-x))

ed.set_seed(42)

n = 1000
z_obs = np.random.normal(loc=10.0, scale=1.0, size=n)
p_true = sigmoid(z_obs - 10.0)
x_obs = np.random.binomial(n=1, p=p_true).reshape((n, 1))

regularizer = tf.contrib.layers.l2_regularizer(scale=0.001)

def neural_network(z, w, b):
    # helper from the earlier hand-built version with explicit priors (unused below)
    h = tf.matmul(z, w) + b
    h = tf.sigmoid(h)
    return h

def nn(z):
    # two dense layers: a linear layer followed by a sigmoid layer
    h = tf.layers.dense(z, 1, activation=None, kernel_regularizer=regularizer)
    h = tf.layers.dense(h, 1, activation=tf.nn.sigmoid)
    return h

# MODEL

with tf.name_scope("model"):
    z = Normal(loc=10.0, scale=1.00, sample_shape=[n, 1], name="z")
    p = nn(z)
    x = Bernoulli(probs=p, name="x")

# INFERENCE

with tf.variable_scope("posterior"):
    with tf.variable_scope("qz"):
        qz_variables = [tf.get_variable("qz/loc", [n, 1]),
                        tf.get_variable("qz/scale", [n, 1])]
        qz = Normal(loc=qz_variables[0], scale=tf.nn.softplus(qz_variables[1]))

inference = ed.KLqp({z: qz}, data={x: x_obs})
inference.initialize(n_samples=3)

sess = ed.get_session()
tf.global_variables_initializer().run()
for _ in range(inference.n_iter):
    info_dict = inference.update()
    inference.print_progress(info_dict)
    t = info_dict['t']
    if t % 100 == 0:
        phat = sess.run(p)

1000/1000 [100%] Elapsed: 7s | Loss: 692.635

In [6]: np.set_printoptions(2)
phat.mean(), np.percentile(phat, q=[0.0, 2.5, 50, 97.5, 100.0])

Out[6]: (0.48400226, array([0.48, 0.48, 0.48, 0.48, 0.48]))

In [7]: np.set_printoptions(2)
p_true.mean(), np.percentile(p_true, q=[0.0, 2.5, 50, 97.5, 100.0])

Out[7]: (0.5028831582094038, array([0.04, 0.14, 0.51, 0.87, 0.98]))

Best,
Mark

Hi all,

I’ve been able to fix the problem. The main changes were replacing the two stacked dense layers with a single dense layer, giving that layer explicit kernel and bias initializers, and passing var_list explicitly to initialize(). However, I’d still welcome suggestions on improving the inference; one tweak I’m considering is sketched after the results. Here is the new code:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import warnings
warnings.filterwarnings('ignore')
import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Bernoulli, Normal

def sigmoid(x):
    return 1. / (1. + np.exp(-x))

ed.set_seed(42)

n = 1000
z_obs = np.random.normal(loc=10.0, scale=1.0, size=n)
p_true = sigmoid(z_obs - 10.0)
x_obs = np.random.binomial(n=1, p=p_true).reshape((n, 1))
# MODEL

# tight random-normal initializers for the dense layer's kernel and bias
kernel_init = tf.random_normal_initializer(mean=1.00, stddev=0.001)
bias_init = tf.random_normal_initializer(mean=10.0, stddev=0.001)

def neural_network(z):
    h = tf.layers.dense(z, 1, kernel_initializer=kernel_init, 
                         bias_initializer=bias_init,
                         trainable=True, activation=None)
    h = tf.nn.sigmoid(h)
    return h

with tf.name_scope("model"):
    z = Normal(loc=10.0, scale=1.00, sample_shape=[n, 1], name="z")
    p = neural_network(z)
    x = Bernoulli(probs=p, name="x")
# INFERENCE

with tf.variable_scope("posterior"):
    with tf.variable_scope("qz"):
        qz_variables = [tf.get_variable("qz/loc", [n, 1]),
                        tf.get_variable("qz/scale", [n, 1])]
        qz = Normal(loc=qz_variables[0], scale=tf.nn.softplus(qz_variables[1]))

    inference = ed.KLqp({z: qz}, data={x: x_obs})
    inference.initialize(n_samples=5, var_list=tf.trainable_variables())

    sess = ed.get_session()
    tf.global_variables_initializer().run()
    for _ in range(inference.n_iter):
        info_dict = inference.update()
        inference.print_progress(info_dict)
        t = info_dict['t']
        if t % 100 == 0:
            phat = sess.run(p)
1000/1000 [100%]  Elapsed: 9s | Loss: 687.535
np.set_printoptions(2)
phat.mean(), np.percentile(phat, q=[0.0, 2.5, 50, 97.5, 100.0])
(0.48167148, array([0.09, 0.19, 0.49, 0.78, 0.85]))
np.set_printoptions(2)
p_true.mean(), np.percentile(p_true, q=[0.0, 2.5, 50, 97.5, 100.0])
(0.5028831582094038, array([0.04, 0.14, 0.51, 0.87, 0.98]))
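In case it helps with suggestions: the direction I’m considering next is averaging the ELBO gradient over more Monte Carlo samples and passing an explicit optimizer, then checking the fit with a posterior predictive metric. This is an untested sketch (the learning rate and n_samples are guesses), meant to replace the initialize() call above:

# more Monte Carlo samples per gradient step, explicit Adam optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
inference.initialize(n_samples=25, optimizer=optimizer,
                     var_list=tf.trainable_variables())

# after training: posterior predictive check of binary accuracy
x_post = ed.copy(x, {z: qz})
print(ed.evaluate('binary_accuracy', data={x_post: x_obs}))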