Thank you for your reply, dustin. But where should I put the set seed statement? I have attached a sampel code below. If ed.set_seed(0) is used the same error araised: RuntimeError: Seeding is not supported after initializing part of the graph. Please move set_seed to the beginning of your code.

If I use tf.set_random_seed(0), the resutls of the two inferences are different.

```
import edward as ed
from edward.models import Normal
import tensorflow as tf
import numpy as np
from importlib import reload
##To generate sample data
def build_toy_dataset(N, w,b, noise_std=0.1):
np.random.seed(0)
D = len(w)
x = np.random.randn(N, D)
y = np.dot(x, w) +b #+ np.random.normal(0, noise_std, size=N)
print('x[0]',x[0])
print('y[0]',y[0])
return x, y
###end of function build_toy_dataset
##function to calibrate Bayesian linear regression
def BLR(N,D,X_train, y_train):
#ed.set_seed(0)
#tf.set_random_seed(0)
sess = tf.InteractiveSession()
with sess.as_default():
X = tf.placeholder(tf.float32, [N, D])
w = Normal(loc=tf.zeros(D), scale=tf.ones(D))
b = Normal(loc=tf.zeros(1), scale=tf.ones(1))
y = Normal(loc=ed.dot(X, w) + b, scale=tf.ones(1))
qw = Normal(loc=tf.Variable(tf.random_normal([D])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([D]))))
qb = Normal(loc=tf.Variable(tf.random_normal([1])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))
inference = ed.KLqp({w: qw, b: qb}, data={X: X_train, y: y_train})
inference.run(n_samples=2, n_iter=1000)
return qw.mean().eval(),qb.mean().eval()
##end of BLR
##The main function
N = 100 # number of data points
D = 3 # number of features
noise_std=0.2/np.sqrt(50)
w_true=np.array([0.3,-0.2,0.1])
b_true=np.array([0.2])
X_train, y_train = build_toy_dataset(N, w_true,b_true,noise_std)
qw_list=[];qb_list=[];
for ic in range(2):
#tf.set_random_seed(0)
#ed.set_seed(0)
qw,qb=BLR(N,D,X_train, y_train)
qw_list.append(qw)
qb_list.append(qb)
for ic in range(2):
print(qw_list[ic])###I hope to get the same qw here
```