In the linear regression model y = x_coeff*X + intercept + N(0, sigma^2), PyMC3 can infer sigma along with the coefficients, as in http://pymc-devs.github.io/pymc3/notebooks/GLM-linear.html#Bayesian-GLMs-in-PyMC3
However, in the Edward tutorials, the standard deviation of the dependent variable is usually fixed to a constant 1. I tried the code below to infer sigma jointly with the other parameters, but the estimated coefficients are not correct. Is anything wrong in my code?
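For reference, this is roughly the pattern from the linked PyMC3 notebook that I am trying to reproduce (written from memory, with a made-up toy dataset just for illustration):

import numpy as np
import pymc3 as pm

# toy 1-D data, just to illustrate the pattern
x = np.random.randn(200)
y = 1.5 * x + 0.3 + np.random.normal(0, 0.5, size=200)

with pm.Model() as model:
    # sigma gets its own prior and is inferred jointly with the coefficients
    sigma = pm.HalfCauchy('sigma', beta=10)
    intercept = pm.Normal('Intercept', 0, sd=20)
    x_coeff = pm.Normal('x', 0, sd=20)
    likelihood = pm.Normal('y', mu=intercept + x_coeff * x, sd=sigma, observed=y)
    trace = pm.sample(1000)

My Edward attempt is below: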
import edward as ed
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import tensorflow as tf
from edward.models import Normal, Empirical, Chi2, Uniform
import time
def build_toy_dataset(N, w, b, noise_std=0.1):
    """Simulate y = Xw + b + noise for known w, b and noise level."""
    D = len(w)
    x = np.random.randn(N, D)
    y = np.dot(x, w) + b + np.random.normal(0, noise_std, size=N)
    # cast to float32 so the arrays match the dtype of the TF placeholders
    return x.astype(np.float32), y.astype(np.float32)
N = 100  # number of data points
D = 3    # number of features
noise_std = 0.2 / np.sqrt(50)
w_true = np.random.randn(D)
b_true = np.random.randn(1)
X_train, y_train = build_toy_dataset(N, w_true, b_true, noise_std)
# X_test, y_test = build_toy_dataset(N, w_true, b_true)
# Model: y ~ Normal(Xw + b, sigma), with a Chi2 prior on sigma
X = tf.placeholder(tf.float32, [N, D])
w = Normal(loc=tf.zeros(D), scale=tf.ones(D))
b = Normal(loc=tf.zeros(1), scale=tf.ones(1))
sigma = Chi2(df=2.5 * tf.ones(1))
y = Normal(loc=ed.dot(X, w) + b, scale=sigma)
# Variational approximations: Normals for w and b, a Chi2 for sigma
qw = Normal(loc=tf.Variable(tf.random_normal([D])),
            scale=tf.nn.softplus(tf.Variable(tf.random_normal([D]))))
qb = Normal(loc=tf.Variable(tf.random_normal([1])),
            scale=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))
qsigma = Chi2(df=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))

inference = ed.KLqp({w: qw, b: qb, sigma: qsigma}, data={X: X_train, y: y_train})
inference.run(n_samples=5, n_iter=4000)
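This is roughly how I check the fit afterwards (a minimal sketch; it assumes the run above has finished and uses Edward's shared session via ed.get_session()):

# Compare the variational posterior means with the true parameters.
sess = ed.get_session()
print("w_true: ", w_true)
print("qw mean:", sess.run(qw.mean()))
print("b_true: ", b_true)
print("qb mean:", sess.run(qb.mean()))
# The mean of a Chi2 equals its df, so this is the fitted estimate of sigma.
print("qsigma mean:", sess.run(qsigma.mean()))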