Changing ed.KLqp to ed.SGLD in the script below gives an error. Why?

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.cm as cm
import numpy as np
import six
import tensorflow as tf

from edward.models import (
    Categorical, Dirichlet, Empirical, InverseGamma,
    MultivariateNormalDiag, Normal, ParamMixture, Mixture)

plt.style.use('ggplot')

pi = np.array([0.5, 0.5])
mus = [[1, 1], [-1, -1]]
stds = [[0.1, 0.1], [0.1, 0.1]]

def build_toy_dataset(N, pi=pi, mus=mus, stds=stds):
  x = np.zeros((N, 2), dtype=np.float32)
  for n in range(N):
    k = np.argmax(np.random.multinomial(1, pi))
    x[n, :] = np.random.multivariate_normal(mus[k], np.diag(stds[k]))
  return x

N = 500 # number of data points
K = 2 # number of components
D = 2 # dimensionality of data
ed.set_seed(42)

x_train = build_toy_dataset(N)
x2_train = 5.0 * x_train

plt.scatter(x_train[:, 0], x_train[:, 1])
plt.title("Simulated dataset")
plt.show()

The collapsed version marginalizes out the mixture assignments.
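Concretely, instead of conditioning on a sampled assignment z_n for each data point, the per-point likelihood sums over components:

p(x_n | π, μ, σ²) = Σ_{k=1}^{K} π_k · N(x_n | μ_k, diag(σ²_k))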

pi = Dirichlet(tf.ones(K))
mu = Normal(tf.zeros(D), tf.ones(D), sample_shape=K)
sigmasq = InverseGamma(tf.ones(D), tf.ones(D), sample_shape=K)
cat = Categorical(probs=pi, sample_shape=N)

components = [
    MultivariateNormalDiag(mu[k], sigmasq[k], sample_shape=N)
    for k in range(K)]
x = Mixture(cat=cat, components=components, sample_shape=N)
x2 = 5.0 * x

The variational approximation for KLqp:

qmu = Normal(loc=tf.Variable(tf.random_normal([K, D])),
             scale=tf.nn.softplus(tf.Variable(tf.random_normal([K, D]))))

qsigmasq = InverseGamma(
    concentration=tf.nn.softplus(tf.Variable(tf.zeros([K, D]))),
    rate=tf.nn.softplus(tf.Variable(tf.zeros([K, D]))))

# inference = ed.KLqp({mu: qmu, sigmasq: qsigmasq}, data={x2: x2_train})
inference = ed.SGLD({mu: qmu, sigmasq: qsigmasq}, data={x2: x2_train})

n_iter = 10000
n_print = 500
n_samples = 30

inference.initialize(n_iter=n_iter, n_print=n_print, n_samples=n_samples)
sess = ed.get_session()
init = tf.global_variables_initializer()
init.run()

learning_curve = []
for t in range(inference.n_iter):
  info_dict = inference.update()
  if t % 1000 == 0:
    print(info_dict)
    print(qmu.loc.eval())
    print(qmu.scale.eval())
  learning_curve.append(info_dict['loss'])

plt.semilogy(learning_curve)
plt.show()

Running the SGLD version raises:

TypeError: Posterior approximation must consist of only Empirical random variables.
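For context: ed.SGLD is one of Edward's Monte Carlo algorithms, and those require every posterior approximation to be an Empirical random variable (a collection of samples) rather than a parametric family like Normal or InverseGamma, which is exactly what the TypeError says. Below is a minimal sketch of an SGLD setup; the sample count T and the step size are assumptions, and the positivity of sigmasq is glossed over (SGLD takes unconstrained gradient steps, so a constrained variable needs extra care):

T = 5000  # number of posterior samples to keep (assumed)
qmu = Empirical(params=tf.Variable(tf.zeros([T, K, D])))
qsigmasq = Empirical(params=tf.Variable(tf.ones([T, K, D])))  # positivity not enforced here

inference = ed.SGLD({mu: qmu, sigmasq: qsigmasq}, data={x2: x2_train})
inference.run(step_size=1e-3)

Note also that info_dict for Monte Carlo methods has no 'loss' entry, so the learning_curve loop above is specific to variational methods like KLqp, and n_samples is a KLqp.initialize argument rather than an SGLD one.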