# Bayesian hacker chapter1_introduction imp use edward

EDIT: Oops, this is a re-post. See Bayesian hacker chapter 1 use edward.

@cavities

``````import pymc3 as pm
import theano.tensor as tt

with pm.Model() as model:
alpha = 1.0/count_data.mean()  # Recall count_data is the
# variable that holds our txt counts
lambda_1 = pm.Exponential("lambda_1", alpha)
lambda_2 = pm.Exponential("lambda_2", alpha)

tau = pm.DiscreteUniform("tau", lower=0, upper=n_count_data - 1)

with model:
idx = np.arange(n_count_data) # Index
lambda_ = pm.math.switch(tau >= idx, lambda_1, lambda_2)
observation = pm.Poisson("obs", lambda_, observed=count_data)
step = pm.Metropolis()
trace = pm.sample(10000, tune=5000,step=step)
``````

and use edward

``````sess=tf.Session()
alpha_f = 1.0/count_data.mean()

alpha = tf.Variable(alpha_f, name="alpha", dtype=tf.float32)

# init
lambda_1 = Exponential(alpha)
lambda_2 = Exponential(alpha)
tau = Uniform(low=0.0,high=float(n_count_data - 1))
idx = np.arange(n_count_data)
lambda_ = tf.where(tau>=idx,tf.ones(shape=[n_count_data,],dtype=tf.float32)*lambda_1,tf.ones(shape=[n_count_data,],dtype=tf.float32)*lambda_2)

# error
z = Poisson(lambda_,value=tf.Variable(tf.ones(n_count_data)))

# model
T = 5000  # number of posterior samples
qlambda_1 =  Empirical(params=tf.Variable(tf.zeros([T,n_count_data,1])))
qlambda_2 =  Empirical(params=tf.Variable(tf.zeros([T,n_count_data,1])))
# qlambda_  =  Empirical(params=tf.Variable(tf.zeros([T,n_count_data,1])))

qz = Empirical(params=tf.Variable(tf.random_normal([n_count_data,1])))
inference = ed.HMC({lambda_1:qlambda_1,lambda_2:qlambda_2},data={z:count_data})
inference.run()
``````
``````
TypeError                                 Traceback (most recent call last)
<ipython-input-134-fd551ffe571e> in <module>()
23
24 qz = Empirical(params=tf.Variable(tf.random_normal([n_count_data,1])))
---> 25 inference = ed.HMC({lambda_1:qlambda_1,lambda_2:qlambda_2},data={z:count_data})
26 inference.run()
27

c:\python35\lib\site-packages\edward\inferences\hmc.py in __init__(self, *args, **kwargs)
48     >>> inference = ed.HMC({z: qz}, data)
49     """
---> 50     super(HMC, self).__init__(*args, **kwargs)
51
52   def initialize(self, step_size=0.25, n_steps=2, *args, **kwargs):

c:\python35\lib\site-packages\edward\inferences\monte_carlo.py in __init__(self, latent_vars, data)
87                            "a scalar sample shape.")
88
---> 89     super(MonteCarlo, self).__init__(latent_vars, data)
90
91   def initialize(self, *args, **kwargs):

c:\python35\lib\site-packages\edward\inferences\inference.py in __init__(self, latent_vars, data)
70       data = {}
71
---> 72     check_latent_vars(latent_vars)
73     self.latent_vars = latent_vars
74

c:\python35\lib\site-packages\edward\util\random_variables.py in check_latent_vars(latent_vars)
74     elif not key.shape.is_compatible_with(value.shape):
75       raise TypeError("Key-value pair in latent_vars does not have same "
---> 76                       "shape: {}, {}".format(key.shape, value.shape))
77     elif key.dtype != value.dtype:
78       raise TypeError("Key-value pair in latent_vars does not have same "

TypeError: Key-value pair in latent_vars does not have same shape: (), (74, 1)

``````

ps:url

The error message says that youâ€™re matching latent variables with differing dimensions. For example, you wrote `qlambda_1` as an Empirical distribution with shape `[n_count_data, 1]` to try to approximate a scalar random variable `lambda_1`.