Hi everyone!
I’m trying to estimate the demand rate of a product with inventory stock-out. I started with a simulation of the data and then tried to estimate the latent parameter lambda using variational inference.
The estimated posterior q_lambda however doesn’t seem to capture the lambda value of 1.5
I’ve gotten variational inference to work for the case where inventory is always available (i.e. simply estimating the lambda of Poisson distributed data) but I’m wondering why it breaks down when using a composition of variables.
Code I used is shown below. Thanks!
import tensorflow as tf
from edward.models import Normal,HalfNormal, Beta, Poisson, Uniform
import edward as ed
import numpy as np
import matplotlib.pyplot as plt
# DATA SIMULATION
# I_train simulates inventory availability (1 if product is available)
I_train = np.array([np.random.choice((1,0), p=(.8,.2)) for i in range(3000)]).reshape(-1,1)
# T is the number of time intervals
T = I_train.shape[0]
# S_train simulates number of purchases of a product per time interval
S_train = np.multiply(np.random.poisson(lam=1.5, size=(T,1)),I_train)
#MODEL
# this is the latent variable we wish to estimate
var_lambda = Uniform(tf.scalar_mul(0,tf.ones([1,1])),tf.scalar_mul(10,tf.ones([1,1])))
I = tf.placeholder(tf.float32,[T,1])
S = tf.multiply(Poisson(tf.tile(tf.transpose(var_lambda),[T,1])),I)
#INFERENCE
# using variational inference
q_lambda_scale = tf.nn.softplus(tf.Variable(tf.random_normal([1,1])))
q_lambda_loc = tf.Variable(tf.random_normal([1,1]))
q_lambda = Normal(loc = q_lambda_loc, scale = q_lambda_scale)
inference = ed.KLqp({var_lambda:q_lambda}, data={I: I_train, S: S_train})
inference.run(n_samples=1, n_iter=5000)
print(q_lambda.mean().eval())
x_range = tf.range(-10,30,.1)
sess = ed.get_session()
plt.plot(*sess.run([x_range, tf.transpose(var_lambda.prob(x_range))]), color="green")
plt.plot(*sess.run([x_range, tf.transpose(q_lambda.prob(x_range))]), color="blue")
plt.axvline(x = 1.5, color = "red")
plt.show()