NaNs produced in output

I’m trying to implement a type of factor model, which is a variation on the one proposed here: https://arxiv.org/abs/1310.4195

I’m having trouble overcoming a problem that leads to NaNs in the output (the loss reported during inference turns into NaN).

A much simplified version of the model I’m working on is given below as stand-alone code, which should run independently in a Jupyter notebook. Thanks for reading this!
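For reference, the generative model the code implements is

```latex
\tau_j^2 \sim \mathrm{InvGamma}(\alpha_\tau, \beta_\tau), \quad j = 1, \dots, r
f_{ij} \sim \mathcal{N}(0, \tau_j^2), \quad i = 1, \dots, n
M_{jk} \sim \mathcal{N}(0, 1/p), \quad k = 1, \dots, p
x_i \mid f, M \sim \mathcal{N}(f_i M, I_p)
```

with α_τ = 1.1, β_τ = 1, and n = 50, p = 5, r = 2 in the script.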

```python
import numpy as np
import scipy.stats as sps
import tensorflow as tf
import edward as ed
import matplotlib.pylab as plt
from edward.models import Normal, MultivariateNormalFullCovariance, InverseGamma

# Dimensions and hyperparameters
n = 50        # number of observations
p = 5         # observed dimension
r = 2         # number of latent factors
alphaTau = 1.1
betaTau = 1

# Simulate data from the generative model
Mdata = np.random.normal(0, p ** (-1 / 2), [r, p])
tauData = sps.invgamma.rvs(alphaTau, size=r)
fData = np.random.normal(0, tauData ** (1 / 2), [n, r])
EXdata = np.matmul(fData, Mdata)
Xdata = np.apply_along_axis(np.random.multivariate_normal, 1, EXdata, np.eye(p))

# Model: per-factor variances tau2, factors f, loadings M, observations X
tau2 = InverseGamma(concentration=alphaTau * tf.ones(r), rate=betaTau * tf.ones(r))
f = Normal(loc=tf.zeros([n, r]), scale=tf.sqrt(tau2))
M = Normal(loc=tf.zeros([r, p]), scale=tf.ones([r, p]) * (p ** (-1 / 2)))
EX = tf.matmul(f, M)
X = MultivariateNormalFullCovariance(loc=EX, covariance_matrix=tf.diag(tf.ones(p)))

# Variational family for M and tau2
qM = Normal(loc=tf.get_variable("qM/loc", [r, p]),
            scale=tf.get_variable("qM/scale", [r, p]))
qTau2 = InverseGamma(concentration=tf.get_variable("qTau2/alpha", [r]),
                     rate=tf.get_variable("qTau2/beta", [r]))

# Run KLqp, recording the loss at each iteration
inference = ed.KLqp({M: qM, tau2: qTau2}, data={X: Xdata})
inference.initialize(n_iter=1000, n_print=100, n_samples=30)
sess = ed.get_session()
tf.global_variables_initializer().run()

learning_curve = []
for t in range(inference.n_iter):
    info_dict = inference.update()
    if t % 50 == 0:
        print(info_dict)
    learning_curve.append(info_dict['loss'])
plt.semilogy(learning_curve)
```
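
One thing I notice, though I’m not sure it’s the cause: nothing constrains qM’s scale, or qTau2’s concentration and rate, to stay positive, so the optimizer is free to drive them negative. If that is what produces the NaNs, would wrapping the raw variables in a softplus be the right fix? A sketch of what I mean (the `_raw` names are just my relabelling), replacing the definitions of qM and qTau2 above:

```python
# Untested guess: keep the variational parameters positive by passing
# unconstrained variables through tf.nn.softplus.
qM = Normal(loc=tf.get_variable("qM/loc", [r, p]),
            scale=tf.nn.softplus(tf.get_variable("qM/scale_raw", [r, p])))
qTau2 = InverseGamma(
    concentration=tf.nn.softplus(tf.get_variable("qTau2/alpha_raw", [r])),
    rate=tf.nn.softplus(tf.get_variable("qTau2/beta_raw", [r])))
```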