Sampling from the prior of a Gaussian process

Hi!

I modified the Gaussian process classification tutorial (http://edwardlib.org/tutorials/supervised-classification) into a simple Gaussian process regression. However, when I try to sample from the prior over y, all samples seem to share the same underlying realization of f and only resample the observation noise. How do I properly draw independent samples from my GP prior?

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

plt.style.use('ggplot')

from edward.models import MultivariateNormalFullCovariance, Normal
from edward.util import rbf

N = 100

# Test inputs on a grid and noisy sine targets.
x_test = np.reshape(np.linspace(0, 10, N, dtype='float32'), (N, 1))
y_test = np.sin(x_test[:, 0] / 2) + np.random.normal(0, .1, size=N)

# Placeholder kept from the classification tutorial (unused below; the kernel
# is built directly from x_test).
X = tf.placeholder(tf.float32, [N, 1])

# GP prior over the latent function values at the test inputs:
# RBF kernel plus a small jitter term for numerical stability.
f = MultivariateNormalFullCovariance(
    loc=tf.zeros(N),
    covariance_matrix=rbf(x_test, lengthscale=1.0, variance=.01) + 1e-6 * tf.eye(N))

# Observation model: iid Gaussian noise around the latent function.
y = Normal(loc=f, scale=.01)

sess = ed.get_session()
tf.global_variables_initializer().run()

# Plot the data together with three draws from the prior over y.
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
ax.set_title("Samples from the GP prior")
ax.plot(x_test, y_test, 'ks', alpha=0.5, label='(x, y)')
ax.plot(x_test,
        y.sample(3).eval().T,
        'r', lw=2, alpha=0.5, label='prior sample')
ax.legend()
plt.show()
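
For reference, here is a minimal sketch of what I expected to get, assuming I understand the model correctly: each plotted curve should come from its own realization of f plus fresh noise. The names f_draws and y_draws are just ones I made up for this example, and it reuses the session and variables defined above.

# Sketch of the behaviour I expected: three independent draws of f,
# each with its own observation noise (reuses the session from above).
f_draws = f.sample(3).eval()                                   # shape (3, N)
y_draws = f_draws + np.random.normal(0, .01, size=f_draws.shape)

plt.plot(x_test, y_draws.T, 'b', lw=2, alpha=0.5)
plt.title("Independent prior draws")
plt.show()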