Test GANInference and ImplicitKLqp on a Bayesian NN

The source is in this file:
edward/tests/inferences/test_bayesian_nn.py

  def test_gan_inference(self):
    with self.test_session():
      N, D, W_1, W_2, W_3, b_1, b_2, X, y, X_train, y_train = self._test()

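      # Generator: the variables under the "Gen" scope. The rebound tensor
      # y = theta * y acts as the generated sample, which the `data` dict
      # below binds to the real data y_train.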
      with tf.variable_scope("Gen"):
        theta = tf.get_variable("theta", [1])
        y = tf.cast(y, tf.float32) * theta

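      # Discriminator: a function from a (real or generated) batch to
      # real-valued logits; GANInference trains its variables separately
      # from the generator's.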
      def discriminator(x):
        w = tf.get_variable("w", [1])
        return w * tf.cast(x, tf.float32)

      inference = ed.GANInference(
          data={y: tf.cast(y_train, tf.float32), X: X_train},
          discriminator=discriminator)
      inference.run(n_iter=1)

I don't understand the generator and the discriminator in this example.
And here is the ImplicitKLqp test:

  def test_normal_run(self):
    def ratio_estimator(data, local_vars, global_vars):
      """Use the optimal ratio estimator, r(z) = log p(z). We add a
      TensorFlow variable as the algorithm assumes that the function
      has parameters to optimize."""
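      # `data` maps observed variables to their values; `local_vars` maps
      # latent variables to samples from q (z is not declared global, so
      # its sample is fetched as local_vars[z]); `global_vars` is empty here.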
      w = tf.get_variable("w", [])
      return z.log_prob(local_vars[z]) + w

    with self.test_session() as sess:
      z = Normal(loc=5.0, scale=1.0)

      qz = Normal(loc=tf.Variable(tf.random_normal([])),
                  scale=tf.nn.softplus(tf.Variable(tf.random_normal([]))))

      inference = ed.ImplicitKLqp({z: qz}, discriminator=ratio_estimator)
      inference.run(n_iter=200)

Can someone please elaborate on how to apply ImplicitKLqp in the Bayesian NN case? I want to know what (data, local_vars, global_vars) mean in the BNN case.

Thanks very much!

Well, apologies for my hastiness; it seems the ImplicitKLqp example was right here all along.
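For anyone who finds this later, here is roughly how I now read those three arguments in the BNN case. This is an untested sketch mirroring the test above, not an official Edward example; the toy Bayesian linear model, sizes, and names are all made up. The weights are the latent variables; since they are not declared in global_vars, their samples arrive in local_vars keyed by the priors, and data holds the observed X and y:

  import numpy as np
  import tensorflow as tf
  import edward as ed
  from edward.models import Normal

  N, D = 50, 4  # toy sizes, made up
  X_train = np.random.randn(N, D).astype(np.float32)
  y_train = np.random.randn(N, 1).astype(np.float32)

  X = tf.placeholder(tf.float32, [N, D])
  # Priors over the weights: these are the latent variables of the BNN.
  W = Normal(loc=tf.zeros([D, 1]), scale=tf.ones([D, 1]))
  b = Normal(loc=tf.zeros([1]), scale=tf.ones([1]))
  y = Normal(loc=tf.matmul(X, W) + b, scale=tf.ones([N, 1]))

  # Variational approximations over the weights.
  qW = Normal(loc=tf.Variable(tf.random_normal([D, 1])),
              scale=tf.nn.softplus(tf.Variable(tf.random_normal([D, 1]))))
  qb = Normal(loc=tf.Variable(tf.random_normal([1])),
              scale=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))

  def ratio_estimator(data, local_vars, global_vars):
    """data: dict of observed variables to their values; local_vars: dict
    of latent variables to samples from q -- since W and b are not
    declared in global_vars, their samples arrive here, keyed by the
    priors W and b; global_vars: empty in this setup."""
    w = tf.get_variable("w", [])  # the estimator needs parameters to optimize
    flat = tf.concat([tf.reshape(local_vars[W], [-1]),
                      tf.reshape(local_vars[b], [-1])], 0)
    return w * tf.reduce_sum(flat)

  inference = ed.ImplicitKLqp(
      {W: qW, b: qb}, data={y: y_train, X: X_train},
      discriminator=ratio_estimator)
  inference.run(n_iter=200)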
