InvalidArgumentError: assertion failed: [predictions must be in [0, 1]]

Dear brothers,

I am working with the tensorflow/slim package, which provides a training script and an evaluation script. Training completes successfully, but when I run the evaluation script I get the error below.

Here is the error output from the terminal:
totalMemory: 3.95GiB freeMemory: 3.71GiB
2018-03-03 17:06:40.438218: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1312] Adding visible gpu devices: 0
2018-03-03 17:06:41.077317: I tensorflow/core/common_runtime/gpu/gpu_device.cc:993] Creating TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 3441 MB memory) -> physical GPU (device: 0, name: GeForce GTX 950M, pci bus id: 0000:01:00.0, compute capability: 5.0)
INFO:tensorflow:Restoring parameters from /home/dnn/Desktop/Project/BC_malignant_ckpt/inception_v1/model.ckpt-300
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
Traceback (most recent call last):
  File "/home/dnn/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1361, in _do_call
    return fn(*args)
  File "/home/dnn/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1340, in _run_fn
    target_list, status, run_metadata)
  File "/home/dnn/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py", line 516, in __exit__
    c_api.TF_GetCode(self.status.status))
tensorflow.python.framework.errors_impl.InvalidArgumentError: assertion failed: [predictions must be in [0, 1]] [Condition x <= y did not hold element-wise:x (ArgMax:0) = ] [0 2 0…] [y (auc/Cast_1:0) = ] [1]
[[Node: auc/assert_less_equal/Assert/AssertGuard/Assert = Assert[T=[DT_STRING, DT_STRING, DT_INT64, DT_STRING, DT_INT64], summarize=3, _device="/job:localhost/replica:0/task:0/device:CPU:0"](auc/assert_less_equal/Assert/AssertGuard/Assert/Switch/_1777, auc/assert_less_equal/Assert/AssertGuard/Assert/data_0, auc/assert_less_equal/Assert/AssertGuard/Assert/data_1, auc/assert_less_equal/Assert/AssertGuard/Assert/Switch_1/_1779, auc/assert_less_equal/Assert/AssertGuard/Assert/data_3, auc/assert_less_equal/Assert/AssertGuard/Assert/Switch_2/_1781)]]
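
Reading the assertion: the failing node is in the auc/ name scope, and the tensor it rejects is ArgMax:0, whose values ([0 2 0…]) must be <= 1. So the AUC metric seems to receive the class indices produced by tf.argmax(logits, 1) rather than scores in [0, 1]. A minimal sketch of what I believe is happening (my reading, not verified):

logits = tf.constant([[2.0, 0.1, 3.0]])  # one example, three classes
class_idx = tf.argmax(logits, 1)         # -> [2], outside [0, 1]: the assertion fires
scores = tf.nn.softmax(logits)           # -> values in [0, 1], which an AUC metric expects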

Here is a snippet of the evaluation file:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import tensorflow as tf

from datasets import dataset_factory
from nets import nets_factory
from preprocessing import preprocessing_factory

slim = tf.contrib.slim

tf.app.flags.DEFINE_integer(
    'batch_size', 100, 'The number of samples in each batch.')

tf.app.flags.DEFINE_integer(
    'max_num_batches', None,
    'Max number of batches to evaluate by default use all.')

tf.app.flags.DEFINE_string(
    'master', '', 'The address of the TensorFlow master to use.')

tf.app.flags.DEFINE_string(
    'checkpoint_path', '/tmp/tfmodel/',
    'The directory where the model was written to or an absolute path to a '
    'checkpoint file.')

tf.app.flags.DEFINE_string(
    'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved to.')

tf.app.flags.DEFINE_integer(
    'num_preprocessing_threads', 4,
    'The number of threads used to create the batches.')

tf.app.flags.DEFINE_string(
    'dataset_name', 'imagenet', 'The name of the dataset to load.')

tf.app.flags.DEFINE_string(
    'dataset_split_name', 'test', 'The name of the train/test split.')

tf.app.flags.DEFINE_string(
    'dataset_dir', None, 'The directory where the dataset files are stored.')

tf.app.flags.DEFINE_integer(
    'labels_offset', 0,
    'An offset for the labels in the dataset. This flag is primarily used to '
    'evaluate the VGG and ResNet architectures which do not use a background '
    'class for the ImageNet dataset.')

tf.app.flags.DEFINE_string(
    'model_name', 'inception_v3', 'The name of the architecture to evaluate.')

tf.app.flags.DEFINE_string(
    'preprocessing_name', None, 'The name of the preprocessing to use. If left '
    'as None, then the model_name flag is used.')

tf.app.flags.DEFINE_float(
    'moving_average_decay', None,
    'The decay to use for the moving average. '
    'If left as None, then moving averages are not used.')

tf.app.flags.DEFINE_integer(
    'eval_image_size', None, 'Eval image size')

FLAGS = tf.app.flags.FLAGS

def main(_):
  if not FLAGS.dataset_dir:
    raise ValueError('You must supply the dataset directory with --dataset_dir')

  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default():
    tf_global_step = slim.get_or_create_global_step()

    ######################
    # Select the dataset #
    ######################
    dataset = dataset_factory.get_dataset(
        FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)

    ####################
    # Select the model #
    ####################
    network_fn = nets_factory.get_network_fn(
        FLAGS.model_name,
        num_classes=(dataset.num_classes - FLAGS.labels_offset),
        is_training=False)

    ##############################################################
    # Create a dataset provider that loads data from the dataset #
    ##############################################################
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        shuffle=False,
        common_queue_capacity=2 * FLAGS.batch_size,
        common_queue_min=FLAGS.batch_size)
    [image, label] = provider.get(['image', 'label'])
    label -= FLAGS.labels_offset

    #####################################
    # Select the preprocessing function #
    #####################################
    preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
    image_preprocessing_fn = preprocessing_factory.get_preprocessing(
        preprocessing_name,
        is_training=False)

    eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size

    image = image_preprocessing_fn(image, eval_image_size, eval_image_size)

    images, labels = tf.train.batch(
        [image, label],
        batch_size=FLAGS.batch_size,
        num_threads=FLAGS.num_preprocessing_threads,
        capacity=5 * FLAGS.batch_size)

    ####################
    # Define the model #
    ####################
    logits, _ = network_fn(images)

    if FLAGS.moving_average_decay:
      variable_averages = tf.train.ExponentialMovingAverage(
          FLAGS.moving_average_decay, tf_global_step)
      variables_to_restore = variable_averages.variables_to_restore(
          slim.get_model_variables())
      variables_to_restore[tf_global_step.op.name] = tf_global_step
    else:
      variables_to_restore = slim.get_variables_to_restore()

    predictions = tf.argmax(logits, 1)
    labels = tf.squeeze(labels)

    # Define the metrics:
    names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
        'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
        'Recall_5': slim.metrics.streaming_recall_at_k(logits, labels, 5),
        'TruePositives': slim.metrics.streaming_true_positives(predictions, labels),
        'TrueNegatives': slim.metrics.streaming_true_negatives(predictions, labels),
        'FalsePositives': slim.metrics.streaming_false_positives(predictions, labels),
        'FalseNegatives': slim.metrics.streaming_false_negatives(predictions, labels),
        'Precision': slim.metrics.streaming_precision(predictions, labels),
        'Auc': slim.metrics.streaming_auc(predictions, labels),
    })
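    # NOTE: my suspect is the 'Auc' entry above: the assertion in the
    # traceback comes from the auc/ scope and names ArgMax:0 as the input
    # that must lie in [0, 1] (see my note after the code).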

    c_matrix = slim.metrics.confusion_matrix(predictions, labels)

    # These operations are needed to render the confusion matrix as an
    # image summary: cast to float and add batch/channel dimensions.
    c_matrix = tf.cast(c_matrix, tf.float32)
    c_matrix = tf.expand_dims(c_matrix, 2)
    c_matrix = tf.expand_dims(c_matrix, 0)

    op = tf.summary.image('confusion matrix', c_matrix, collections=[])
    tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

    # Print the summaries to screen.
    # (Replaced .iteritems() with .items() for Python 3.)
    for name, value in names_to_values.items():
      summary_name = 'eval/%s' % name
      op = tf.summary.scalar(summary_name, value, collections=[])
      op = tf.Print(op, [value], summary_name)
      tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

    # TODO(sguada) use num_epochs=1
    if FLAGS.max_num_batches:
      num_batches = FLAGS.max_num_batches
    else:
      # This ensures that we make a single pass over all of the data.
      num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))

    if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
      checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
    else:
      checkpoint_path = FLAGS.checkpoint_path

    tf.logging.info('Evaluating %s' % checkpoint_path)

    ############################################
    actual = labels
    predicted = predictions

    # Count true positives, true negatives, false positives and false negatives.
    tp = tf.count_nonzero(predicted * actual)
    tn = tf.count_nonzero((predicted - 1) * (actual - 1))
    fp = tf.count_nonzero(predicted * (actual - 1))
    fn = tf.count_nonzero((predicted - 1) * actual)
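    # NOTE (my assumption): these count_nonzero identities only hold when
    # `predicted` and `actual` are binary values in {0, 1}; with class
    # indices such as 2 (present per the assertion above), the products no
    # longer correspond to TP/TN/FP/FN.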

    # Calculate accuracy, precision, recall and F1 score.
    accuracy = (tp + tn) / (tp + fp + fn + tn)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    fmeasure = (2 * precision * recall) / (precision + recall)

    # Add metrics to TensorBoard.
    tf.summary.scalar('Accuracy', accuracy)
    tf.summary.scalar('Precision', precision)
    tf.summary.scalar('Recall', recall)
    tf.summary.scalar('f-measure', fmeasure)

    tpr = tp / (tp + fn)
    fpr = fp / (fp + tn)  # false positive rate is FP / (FP + TN), not FP / (TP + FN)

    f1_score = (2 * (precision * recall)) / (precision + recall)  # identical to fmeasure above
    tf.summary.scalar('tpr', tpr)
    tf.summary.scalar('fpr', fpr)
    tf.summary.scalar('tp', tp)
    tf.summary.scalar('fp', fp)
    tf.summary.scalar('tn', tn)
    tf.summary.scalar('fn', fn)

    confusion = tf.contrib.metrics.confusion_matrix(predictions, labels)
    tf.summary.tensor_summary('con1', c_matrix)
    tf.summary.tensor_summary('con2', confusion)

    #####################################################
    slim.evaluation.evaluate_once(
        master=FLAGS.master,
        checkpoint_path=checkpoint_path,
        logdir=FLAGS.eval_dir,
        num_evals=num_batches,
        eval_op=list(names_to_updates.values()),
        variables_to_restore=variables_to_restore)
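
If my reading above is correct, a possible fix (untested, an assumption on my part) is to feed the AUC metric softmax probabilities together with one-hot labels, so that both inputs lie in [0, 1]:

probabilities = tf.nn.softmax(logits)
one_hot_labels = tf.one_hot(labels, dataset.num_classes - FLAGS.labels_offset)
# then, inside aggregate_metric_map:
#     'Auc': slim.metrics.streaming_auc(probabilities, one_hot_labels),

Does that look like the right direction, or is there a better way to compute AUC (and the TP/TN/FP/FN counts) for a multi-class model?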