How do I correctly restore the weights and biases of a stacked autoencoder from a saved model?

Problem description:

I am using https://github.com/cmgreen210/TensorFlowDeepAutoencoder. I save the model after the fine-tuning step and then try to restore it. When I restore the model and try to get the variables back from it, I get this error: ValueError: Variable autoencoder_variables/weights1 does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope? If I set reuse to False instead, it creates brand-new weight and bias variables. How do I correctly get the weights and biases of the stacked autoencoder back from the model?
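As far as I can tell, the error reduces to the following pattern (a minimal sketch, TF 1.x; my assumption about what is going on: reuse=True can only find variables registered through tf.get_variable, not ones created with tf.Variable):

import tensorflow as tf

with tf.name_scope("autoencoder_variables"):
    # Created with tf.Variable, so it is NOT registered in the variable store.
    w = tf.Variable(tf.zeros((784, 2000)), name="weights1")

with tf.variable_scope("autoencoder_variables", reuse=True):
    # Raises: Variable autoencoder_variables/weights1 does not exist,
    # or was not created with tf.get_variable(). ...
    w2 = tf.get_variable("weights1", shape=(784, 2000))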

Here is my code to restore the model:

def do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
            data_set):
    true_count = 0  # Counts the number of correct predictions.
    steps_per_epoch = data_set.num_examples // FLAGS.batch_size
    num_examples = steps_per_epoch * FLAGS.batch_size
    for step in range(steps_per_epoch):
        feed_dict = fill_feed_dict(data_set,
                                   images_placeholder,
                                   labels_placeholder)
        true_count += sess.run(eval_correct, feed_dict=feed_dict)
    precision = true_count / num_examples
    print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
          (num_examples, true_count, precision))

def evaluation(logits, labels):
    # correct is a bool vector: True where the top prediction equals the label.
    correct = tf.nn.in_top_k(logits, labels, 1)
    return tf.reduce_sum(tf.cast(correct, tf.int32))

def test_nets(self):
    data = read_data_sets(FLAGS.data_dir)
    ckpt = tf.train.get_checkpoint_state("model_sps_2017-08-29_11:45:25")

    sess = tf.InteractiveSession()
    saver = tf.train.import_meta_graph('model_sps_2017-08-29_11:45:25/model.meta')
    saver.restore(sess, ckpt.model_checkpoint_path)

    with sess.as_default():
        ae_shape = [784, 2000, 2000, 2000, 10]
        ae = AutoEncoder(ae_shape, sess)

        input_pl = tf.placeholder(tf.float32,
                                  shape=(FLAGS.batch_size, FLAGS.image_pixels),
                                  name='input_pl')
        sup_net = ae.supervised_net(input_pl)
        data = read_data_sets(FLAGS.data_dir)
        labels_placeholder = tf.placeholder(tf.int32,
                                            shape=FLAGS.batch_size,
                                            name='target_pl')
        eval_correct = evaluation(sup_net, labels_placeholder)
        do_eval(sess,
                eval_correct,
                input_pl,
                labels_placeholder,
                data.test)
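To see what the checkpoint actually contains, I can list the stored variable names with tf.train.NewCheckpointReader (a sketch; the tensor name in the last line is my guess and should be checked against the printed map first):

import tensorflow as tf

ckpt = tf.train.get_checkpoint_state("model_sps_2017-08-29_11:45:25")
reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path)

# Every variable stored in the checkpoint, with its shape.
for name, shape in reader.get_variable_to_shape_map().items():
    print(name, shape)

# Read one stored tensor directly as a numpy array.
w1 = reader.get_tensor("autoencoder_variables/weights1")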

Here is my code for training and saving the supervised model:

def main_supervised(ae):
    with ae.session.graph.as_default():
        saver = tf.train.Saver()
        sess = ae.session
        input_pl = tf.placeholder(tf.float32,
                                  shape=(FLAGS.batch_size, FLAGS.image_pixels),
                                  name='input_pl')
        logits = ae.supervised_net(input_pl)

        data = read_data_sets(FLAGS.data_dir)
        num_train = data.train.num_examples

        labels_placeholder = tf.placeholder(tf.int32,
                                            shape=FLAGS.batch_size,
                                            name='target_pl')

        loss = loss_supervised(logits, labels_placeholder)
        train_op, global_step = training(loss, FLAGS.supervised_learning_rate)
        eval_correct = evaluation(logits, labels_placeholder)

        hist_summaries = [ae['biases{0}'.format(i + 1)]
                          for i in range(ae.num_hidden_layers + 1)]
        hist_summaries.extend([ae['weights{0}'.format(i + 1)]
                               for i in range(ae.num_hidden_layers + 1)])

        hist_summaries = [tf.summary.histogram(v.op.name + "_fine_tuning", v)
                          for v in hist_summaries]
        summary_op = tf.summary.merge(hist_summaries)

        summary_writer = tf.summary.FileWriter(FLAGS.summary_dir, sess.graph_def)
        # tf.train.SummaryWriter(pjoin(FLAGS.summary_dir, 'fine_tuning'),
        #                        graph_def=sess.graph_def,
        #                        flush_secs=FLAGS.flush_secs)

        vars_to_init = ae.get_variables_to_init(ae.num_hidden_layers + 1)
        vars_to_init.append(global_step)
        # sess.run(tf.initialize_variables(vars_to_init))
        init = tf.initialize_all_variables()
        sess.run(init)
        steps = num_train // FLAGS.batch_size
        for k in range(1):
            for step in range(1):
                start_time = time.time()

                feed_dict = fill_feed_dict(data.train,
                                           input_pl,
                                           labels_placeholder)

                _, loss_value = sess.run([train_op, loss],
                                         feed_dict=feed_dict)

                duration = time.time() - start_time

                # Write the summaries and print an overview fairly often.
                if step % 1 == 0:
                    # Print status to stdout.
                    print('Step %d/%d: loss = %.2f (%.3f sec)' %
                          (step, steps, loss_value, duration))
                    # Update the events file.
                    summary_str = sess.run(summary_op, feed_dict=feed_dict)
                    summary_writer.add_summary(summary_str, step)
                    # summary_img_str = sess.run(
                    #     tf.summary.image("training_images",
                    #                      tf.reshape(input_pl,
                    #                                 (FLAGS.batch_size,
                    #                                  FLAGS.image_size,
                    #                                  FLAGS.image_size, 1)),
                    #                      max_outputs=10),
                    #     feed_dict=feed_dict)
                    # summary_writer.add_summary(summary_img_str)

                if (step + 1) % 1000 == 0 or (step + 1) == steps:
                    train_sum = do_eval_summary("training_error",
                                                sess,
                                                eval_correct,
                                                input_pl,
                                                labels_placeholder,
                                                data.train)

                    val_sum = do_eval_summary("validation_error",
                                              sess,
                                              eval_correct,
                                              input_pl,
                                              labels_placeholder,
                                              data.validation)

                    test_sum = do_eval_summary("test_error",
                                               sess,
                                               eval_correct,
                                               input_pl,
                                               labels_placeholder,
                                               data.test)

                    summary_writer.add_summary(train_sum, step)
                    summary_writer.add_summary(val_sum, step)
                    summary_writer.add_summary(test_sum, step)

        folder = "model_sps_" + str(strftime("%Y-%m-%d_%H:%M:%S", gmtime()))
        os.mkdir(folder)
        folder += "/model"
        saver.save(sess, folder)
        do_eval(sess,
                eval_correct,
                input_pl,
                labels_placeholder,
                data.test)

And this is how I set up the variables and try to restore them through the autoencoder_variables scope:

def _restore_variables(self):
    # The restored graph does contain
    # <tf.Variable 'autoencoder_variables/weights1:0' shape=(784, 2000)>,
    # yet tf.get_variable below fails to find it.
    with tf.variable_scope("autoencoder_variables", reuse=True) as scope1:
        for i in range(self.__num_hidden_layers + 1):
            # Train weights
            name_w = self._weights_str.format(i + 1)
            w_shape = (self.__shape[i], self.__shape[i + 1])
            self[name_w] = tf.get_variable(name_w, w_shape, trainable=False)

            # Train biases
            name_b = self._biases_str.format(i + 1)
            b_shape = (self.__shape[i + 1],)
            self[name_b] = tf.get_variable(name_b, b_shape)

            if i < self.__num_hidden_layers:
                # Hidden layer fixed weights (after pretraining, before fine-tuning)
                self[name_w + "_fixed"] = tf.get_variable(name_w + "_fixed", w_shape)

                # Hidden layer fixed biases
                self[name_b + "_fixed"] = tf.get_variable(name_b + "_fixed", b_shape)

                # Pretraining output training biases
                name_b_out = self._biases_str.format(i + 1) + "_out"
                b_shape = (self.__shape[i],)
                self[name_b_out] = tf.get_variable(name_b_out, b_shape)
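An alternative I am considering (a sketch only, as a method of AutoEncoder replacing the version above; it assumes the checkpoint stores variables under names like autoencoder_variables/weights1 and that the class keeps its session in self.session, as ae.session in the training code suggests): skip tf.get_variable entirely, create plain tf.Variable objects, and let a tf.train.Saver with an explicit var_list map the checkpoint names onto them.

def _restore_variables(self, checkpoint_dir):
    name_to_var = {}
    for i in range(self.__num_hidden_layers + 1):
        name_w = self._weights_str.format(i + 1)          # e.g. "weights1"
        w_shape = (self.__shape[i], self.__shape[i + 1])
        self[name_w] = tf.Variable(tf.zeros(w_shape), name=name_w,
                                   trainable=False)
        name_to_var["autoencoder_variables/" + name_w] = self[name_w]

        name_b = self._biases_str.format(i + 1)           # e.g. "biases1"
        b_shape = (self.__shape[i + 1],)
        self[name_b] = tf.Variable(tf.zeros(b_shape), name=name_b)
        name_to_var["autoencoder_variables/" + name_b] = self[name_b]

    # Keys are the (assumed) names inside the checkpoint; values are the
    # freshly created variables they should be loaded into.
    saver = tf.train.Saver(var_list=name_to_var)
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    saver.restore(self.session, ckpt.model_checkpoint_path)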

Error traceback:

File "run.py", line 47, in <module> 
    main() 
    File "run.py", line 40, in main 
    test.test_nets_1() 
    File "C:\Users\simjs\Downloads\TensorFlowDeepAutoencoder-master\TensorFlowDeepAutoencoder-master\code\ae\autoencoder_test.py", line 55, in test_nets_1 
    ae = AutoEncoder(ae_shape, sess) 
    File "C:\Users\simjs\Downloads\TensorFlowDeepAutoencoder-master\TensorFlowDeepAutoencoder-master\code\ae\autoencoder.py", line 41, in __init__ 
    self._restore_variables() 
    File "C:\Users\simjs\Downloads\TensorFlowDeepAutoencoder-master\TensorFlowDeepAutoencoder-master\code\ae\autoencoder.py", line 98, in _restore_variables 
    self[name_w] = tf.get_variable(name_w,w_shape) 
    File "C:\Users\simjs\Anaconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 1065, in get_variable 
    use_resource=use_resource, custom_getter=custom_getter) 
    File "C:\Users\simjs\Anaconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 962, in get_variable 
    use_resource=use_resource, custom_getter=custom_getter) 
    File "C:\Users\simjs\Anaconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 367, in get_variable 
    validate_shape=validate_shape, use_resource=use_resource) 
    File "C:\Users\simjs\Anaconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 352, in _true_getter 
    use_resource=use_resource) 
    File "C:\Users\simjs\Anaconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 682, in _get_single_variable 
    "VarScope?" % name) 
ValueError: Variable autoencoder_variables/weights1 does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope? 

  1. Check which variables were not restored by printing print(sess.run(tf.report_uninitialized_variables())) after the restore, before writing to them.

  2. Also try running sess.run(tf.global_variables_initializer()) before restoring, and inspect the result; see the sketch after this list.
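For example, both checks combined (a sketch reusing the checkpoint path from the question; the order matters: initialize first, then restore, so the restored values overwrite the fresh ones and not the other way around):

import tensorflow as tf

sess = tf.Session()
saver = tf.train.import_meta_graph('model_sps_2017-08-29_11:45:25/model.meta')

# (2) Initialize everything first...
sess.run(tf.global_variables_initializer())

# ...then restore, so the checkpoint values win.
ckpt = tf.train.get_checkpoint_state('model_sps_2017-08-29_11:45:25')
saver.restore(sess, ckpt.model_checkpoint_path)

# (1) Anything printed here was neither initialized nor restored.
print(sess.run(tf.report_uninitialized_variables()))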


I did this, and during the restore session I found that the AutoEncoder class constructor creates the weight and bias variables again when it calls self._setup_variables(). I think the weight and bias variables from the trained model should be initialized and bound to the autoencoder's keys (ae['weight_n'] -> weight_n). Do you have any idea about this?


Now I get: line 68, in __getitem__, return self.__variables[item], KeyError: 'weights1' –


Could you update your question to include what you tried in the comments? Whatever you decide: if you want '_setup_variables' to reuse the same variables when you call it again, wrap the creation in 'with tf.variable_scope("autoencoder_variables", reuse=True)'; no new variables will be created and the old ones will be reused. – prometeu
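A minimal sketch of the reuse pattern prometeu describes; note it only works if the variables were created with tf.get_variable in the first place, which is exactly what the ValueError above is complaining about:

import tensorflow as tf

def setup_variables(reuse=False):
    with tf.variable_scope("autoencoder_variables", reuse=reuse):
        # First call (reuse=False) creates autoencoder_variables/weights1;
        # second call (reuse=True) returns the very same variable object.
        return tf.get_variable("weights1", shape=(784, 2000))

w_first = setup_variables(reuse=False)
w_again = setup_variables(reuse=True)
assert w_first is w_again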