
Restoring a TensorFlow model does not give correct results

I am still trying to figure out how to restore a saved TensorFlow graph from disk and feed a dictionary to the model. I have looked at multiple sources but cannot troubleshoot this. The generic MLP code below (first snippet) saves its files to disk, but after restoring (second snippet), the accuracy returns a value of None. What is the reason for this?

# Import MNIST data 
from tensorflow.examples.tutorials.mnist import input_data 
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) 
import tensorflow as tf 

# Parameters 
learning_rate = 0.001 
training_epochs = 15 
batch_size = 100 
display_step = 1 

# Network Parameters 
n_hidden_1 = 256 # 1st layer number of features 
n_hidden_2 = 256 # 2nd layer number of features 
n_input = 784 # MNIST data input (img shape: 28*28) 
n_classes = 10 # MNIST total classes (0-9 digits) 

with tf.name_scope('placeholders'): 
# tf Graph input 
    x = tf.placeholder("float", [None, n_input],name='x') 
    y = tf.placeholder("float", [None, n_classes],name='y') 

with tf.name_scope('Layer-1'): 
    NN_weights_1=tf.Variable(tf.random_normal([n_input, n_hidden_1],seed=1),name='NN_weights_1') 
    NN_biases_1=tf.Variable(tf.constant(0.0,shape=[n_hidden_1],name='Const'),name='NN_biases_1') 
    func=tf.add(tf.matmul(x, NN_weights_1,name='matmul'), NN_biases_1,name='Addition') 
    func_2=tf.nn.relu(func) 

with tf.name_scope('Layer-2'): 
    NN_weights_2=tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2],seed=2),name='NN_weights_2') 
    NN_biases_2=tf.Variable(tf.constant(0.0,shape=[n_hidden_2],name='Const'),name='NN_biases_2') 
    func_3=tf.add(tf.matmul(func_2, NN_weights_2,name='matmul'), NN_biases_2,name='Addition') 
    func_4=tf.nn.relu(func_3) 

with tf.name_scope('Output'): 
    NN_weights_3=tf.Variable(tf.random_normal([n_hidden_2, n_classes],seed=3),name='NN_weights_3') 
    NN_biases_3=tf.Variable(tf.constant(0.0,shape=[n_classes],name='Const'),name='NN_biases_3') 
    func_3=tf.add(tf.matmul(func_4, NN_weights_3,name='matmul'), NN_biases_3,name='Addition') 
    func_4=tf.nn.sigmoid(func_3) 

# Define loss and optimizer 
with tf.name_scope('Operations_'): 
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=func_4, labels=y),name='cost') 
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) 
    # Test model 
    correct_prediction = tf.equal(tf.argmax(func_4, 1), tf.argmax(y, 1),name='correct_prediction') 
    # Calculate accuracy 
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"),name='accuracy') 
    # Initializing the variables 
    init = tf.global_variables_initializer() 

# Launch the graph 
with tf.Session() as sess: 
    sess.run(init) 
    saver = tf.train.Saver() 

    # Training cycle 
    for epoch in range(training_epochs): 
        avg_cost = 0. 
        total_batch = int(mnist.train.num_examples/batch_size) 
        # Loop over all batches 
        for i in range(total_batch): 
            batch_x, batch_y = mnist.train.next_batch(batch_size) 
            # Run optimization op (backprop) and cost op (to get loss value) 
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, 
                                                          y: batch_y}) 
            # Compute average loss 
            avg_cost += c/total_batch 
        # Display logs per epoch step 
        if epoch % display_step == 0: 
            print("Epoch:", '%04d' % (epoch+1), "cost=", 
                  "{:.9f}".format(avg_cost)) 
    print("Optimization Finished!") 
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})) 
    saver.save(sess, 'my_test_model', global_step=1000) 

When the model is restored and a feed dict is passed to evaluate the accuracy:

import tensorflow as tf 
from tensorflow.examples.tutorials.mnist import input_data 

# Load the test data that will be fed to the restored graph 
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) 

sess = tf.Session() 
# First let's load the meta graph and restore the weights 
saver = tf.train.import_meta_graph('my_test_model-1000.meta') 
saver.restore(sess, "my_test_model-1000") 
graph = tf.get_default_graph() 
accuracy=graph.get_operation_by_name("Operations_/accuracy") 
# Access saved Variables directly 
print(sess.run('Layer-1/NN_weights_1:0')) 
# This prints the restored values of NN_weights_1 

print ("Accuracy:", sess.run([accuracy],feed_dict={'placeholders/x:0': mnist.test.images, 'placeholders/y:0': mnist.test.labels})) 

Answer


Change it to:

accuracy=graph.get_operation_by_name("Operations_/accuracy").outputs[0] 
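Equivalently (an alternative not spelled out in the original answer, but standard TF 1.x API), the output tensor can be fetched by name, since the first output of an op named Operations_/accuracy is the tensor Operations_/accuracy:0:

accuracy = graph.get_tensor_by_name("Operations_/accuracy:0") 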

graph.get_operation_by_name returns a tf.Operation, and TensorFlow discards the outputs of an Operation when it is executed by means of Session.run, so the fetch comes back as None. The op's output tensor, accessed via .outputs[0], does return its value when run. For a detailed explanation see TensorFlow: eval restored graph.
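The distinction is easy to reproduce on a toy graph. Below is a minimal sketch (TensorFlow 1.x, using a made-up op name add_op) showing that running a tf.Operation returns None, while running its output tensor returns the computed value:

import tensorflow as tf 

a = tf.constant(2.0, name='a') 
b = tf.add(a, a, name='add_op')  # b is the first output tensor of op 'add_op' 

graph = tf.get_default_graph() 
op = graph.get_operation_by_name('add_op')      # a tf.Operation 
tensor = graph.get_tensor_by_name('add_op:0')   # the op's first output tensor 

with tf.Session() as sess: 
    print(sess.run(op))             # None -- Operations have no run value 
    print(sess.run(tensor))         # 4.0 
    print(sess.run(op.outputs[0]))  # 4.0 -- the same tensor as above 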


Your answer was really helpful, thank you. – mamafoku
