
I want to build a fully connected neural network with TensorFlow. While learning how to do this, I ran into an error.

import tensorflow as tf 
from sklearn.metrics import confusion_matrix 
import numpy as np 

import load 

def get_chunk(samples, labels, chunkSize): 
    if len(samples) != len(labels): 
        raise Exception('dataset is wrong!!!!!') 

    print(np.shape(labels)) 
    stepStart = 0 
    i = 0 
    while stepStart < len(samples): 
        stepEnd = stepStart + chunkSize 
        if stepEnd < len(samples): 
            print('==================this is the shape of placeholder======================') 
            print(np.shape(labels[stepStart: stepEnd])) 
            print('------------------------------------------------------------------------') 
            yield i, samples[stepStart:stepEnd], labels[stepStart: stepStart] 
            print('========================================================================') 
            i += 1 
        stepStart = stepEnd 

class Network(): 
    def __init__(self, num_hidden, batch_size): 
        self.batch_size = batch_size 
        self.test_batch_size = 500 
        self.num_hidden = num_hidden 

        self.image_size = load.image_size 
        self.num_channels = 1 

        self.graph = tf.Graph() 
        self.tf_train_samples = None 
        self.tf_train_labels = None 
        self.tf_test_samples = None 
        self.tf_test_labels = None 
    def define_graph(self): 
        with self.graph.as_default(): 
            self.tf_train_samples = tf.placeholder(tf.float32, shape=(self.batch_size, image_size, image_size, num_channels)) 
            self.tf_train_labels = tf.placeholder(tf.float32, shape=(self.batch_size, num_labels)) 
            self.tf_test_samples = tf.placeholder(tf.float32, shape=(self.test_batch_size, image_size, image_size, num_channels)) 

            fc1_weights = tf.Variable(tf.truncated_normal([image_size*image_size, self.num_hidden], stddev=0.1)) 
            fc1_biases = tf.Variable(tf.constant(0.1, shape=[self.num_hidden])) 

            fc2_weights = tf.Variable(tf.truncated_normal([self.num_hidden, num_labels], stddev=0.1)) 
            fc2_biases = tf.Variable(tf.constant(0.1, shape=[num_labels])) 

            def model(data): 
                shape = data.get_shape().as_list() 
                print('this is model() ============================================') 
                print(data.get_shape(), shape) 
                reshape = tf.reshape(data, [shape[0], shape[1]*shape[2]*shape[3]]) 
                print(reshape.get_shape(), fc1_weights.get_shape(), fc1_biases.get_shape()) 
                print('model ended ================================================') 
                hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases) 

                return tf.matmul(hidden, fc2_weights) + fc2_biases 

            logits = model(self.tf_train_samples) 
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.tf_train_labels)) 
            self.optimizer = tf.train.GradientDescentOptimizer(0.0001).minimize(self.loss) 

            self.train_prediction = tf.nn.softmax(logits) 
            self.test_prediction = tf.nn.softmax(model(self.tf_test_samples)) 

    def run(self): 
        self.session = tf.Session(graph=self.graph) 
        with self.session as session: 
            tf.initialize_all_variables().run() 
            #tf.global_variables_initializer().run() 

            print('start Training') 
            for i, samples, labels in get_chunk(train_samples, train_labels, chunkSize=self.batch_size): 
                print('this is the start of get placeholder') 
                _, l, predictions = session.run(
                    [self.optimizer, self.loss, self.train_prediction], 
                    feed_dict={self.tf_train_samples: samples, self.tf_train_labels: labels} 
                ) 
                accuracy, _ = self.accuracy(predictions, labels) 
                if i % 50 == 0: 
                    print('accuracy:' + str(accuracy)) 
    def accuracy(self, predictions, labels, need_confusion_matrix=False): 
        _predictions = np.argmax(predictions, 1) 
        _labels = np.argmax(labels, 1) 
        cm = confusion_matrix(_labels, _predictions) if need_confusion_matrix else None 

        accuracy = (100.0 * np.sum(_predictions == _labels) / predictions.shape[0]) 
        return accuracy, cm 

if __name__ == '__main__': 
    train_samples, train_labels = load._train_samples, load._train_labels 
    test_samples, test_labels = load._test_samples, load._test_labels 

    print('Training set', train_samples.shape, train_labels.shape) 
    print(' Test set', test_samples.shape, test_labels.shape) 

    image_size = load.image_size 
    num_labels = load.num_labels 
    num_channels = 1  # load.channels 
    net = Network(num_hidden=128, batch_size=100) 
    net.define_graph() 
    net.run() 

And when I run the code, it gives an error like this, which seems to mean I am using the placeholder incorrectly:

[email protected]:~/code/number_test$ python bp.py 
Training set (73257, 32, 32, 1) (73257, 10) 
    Test set (26032, 32, 32, 1) (26032, 10) 
this is model() ============================================ 
(100, 32, 32, 1) [100, 32, 32, 1] 
(100, 1024) (1024, 128) (128,) 
model ended ================================================ 
this is model() ============================================ 
(500, 32, 32, 1) [500, 32, 32, 1] 
(500, 1024) (1024, 128) (128,) 
model ended ================================================ 
2017-05-27 13:48:07.343556: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations. 
2017-05-27 13:48:07.343640: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations. 
2017-05-27 13:48:07.343653: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations. 
2017-05-27 13:48:07.343661: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX2 instructions, but these are available on your machine and could speed up CPU computations. 
2017-05-27 13:48:07.343668: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use FMA instructions, but these are available on your machine and could speed up CPU computations. 
WARNING:tensorflow:From bp.py:75: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02. 
Instructions for updating: 
Use `tf.global_variables_initializer` instead. 
start Training 
(73257, 10) 
==================this is the shape of placeholder====================== 
(100, 10) 
------------------------------------------------------------------------ 
this is the start of get placeholder 
Traceback (most recent call last): 
    File "bp.py", line 110, in <module> 
    net.run() 
    File "bp.py", line 83, in run 
    feed_dict={ self.tf_train_samples:samples, self.tf_train_labels:labels}  
    File "/home/panda/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 778, in run 
    run_metadata_ptr) 
    File "/home/panda/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 961, in _run 
    % (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape()))) 
ValueError: Cannot feed value of shape (0, 10) for Tensor 'Placeholder_1:0', which has shape '(100, 10)' 
[email protected]:~/code/number_test$ 

I guess it is not an environment problem: I learned this program from this github enter link description here and copied some of the code here, and I can run that code. But I cannot find anything else about where I went wrong. Could someone give me some help?


My guess is that the loader is not loading the samples correctly. Try printing out the shape of the samples and take a look. – Jason


Isn't that information already in the code above? Training set (73257, 32, 32, 1) (73257, 10) and Test set (26032, 32, 32, 1) (26032, 10) are the data read by load. I learned this code from github and the original code runs fine, so I can promise the load module is correct; the problem is in the code in this question. – panda404

Answer


Finally I printed out my code and the github demo side by side and found the reason: the function get_chunk yields labels[stepStart: stepStart], so every labels chunk is empty; it should slice with stepEnd, as the demo does. This is my wrong version:

def get_chunk(samples, labels, chunkSize): 
    if len(samples) != len(labels): 
        raise Exception('dataset is wrong!!!!!') 

    print(np.shape(labels)) 
    stepStart = 0 
    i = 0 
    while stepStart < len(samples): 
        stepEnd = stepStart + chunkSize 
        if stepEnd < len(samples): 
            print('==================this is the shape of placeholder======================') 
            print(np.shape(labels[stepStart: stepEnd])) 
            print('------------------------------------------------------------------------') 
            yield i, samples[stepStart:stepEnd], labels[stepStart: stepStart] 
            print('========================================================================') 
            i += 1 
        stepStart = stepEnd 
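The fix is simply to slice the labels with stepEnd as well, so each chunk has shape (chunkSize, num_labels) instead of (0, num_labels). A minimal corrected sketch (the demo's exact code is only shown in the screenshots below):

def get_chunk(samples, labels, chunkSize): 
    # generator yielding aligned sample/label batches of chunkSize rows
    if len(samples) != len(labels): 
        raise Exception('dataset is wrong!!!!!') 

    stepStart = 0 
    i = 0 
    while stepStart < len(samples): 
        stepEnd = stepStart + chunkSize 
        if stepEnd < len(samples): 
            # slice both arrays over the same stepStart:stepEnd range
            yield i, samples[stepStart:stepEnd], labels[stepStart:stepEnd] 
            i += 1 
        stepStart = stepEnd 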

Here are the pictures of what I printed:

the wrong code of mine

the right code from demo
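A quick sanity check with dummy arrays of the same shapes as in the log above (a minimal sketch, assuming the corrected get_chunk) shows that each yielded labels chunk now matches the (100, 10) placeholder:

import numpy as np 

# dummy data shaped like the dataset in the question: 73257 samples, 10 one-hot labels
samples = np.zeros((73257, 32, 32, 1), dtype=np.float32) 
labels = np.zeros((73257, 10), dtype=np.float32) 

for i, s, l in get_chunk(samples, labels, chunkSize=100): 
    # with the stepStart:stepStart bug this would be (0, 10) and session.run would raise
    assert l.shape == (100, 10) 
    break 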

Thanks, Jason, for answering my question.
