Tensorflow Image Segmentation via Linear Regression

Previously, I built a network that did binary image segmentation (foreground vs. background); I implemented it as a two-class classification. Now, instead of binary classification, I want to do linear regression on each pixel.
Suppose there is a 3D surface somewhere within the image view. I want to segment the middle of that surface with a linear value of 10, and the edges of the surface with 5. All the voxels in between are, of course, in the 5-10 range. Then, as voxels move away from the surface, their values quickly fall to zero.
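To make that concrete, here is a toy 1-D cross-section of the kind of ground truth I mean (the half-width and decay rate here are arbitrary, purely for illustration):

import numpy as np

# Toy cross-section: 10 at the surface centre, 5 at the surface edges,
# and a rapid falloff to ~0 away from the surface.
half_width = 3                                 # arbitrary surface half-width, in voxels
xs = np.arange(-10, 11)                        # voxel offsets from the surface centre
profile = np.where(
    np.abs(xs) <= half_width,
    10.0 - 5.0 * np.abs(xs) / half_width,      # linear ramp: 10 at centre, 5 at edge
    5.0 * np.exp(-(np.abs(xs) - half_width)),  # quick decay to zero beyond the edge
)
print(np.round(profile, 2))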
With binary classification, I had one image with 1s at the foreground locations and another image with 1s at the background locations -- in other words, a classification. Now I want just a single ground-truth image, with values like those described above. Going by this linear regression example, I assumed I could simply change the cost function to a least-squares one, cost = tf.square(y - pred). And of course, I would change the ground truth accordingly.
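Concretely, the only change I expected to need in the graph was the cost. A minimal sketch of what I mean, assuming pred and y are the flattened [batch_size, n_input_x * n_input_y] tensors from the full code below (the reduce_mean reduction is my choice here):

# Regression cost: per-pixel squared error, reduced to a scalar
cost = tf.reduce_mean(tf.square(y - pred))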
However, when I do this, my predictions come out as NaN. My last layer is a linear sum of matrix weight values multiplied by the final output. My guess is that this has something to do with it?
So I believe cost = tf.square(y - pred) is the source of the problem. I can't use the tf.nn.softmax() function here, because it would normalize the values to between 0 and 1. I tried this next: cost = tf.reduce_sum(tf.square(y - pred)) -- and it didn't work.
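(To show why softmax is off the table: it squashes any output vector into (0, 1) values that sum to 1, which throws away the 0-10 scale I need. A plain NumPy illustration:)

import numpy as np

# softmax collapses a 0..10 regression target into a probability vector
v = np.array([0.0, 5.0, 10.0])
s = np.exp(v - v.max()) / np.exp(v - v.max()).sum()
print(s)  # ~[4.5e-05, 6.7e-03, 9.9e-01] -- the 0..10 scale is gone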
So then I tried this (recommended here): cost = tf.reduce_sum(tf.pow(pred - y, 2)) / (2 * batch_size) -- and it didn't work either.
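For what it's worth, the costs I tried differ only in scale (which changes the effective learning rate), so I don't think the choice of reduction alone explains the NaN. A quick NumPy check with made-up data of the same shape:

import numpy as np

batch_size, n_pixels = 2, 396 * 396
pred = np.random.rand(batch_size, n_pixels)
y = np.random.rand(batch_size, n_pixels)
sq = (pred - y) ** 2

print(sq.sum())                     # tf.reduce_sum(tf.square(y - pred))
print(sq.sum() / (2 * batch_size))  # tf.reduce_sum(tf.pow(pred - y, 2)) / (2 * batch_size)
print(sq.mean())                    # reduce_mean equivalent: per-pixel average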
Should I initialize the weights differently? Normalize the weights?
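For context: tf.random_normal defaults to stddev=1.0, and with weights that large the activations grow by roughly a factor of sqrt(fan_in) per layer, so squaring them in the cost can overflow to inf/NaN. A rough NumPy sketch of the effect (a single reused 128x128 matrix stands in for the stacked layers):

import numpy as np

np.random.seed(0)
fan_in = 128                        # n_hidden in the code below
x_in = np.random.randn(1, fan_in)

for stddev in (1.0, 0.1, 0.01):
    W = np.random.randn(fan_in, fan_in) * stddev
    h = x_in
    for _ in range(10):             # ten stacked linear + relu layers
        h = np.maximum(h.dot(W), 0.0)
    print(stddev, np.abs(h).max())  # explodes for 1.0, stays tame for 0.1 / 0.01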
The full code, as requested in the comments, follows:
import tensorflow as tf
import pdb
import numpy as np
from numpy import genfromtxt
from PIL import Image
from tensorflow.python.ops import rnn, rnn_cell
from tensorflow.contrib.learn.python.learn.datasets.scroll import scroll_data
# Parameters
learning_rate = 0.001
training_iters = 1000000
batch_size = 2
display_step = 1
# Network Parameters
n_input_x = 396 # Input image x-dimension
n_input_y = 396 # Input image y-dimension
n_classes = 1 # Binary classification -- on a surface or not
n_steps = 396
n_hidden = 128
n_output = n_input_y * n_classes
dropout = 0.75 # Dropout, probability to keep units
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input_x, n_input_y])
y = tf.placeholder(tf.float32, [None, n_input_x * n_input_y], name="ground_truth")
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
    # Conv2D wrapper, with bias and relu activation
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

def maxpool2d(x, k=2):
    # MaxPool2D wrapper
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')

def deconv2d(prev_layer, w, b, output_shape, strides):
    # Deconv layer
    deconv = tf.nn.conv2d_transpose(prev_layer, w, output_shape=output_shape, strides=strides, padding="VALID")
    deconv = tf.nn.bias_add(deconv, b)
    deconv = tf.nn.relu(deconv)
    return deconv
# Create model
def net(x, cnn_weights, cnn_biases, dropout):
    # Reshape input picture
    x = tf.reshape(x, shape=[-1, 396, 396, 1])
    with tf.name_scope("conv1") as scope:
        # Convolution Layer
        conv1 = conv2d(x, cnn_weights['wc1'], cnn_biases['bc1'])
        # Max Pooling (down-sampling)
        # conv1 = tf.nn.local_response_normalization(conv1)
        conv1 = maxpool2d(conv1, k=2)
    # Convolution Layer
    with tf.name_scope("conv2") as scope:
        conv2 = conv2d(conv1, cnn_weights['wc2'], cnn_biases['bc2'])
        # Max Pooling (down-sampling)
        # conv2 = tf.nn.local_response_normalization(conv2)
        conv2 = maxpool2d(conv2, k=2)
    # Convolution Layer
    with tf.name_scope("conv3") as scope:
        conv3 = conv2d(conv2, cnn_weights['wc3'], cnn_biases['bc3'])
        # Max Pooling (down-sampling)
        # conv3 = tf.nn.local_response_normalization(conv3)
        conv3 = maxpool2d(conv3, k=2)

    temp_batch_size = tf.shape(x)[0]  # batch_size shape
    with tf.name_scope("deconv1") as scope:
        output_shape = [temp_batch_size, 99, 99, 64]
        strides = [1, 2, 2, 1]
        # conv4 = deconv2d(conv3, weights['wdc1'], biases['bdc1'], output_shape, strides)
        deconv = tf.nn.conv2d_transpose(conv3, cnn_weights['wdc1'], output_shape=output_shape, strides=strides, padding="SAME")
        deconv = tf.nn.bias_add(deconv, cnn_biases['bdc1'])
        conv4 = tf.nn.relu(deconv)
        # conv4 = tf.nn.local_response_normalization(conv4)
    with tf.name_scope("deconv2") as scope:
        output_shape = [temp_batch_size, 198, 198, 32]
        strides = [1, 2, 2, 1]
        conv5 = deconv2d(conv4, cnn_weights['wdc2'], cnn_biases['bdc2'], output_shape, strides)
        # conv5 = tf.nn.local_response_normalization(conv5)
    with tf.name_scope("deconv3") as scope:
        output_shape = [temp_batch_size, 396, 396, 1]
        # this time don't use ReLu -- since output layer
        conv6 = tf.nn.conv2d_transpose(conv5, cnn_weights['wdc3'], output_shape=output_shape, strides=[1, 2, 2, 1], padding="VALID")
        conv6 = tf.nn.bias_add(conv6, cnn_biases['bdc3'])
        # Include dropout
        # conv6 = tf.nn.dropout(conv6, dropout)

    x = tf.reshape(conv6, [-1, n_input_x, n_input_y])
    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshaping to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input_x])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input_x)
    # This input shape is required by `rnn` function
    x = tf.split(0, n_steps, x)

    # Define a lstm cell with tensorflow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True, activation=tf.nn.relu)
    # lstm_cell = rnn_cell.MultiRNNCell([lstm_cell] * 12, state_is_tuple=True)
    # lstm_cell = rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=0.8)
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation on each of the 396 row outputs
    # pdb.set_trace()
    output = []
    for i in xrange(396):
        output.append(tf.matmul(outputs[i], lstm_weights[i]) + lstm_biases[i])
    return output
cnn_weights = {
    # 5x5 conv, 1 input, 32 outputs
    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
    # 5x5 conv, 32 inputs, 64 outputs
    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
    # 5x5 conv, 64 inputs, 128 outputs
    'wc3': tf.Variable(tf.random_normal([5, 5, 64, 128])),
    'wdc1': tf.Variable(tf.random_normal([2, 2, 64, 128])),
    'wdc2': tf.Variable(tf.random_normal([2, 2, 32, 64])),
    'wdc3': tf.Variable(tf.random_normal([2, 2, 1, 32])),
}
cnn_biases = {
    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bc3': tf.Variable(tf.random_normal([128])),
    'bdc1': tf.Variable(tf.random_normal([64])),
    'bdc2': tf.Variable(tf.random_normal([32])),
    'bdc3': tf.Variable(tf.random_normal([1])),
}
lstm_weights = {}
lstm_biases = {}
for i in xrange(396):
    lstm_weights[i] = tf.Variable(tf.random_normal([n_hidden, n_output]))
    lstm_biases[i] = tf.Variable(tf.random_normal([n_output]))
# Construct model
# with tf.name_scope("net") as scope:
pred = net(x, cnn_weights, cnn_biases, keep_prob)
# pdb.set_trace()
pred = tf.pack(pred)
pred = tf.transpose(pred, [1, 0, 2])
pred = tf.reshape(pred, [-1, n_input_x * n_input_y])

with tf.name_scope("opt") as scope:
    # cost = tf.reduce_sum(tf.square(y - pred))
    cost = tf.reduce_sum(tf.pow((pred - y), 2)) / (2 * batch_size)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
with tf.name_scope("acc") as scope:
    # accuracy is the difference between prediction and ground truth matrices
    correct_pred = tf.equal(0, tf.cast(tf.sub(cost, y), tf.int32))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()
saver = tf.train.Saver()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    summary = tf.train.SummaryWriter('/tmp/logdir/', sess.graph)  # initialize graph for tensorboard
    step = 1
    # Import data
    data = scroll_data.read_data('/home/kendall/Desktop/')
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = data.train.next_batch(batch_size)
        # Run optimization op (backprop)
        # pdb.set_trace()
        batch_x = batch_x.reshape((batch_size, n_input_x, n_input_y))
        batch_y = batch_y.reshape(batch_size, n_input_x * n_input_y)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        step = step + 1
        if step % display_step == 0:
            batch_y = batch_y.reshape(batch_size, n_input_x * n_input_y)
            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                              y: batch_y})
            # Make prediction
            im = Image.open('/home/kendall/Desktop/cropped/temp data0001.tif')
            batch_x = np.array(im)
            batch_x = batch_x.reshape((1, n_input_x, n_input_y))
            batch_x = batch_x.astype(float)
            prediction = sess.run(pred, feed_dict={x: batch_x})
            prediction = prediction.reshape((1, n_input_x * n_input_y))
            prediction = tf.nn.softmax(prediction)
            prediction = prediction.eval()
            prediction = prediction.reshape((n_input_x, n_input_y))
            # my_accuracy = accuracy_custom(temp_arr1, batch_y[0, :, :, 0])
            # print "Step = " + str(step) + " | Accuracy = " + str(my_accuracy)
            print "Step = " + str(step) + " | Accuracy = " + str(acc)
            # csv_file = "CNN-LSTM-reg/CNNLSTMreg-step-" + str(step) + "-accuracy-" + str(my_accuracy) + ".csv"
            csv_file = "CNN-LSTM-reg/CNNLSTMreg-step-" + str(step) + "-accuracy-" + str(acc) + ".csv"
            np.savetxt(csv_file, prediction, delimiter=",")
First, try initializing your weights with a standard deviation of 0.1 or 0.01, and see what happens. –

@OlivierMoindrot initialized to 0.1, no luck. –

@OlivierMoindrot initialized to 0.01... and... success! Thank you, feel free to write an answer :) –
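To close the loop for future readers: per the comment thread above, the NaNs came from tf.random_normal's default stddev of 1.0, and initializing the weights with a small stddev (0.01 in my case) fixed training. A minimal sketch of the change (the init_var helper name is mine):

def init_var(shape, stddev=0.01):
    # A small stddev keeps early activations -- and the squared-error cost -- finite
    return tf.Variable(tf.random_normal(shape, stddev=stddev))

cnn_weights = {
    'wc1': init_var([5, 5, 1, 32]),
    'wc2': init_var([5, 5, 32, 64]),
    'wc3': init_var([5, 5, 64, 128]),
    'wdc1': init_var([2, 2, 64, 128]),
    'wdc2': init_var([2, 2, 32, 64]),
    'wdc3': init_var([2, 2, 1, 32]),
}
# ...and likewise for cnn_biases, lstm_weights, and lstm_biases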