2017-06-07

I am implementing the categorical generative adversarial network (CatGAN) described here. How should the loss function of a categorical generative adversarial net be interpreted?

[Jost T. Springenberg. Unsupervised and Semi-supervised Learning with Categorical Generative Adversarial Networks, April 2016]

[image of the loss function from p. 6 of the paper]

This is the loss function introduced on page 6 of the paper. What is odd about it is that it uses arg_max, whereas most optimizers available in frameworks such as TensorFlow only work with arg_min.

Could someone tell me how to implement this formula?
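
For what it's worth, my current understanding is that arg max_θ f(θ) = arg min_θ (−f(θ)), so a maximization objective can be handed to any TensorFlow optimizer by negating it first. Here is a minimal sketch of that idea (the input shape and the tiny dense layer are made up, and a single entropy term stands in for the full loss):

import tensorflow as tf

# toy stand-in for one entropy term of the CatGAN objective
x = tf.placeholder(tf.float32, shape=[None, 4])
logits = tf.layers.dense(x, 2)  # tiny "discriminator"
proba = tf.nn.softmax(logits)

# per-sample conditional entropy H[p(y|x, D)]
cond_entropy = -tf.reduce_sum(proba * tf.log(proba + 1e-8), axis=1)

# the paper asks us to MAXIMIZE this term ...
objective = -tf.reduce_mean(cond_entropy)
# ... and arg_max f == arg_min -f, so negate it before minimizing
train_op = tf.train.AdamOptimizer().minimize(-objective)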

Here is the code I have implemented so far.

import tensorflow as tf 
import numpy as np 
import PIL.Image as Image 
# constants 
X_dim = 256 
Y_dim = 2 
Z_dim = 256 * 256 
value_lambda = 1.0 

X = tf.placeholder(tf.float32, shape=[None, X_dim, X_dim, 1]) 
Y = tf.placeholder(tf.float32, shape=[None, Y_dim]) 
Z = tf.placeholder(tf.float32, shape=[None, Z_dim]) 

initializer = tf.contrib.layers.variance_scaling_initializer() 
activation_function = tf.nn.elu 
regularizer = tf.contrib.layers.l2_regularizer(0.5) 

custom_filter = np.ones(shape=[32, 256, 256, 1], dtype=np.float) 
custom_filter[:, 255, :, :] = 0 
custom_filter[:, :, 255, :] = 0 

custom_filter = tf.constant(custom_filter, dtype=tf.float32) 


def discriminator(x, name=None): 
    with tf.name_scope(name, "discriminator", [x]) as scope: 

     D_conv_1 = tf.layers.conv2d(inputs=x, filters=16, kernel_size=[ 
            5, 5], padding='SAME', activation=activation_function, kernel_regularizer=regularizer) 
     # [256, 256] 
     D_mean_pool_1 = tf.nn.pool(D_conv_1, window_shape=[ 
            2, 2], pooling_type='AVG', padding='VALID', strides=[2, 2]) 
     # [128, 128] 
     D_conv_2 = tf.layers.conv2d(D_mean_pool_1, filters=32, kernel_size=[ 
            3, 3], padding='SAME', activation=activation_function, kernel_regularizer=regularizer) 
     # [128, 128] 
     D_mean_pool_2 = tf.nn.pool(D_conv_2, window_shape=[ 
            2, 2], pooling_type='AVG', padding='VALID', strides=[2, 2]) 
     # [64, 64] 
     D_conv_3 = tf.layers.conv2d(D_mean_pool_2, filters=64, kernel_size=[ 
            3, 3], padding='SAME', activation=activation_function, kernel_regularizer=regularizer) 
     # [64, 64] 
     D_mean_pool_3 = tf.nn.pool(D_conv_3, window_shape=[ 
            2, 2], pooling_type='AVG', padding='VALID', strides=[2, 2]) 
     # [32, 32] 
     D_conv_4 = tf.layers.conv2d(D_mean_pool_3, filters=128, kernel_size=[ 
            3, 3], padding='SAME', activation=activation_function, kernel_regularizer=regularizer) 
     # [32, 32] 
     D_mean_pool_4 = tf.nn.pool(D_conv_4, window_shape=[ 
            2, 2], pooling_type='AVG', padding='VALID', strides=[2, 2]) 
     # [16, 16] 
     D_conv_5 = tf.layers.conv2d(D_mean_pool_4, filters=256, kernel_size=[ 
            3, 3], padding='SAME', activation=activation_function, kernel_regularizer=regularizer) 
     # [16, 16] 
     D_mean_pool_5 = tf.nn.pool(D_conv_5, window_shape=[ 
            4, 4], pooling_type='AVG', padding='VALID', strides=[4, 4]) 
     # [4, 4] 
     D_conv_6 = tf.layers.conv2d(D_mean_pool_5, filters=2, kernel_size=[ 
            3, 3], padding='SAME', activation=activation_function, kernel_regularizer=regularizer) 
     # [4, 4] 
     D_mean_pool_6 = tf.nn.pool(D_conv_6, window_shape=[ 
            4, 4], pooling_type='AVG', padding='VALID', strides=[4, 4]) 
     # [1, 1], and finally, [batch_size][1][1][2] 
     D_logit = tf.reshape(D_mean_pool_6, shape=[32, 2]) 
     # [batch_size][2] 

     return D_logit 

     ''' 
     D_hidden_layer_1 = tf.layers.dense(
      inputs=x, units=255, activation=activation_function) 
     D_hidden_layer_2 = tf.layers.dense(
      inputs=D_hidden_layer_1, units=16, activation=activation_function) 
     D_logit = tf.layers.dense(inputs=D_hidden_layer_2, units=Y_dim, 
            activation=activation_function) 

     return D_logit 
     ''' 


def generator(z, name=None): 
    with tf.name_scope(name, "generator", [z]) as scope: 
     # z: [32, 65536] (Z_dim = 256 * 256) 
     input = tf.reshape(z, shape=[32, 256, 256, 1]) 
     # input: [32, 256, 256, 1] 
     G_conv_1 = tf.layers.conv2d(input, filters=96, kernel_size=[ 
            8, 8], padding='SAME', activation=activation_function) 
     # [32, 64, 64, 96] 
     # G_upscaled_1 = tf.image.resize_bicubic(images=G_conv_1, size=[128, 128]) 
     # [32, 128, 128, 96] 
     G_conv_2 = tf.layers.conv2d(G_conv_1, filters=64, kernel_size=[ 
            5, 5], padding='SAME', activation=activation_function) 
     # [32, 128, 128, 64] 
     # G_upscaled_2 = tf.image.resize_bicubic(G_conv_2, size=[256, 256]) 
     # [32, 256, 256, 64] 
     G_conv_3 = tf.layers.conv2d(G_conv_2, filters=64, kernel_size=[ 
            5, 5], padding='SAME', activation=activation_function) 
     # [32, 256, 256, 64] 
     G_conv_4 = tf.layers.conv2d(G_conv_3, filters=1, kernel_size=[ 
            5, 5], padding='SAME', activation=activation_function) 

     # [32, 256, 256, 1] 
     G_logit = G_conv_4 * custom_filter 
     # [32, 256, 256, 1], but filtered out the last column and row 

     return G_logit 

     ''' 
     G_hidden_layer_1 = tf.layers.dense(
      inputs=z, units=255, activation=activation_function) 
     G_outputs = tf.layers.dense(inputs=G_hidden_layer_1, units=X_dim, 
            activation=activation_function) 

     return G_outputs 
     ''' 


with tf.name_scope("training") as scope: 
    # Getting samples from random data 
    G_sample = generator(Z) 
    # Getting logits 
    D_logit_real = discriminator(X) 
    D_logit_fake = discriminator(G_sample) 
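    # NOTE: because discriminator() only opens a tf.name_scope, these two
    # calls create two independent sets of conv weights; see the
    # variable_scope sketch after the code.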

    # Applying softmax 
    D_proba_real = tf.nn.softmax(logits=D_logit_real) 
    D_proba_real = tf.clip_by_value(
     D_proba_real, clip_value_min=1e-4, clip_value_max=1.0) 
    D_proba_fake = tf.nn.softmax(logits=D_logit_fake) 
    D_proba_fake = tf.clip_by_value(
     D_proba_fake, clip_value_min=1e-4, clip_value_max=1.0) 
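    # clipping keeps the softmax outputs away from 0 so that tf.log()
    # below never produces -inf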

    with tf.name_scope("category_1") as sub_scope: 
     # Getting Shannon's entropy in X's distribution 
     D_log_real = tf.log(D_proba_real) 
     D_entrophy_real = D_proba_real * D_log_real 
     D_mean_real = tf.reduce_sum(D_entrophy_real, axis=1) 
     D_mean_real = -D_mean_real 
     D_entrophy_real_mean = tf.reduce_mean(D_mean_real, axis=0) 
     D_entrophy_real_mean = tf.reshape(D_entrophy_real_mean, shape=[1]) 

    with tf.name_scope("category_2") as sub_scope: 
     # Getting Shannon's entropy in Z's distribution 
     G_log_fake = tf.log(D_proba_fake) 
     G_entrophy_fake = D_proba_fake * G_log_fake 
     G_mean = tf.reduce_sum(G_entrophy_fake, axis=1) 
     G_mean = -G_mean 
     G_entrophy_fake_mean = tf.reduce_mean(G_mean, axis=0) 
     G_entrophy_fake_mean = tf.reshape(G_entrophy_fake_mean, shape=[1]) 

    with tf.name_scope("category_3") as sub_scope: 
     # Getting Shannon's entropy between classes 
     D_class_mean = tf.reduce_mean(D_proba_real, axis=0, keep_dims=True) 
     D_class_mean_log = tf.log(D_class_mean) 
     D_class_entropy = D_class_mean * D_class_mean_log 
     D_class = tf.reduce_sum(D_class_entropy, axis=1) 
     D_class = -D_class 
     D_class = tf.reshape(D_class, shape=[1]) 

     G_class_mean = tf.reduce_mean(D_proba_fake, axis=0, keep_dims=True) 
     G_class_mean_log = tf.log(G_class_mean) 
     G_class_entrophy = G_class_mean * G_class_mean_log 
     G_class = tf.reduce_sum(G_class_entrophy, axis=1) 
     G_class = -G_class 
     G_class = tf.reshape(G_class, shape=[1]) 

    with tf.name_scope("supervised") as sub_scope: 
     # Getting cross-entropy for labeled data 
     D_labeled = Y * D_log_real 
     D_cross_entrophy = tf.reduce_sum(D_labeled, axis=1) 
     D_cross_entrophy = -D_cross_entrophy 
     D_supervised = tf.reduce_mean(D_cross_entrophy, axis=0) 
     D_supervised_weighted = value_lambda * D_supervised 
     D_supervised_weighted = tf.reshape(D_supervised_weighted, shape=[1]) 

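    # The discriminator objective from the paper is a maximization (arg_max);
    # it is built up here and then negated below so that AdamOptimizer,
    # which can only minimize, can be used.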
    D_loss = D_class - D_entrophy_real_mean + \ 
     G_entrophy_fake_mean + D_supervised_weighted 
    G_loss = -G_class + G_entrophy_fake_mean 
    D_loss = -D_loss 

    D_solver = tf.train.AdamOptimizer().minimize(D_loss) 
    G_solver = tf.train.AdamOptimizer().minimize(G_loss) 

# with tf.name_scope("testing") as scope: 
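
One more thing I noticed while putting this together: discriminator() only opens a tf.name_scope, so the two calls in the training block create two independent sets of weights, and AdamOptimizer().minimize() updates every trainable variable in the graph rather than one network's. A rough sketch of how weight sharing and per-network updates could look with tf.variable_scope (the tiny dense layers and their names are placeholders, not my real network):

import tensorflow as tf

X = tf.placeholder(tf.float32, shape=[None, 4])
Z = tf.placeholder(tf.float32, shape=[None, 4])

def generator(z):
    with tf.variable_scope("generator"):
        return tf.layers.dense(z, 4, name="g_fc")

def discriminator(x, reuse=False):
    # tf.variable_scope (not tf.name_scope) lets the second call reuse
    # the weights created by the first one
    with tf.variable_scope("discriminator", reuse=reuse):
        h = tf.layers.dense(x, 16, activation=tf.nn.elu, name="d_fc1")
        return tf.layers.dense(h, 2, name="d_logit")

D_logit_real = discriminator(X)
D_logit_fake = discriminator(generator(Z), reuse=True)

# placeholder losses, only so the example runs end to end
D_loss = tf.reduce_mean(D_logit_fake) - tf.reduce_mean(D_logit_real)
G_loss = -tf.reduce_mean(D_logit_fake)

# give each optimizer only its own network's variables
D_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="discriminator")
G_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="generator")
D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=D_vars)
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=G_vars)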

What have you tried? Let's see some code. – Alex


@Alex I've added the code! – user3551261


That's a lot of code, @user3551261. What would help is a minimal code set and the expected output. I think all you really need is two or three lines of code and two or three lines of example output. If you are actually asking about arg_max versus arg_min, a very short example would be far more useful. I'd like to help, but the prospect of running all of the code you posted above makes it hard to get started. – Wontonimo

Answer


I did some research and asked a few questions of a friend who works at a large company doing deep-learning research. It turned out that generative adversarial networks do not perform well on classification tasks, so I changed my approach and implemented it with GoogLeNet instead. Problem solved!
