2017-05-23 10 views
0
class MyLayer(Layer):
    """Custom Keras layer.

    Splits the input (batch, dim) into two halves along the last axis,
    multiplies the second half by a learned (dim/2, dim/2) kernel, and
    returns both halves re-stacked as (batch?, 2, dim/2).
    # NOTE(review): the meaning of output_dim vs. the actual output of
    # call() looks inconsistent — confirm against the caller.
    """

    def __init__(self, output_dim, **kwargs):
        # Shape reported by compute_output_shape (excluding batch dim).
        self.output_dim = output_dim
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        input_dim = input_shape[-1]
        # Floor division keeps the shape entries integers (plain `/`
        # would yield floats under Python 3 / true division).
        kernel_shape = [input_dim // 2, input_dim // 2]
        # BUG FIX: Keras' add_weight() signature is
        # add_weight(self, name, shape, ...); `name` must be supplied.
        # Omitting it caused:
        #   TypeError: add_weight() takes at least 3 arguments (4 given)
        self.kernel = self.add_weight(name='kernel',
                                      shape=kernel_shape,
                                      initializer='uniform',
                                      trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this somewhere!

    def call(self, x):
        inputs = x
        input_shape = K.int_shape(inputs)
        half = tf.to_int32(input_shape[1] / 2)
        # (batch, dim) -> (batch, 2, dim/2): row 0 = first half (G),
        # row 1 = second half (P's input).
        T = tf.reshape(inputs, [-1, 2, half])
        P = tf.matmul(T[:, 1, :], self.kernel)
        G = T[:, 0, :]
        # NOTE(review): axis=0 concatenates along the batch axis, which
        # doubles the batch before the reshape below — confirm this is
        # intended rather than axis=1 / tf.stack.
        op = tf.concat([P, G], axis=0)
        op = tf.reshape(op, [-1, 2, half])
        return op

    def compute_output_shape(self, input_shape):
        # Output shape as declared by the user at construction time.
        return (input_shape[0], self.output_dim)

どうか助けてください。ここで何が欠けているのでしょうか? 次のエラーが発生します: TypeError: add_weight()は少なくとも3つの引数をとります(4が指定されています)

スタックトレースは以下のとおりである:

Traceback (most recent call last): 
File "/root/PycharmProjects/tranferNET/modelBuild.py", line 193, in <module> model = create_network([100, 100, 3]) 
File "/root/PycharmProjects/tranferNET/modelBuild.py", line 174, in create_network com_distribution = MyLayer((2,256))(merge_common2) 
File "/root/Tensorflow/local/lib/python2.7/site-packages/keras/engine/topology.py", line 558, in __call__ self.build(input_shapes[0]) 
File "/root/PycharmProjects/tranferNET/ncLayer.py", line 70, in build trainable=True) 
File "/root/Tensorflow/local/lib/python2.7/site-packages/keras/legacy/interfaces.py", line 88, in wrapper 
return func(*args, **kwargs) 
TypeError: add_weight() takes at least 3 arguments (4 given) 
+0

コードとエラーメッセージを再フォーマットしました。うまくいけば、もう少し読みやすくなりました。 – Rook

+0

ありがとうございます!!!!!! –

+0

Documentationの 'Layer.add_weight(...')のパラメータを確認してください。自動追加された最初のパラメータ 'self'を監視してください。 – stovfl

答えて

0

これは add_weight() 関数の定義です。自動的に渡される self の後に、name と shape を位置引数として渡す必要があります:

def add_weight(self,
               name,
               shape,
               dtype=None,
               initializer=None,
               regularizer=None,
               trainable=True,
               constraint=None):
    """Adds a weight variable to the layer.
    # Arguments
     name: String, the name for the weight variable.
     shape: The shape tuple of the weight.
     dtype: The dtype of the weight.
     initializer: An Initializer instance (callable).
     regularizer: An optional Regularizer instance.
     trainable: A boolean, whether the weight should
      be trained via backprop or not (assuming
      that the layer itself is also trainable).
     constraint: An optional Constraint instance.
    # Returns
     The created weight variable.
    """
    # Resolve a string/None initializer spec to an Initializer callable.
    initializer = initializers.get(initializer)
    if dtype is None:
        dtype = K.floatx()
    weight = K.variable(initializer(shape), dtype=dtype, name=name)
    if regularizer is not None:
        self.add_loss(regularizer(weight))
    if constraint is not None:
        self.constraints[weight] = constraint
    if trainable:
        self._trainable_weights.append(weight)
    else:
        self._non_trainable_weights.append(weight)
    # FIX: this return was mis-indented to column 0 in the quoted
    # source, which made the snippet syntactically invalid.
    return weight

initializer と trainable は kwargs(キーワード引数)として渡します。shape は "shape=kernel_shape" として渡すか、name の後の2番目の位置引数として渡してください。元のコードでは name が欠けていたため、shape が name の位置に解釈されてエラーになっていました。

+1

助けてくれてありがとう –

関連する問題