
Keras custom layer - AttributeError: 'Tensor' object has no attribute '_keras_history'

I am trying to build a Keras word2vec autoencoder, following the CustomVariationalLayer class from the official example.

My class looks like this:

from keras import backend as K
from keras.engine.topology import Layer

class custom_ae_layer(Layer):
    """custom keras layer to handle looking up wv inputs
    example from https://github.com/fchollet/keras/blob/master/examples/variational_autoencoder.py
    """
    def __init__(self, **kwargs):
        self.is_placeholder = True
        super(custom_ae_layer, self).__init__(**kwargs)

    def ae_loss(self, reconstruction, emb_lookup):
        loss = K.sum(emb_lookup - reconstruction, axis=-1)
        return K.mean(loss)

    def call(self, inputs):
        reconstruction = inputs[1]
        emb_lookup = inputs[0]
        loss = self.ae_loss(emb_lookup, reconstruction)
        self.add_loss(loss)
        return emb_lookup
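
For reference, a minimal, self-contained sketch of how a loss-only layer like this is wired up in the official variational_autoencoder.py example: the layer's output becomes the model output and the model is compiled with loss=None, since the loss is attached via self.add_loss() inside the layer. The sizes and the plain Dense reconstruction below are placeholders, not my real network (the real wiring is in init_export_network further down).

from keras.layers import Input, Embedding, Dense
from keras.models import Model

seq_len, vocab_size, emb_size = 10, 100, 8   # placeholder sizes

sketch_input = Input(shape=(seq_len,))
sketch_emb = Embedding(vocab_size, emb_size)(sketch_input)
sketch_recon = Dense(emb_size, activation="tanh")(sketch_emb)

# custom_ae_layer as defined above; loss is added inside the layer
sketch_out = custom_ae_layer()([sketch_emb, sketch_recon])
sketch_model = Model(sketch_input, sketch_out)
sketch_model.compile(optimizer="adamax", loss=None)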

The main difference between my layer and the official example is that I use an embedding lookup as input, which is the output of a keras.layers.Embedding object, and the reconstruction is the output of:

recon_layer = Dense(outshape, activation="tanh",kernel_regularizer=l2(in_args.l2_rate))(deconv_input) 
s_recon_layer = K.squeeze(recon_layer,2) 

The error occurs whether I return emb_lookup or reconstruction.


The full error message is:

Traceback (most recent call last):
  File "semi_sup_cnn_big_data_test.py", line 166, in <module>
    main()
  File "semi_sup_cnn_big_data_test.py", line 84, in main
    args,run_time,micro,macro = basic_cnn_train_val_test(args)
  File "semi_sup_cnn_big_data_test.py", line 100, in basic_cnn_train_val_test
    clf,args = init_export_network(args)
  File "/home/qqi/git/MPI_CNN/models/auto_encoder_multilayer_cnn.py", line 257, in init_export_network
    model = Model(model_input, y)
  File "/usr/local/lib/python3.5/dist-packages/keras/legacy/interfaces.py", line 88, in wrapper
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/topology.py", line 1705, in __init__
    build_map_of_graph(x, finished_nodes, nodes_in_progress)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/topology.py", line 1695, in build_map_of_graph
    layer, node_index, tensor_index)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/topology.py", line 1665, in build_map_of_graph
    layer, node_index, tensor_index = tensor._keras_history
AttributeError: 'Tensor' object has no attribute '_keras_history'

As requested, here is the full init_export_network function:

def init_export_network(in_args):
    import_dir = os.path.join('cv_data',
                              in_args.data_name,
                              in_args.label_name,
                              in_args.this_fold)

    # set output dir as models/[model_name]/[data_name]/[label_file_name]/[this_fold]
    output_dir = os.path.join("initialized_models",
                              in_args.model_name,
                              in_args.data_name,
                              in_args.label_name,
                              in_args.this_fold)
    print("exporting to", output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    else:
        print(output_dir, "data dir identified but will be re-populated")
        shutil.rmtree(output_dir)
        os.makedirs(output_dir)
    "returns base cnn architecture and placeholder/untrained weights"
    # unpckl wv_matrix, class_names
    wv_matrix = unpckl(os.path.join(import_dir, 'wv_matrix.pickle'))
    print("valid pre-processed data found in", import_dir)
    # define network layers ----------------------------------------------------
    input_shape = (in_args.seq_len,)
    output_shape = (in_args.seq_len, len(wv_matrix[0]),)
    emb_size = len(wv_matrix[0])
    model_input = Input(shape=input_shape)
    emb_lookup = Embedding(len(wv_matrix),
                           len(wv_matrix[0]),
                           embeddings_regularizer=l2(in_args.emb_l2_rate),
                           input_length=in_args.seq_len, name="embedding")(model_input)
    #emb_lookup = Embedding(len(wv_matrix), len(wv_matrix[0]), input_length=in_args.seq_len, name="embedding",)(model_input)
    if in_args.emb_dropout:
        emb_lookup = Dropout(in_args.emb_dropout)(emb_lookup)
    conv_blocks = []
    # conv blocks --------------------------------------------------------------
    print("emb_lookup shape!!!!", emb_lookup.shape)
    for ith_conv, sz in enumerate(in_args.filter_sizes):
        if ith_conv == 0:
            conv_input = emb_lookup
        else:
            conv_input = conv
        conv = Convolution1D(filters=in_args.feat_maps[ith_conv],
                             kernel_size=sz,
                             padding="valid",
                             activation="relu",
                             kernel_initializer='lecun_uniform',
                             kernel_regularizer=l2(in_args.l2_rate),
                             strides=1,
                             name="{}_conv".format(ith_conv))(conv_input)
        print("{}_conv".format(ith_conv), conv.shape)
    # deconv blocks with dimensions reverse of multilayer_cnn ------------------
    deconv_blocks = []
    deconv_filter_sizes = in_args.filter_sizes
    deconv_filter_sizes.reverse()

    #print("conv_shape!!!", conv.shape)
    conv_input = conv
    print("conv_upsampling_shape!!!", conv_input.shape)

    #unpool_shape = ((conv[1],-1,conv[2]))
    #conv_input = Reshape((1,conv_input[1],conv_input[2]))(conv_input)
    #print("conv_input_shape!!!", conv_input.shape)

    #conv_input = Reshape(unpool_shape),conv_input
    #conv_input = Reshape(unpool_shape)(conv_input)
    deconv_input = K.expand_dims(conv_input, 2)

    print("conv_reshape_shape!!!", conv_input)
    for ith_conv, sz in enumerate(deconv_filter_sizes):
        print("{}_deconv input shape!!!".format(ith_conv), deconv_input)
        deconv = Conv2DTranspose(filters=in_args.feat_maps[ith_conv],
                                 kernel_size=(sz, 1),
                                 #kernel_size=sz,
                                 padding="valid",
                                 activation="relu",
                                 kernel_initializer='lecun_uniform',
                                 kernel_regularizer=l2(in_args.l2_rate),
                                 strides=(1, 1),
                                 name="{}_deconv".format(ith_conv))(deconv_input)
        deconv_input = deconv
    print("{}_deconv input shape!!!".format(ith_conv), deconv_input)
    print("deconv_output shape", deconv)
    #z = Flatten()(conv)
    #deconv_out = Flatten(deconv)
    #outshape = (in_args.seq_len,len(wv_matrix[0]))
    outshape = len(wv_matrix[0])
    recon_layer = Dense(outshape, activation="tanh", kernel_regularizer=l2(in_args.l2_rate))(deconv_input)
    print("recon_layer shape", recon_layer)
    #s_recon_layer = K.squeeze(recon_layer,2)
    s_recon_layer = Lambda(lambda x: K.squeeze(x, 2))(recon_layer)
    print("squeezed recon_layer shape", s_recon_layer)
    #print("conv_reshape_shape!!!", conv_input.shape)(conv)
    # end define network layers ------------------------------------------------
    #model_output = Dense(outshape, activation="elu",kernel_regularizer=l2(in_args.l2_rate))(z)
    y = custom_ae_layer()([model_input, emb_lookup, s_recon_layer])
    model = Model(model_input, y)
    # finished network layers definition - compile network
    opt = optimizers.Adamax()

    model.compile(loss=None, optimizer='adamax')
    # load wv_matrix into embedding layer
    embedding_layer = model.get_layer("embedding")
    embedding_layer.set_weights([wv_matrix])
    print("Initializing embedding layer with word2vec weights, shape", wv_matrix.shape)

    # save model architecture as json
    open(os.path.join(output_dir, "structure.json"), "w").write(model.to_json())
    # save initialized model weights as .hdf5
    model.save_weights(os.path.join(output_dir, "weights" + ".hdf5"))
    print("multilayer network/initial weights successfully saved in", output_dir)
    print(in_args)
    #print(model.summary())
    return model, in_args

Your error doesn't come from that layer but from the 'init_export_network' function. Could you show us its definition? –


Here is the init_export_network code, with @Yu-Yang's Lambda fix applied. All this function does is initialize the Keras model. Still getting the same error. – qiubs

Answer


The error message looks very similar to the one in this question: https://stackoverflow.com/a/45309816/1531463

In short, raw backend calls return tensors that don't carry the _keras_history metadata Keras needs to trace the model graph, so I think you need to wrap this line:

s_recon_layer = K.squeeze(recon_layer,2) 

(or any other backend function call) in a Lambda layer.

Specifically:

s_recon_layer = Lambda(lambda x: K.squeeze(x, 2))(recon_layer) 
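
The same treatment would presumably apply to the other raw backend call in init_export_network, the K.expand_dims that feeds the deconvolution stack. A sketch of that change, using the variable names from the function above:

# wrap the backend op so its output keeps the Keras layer metadata
deconv_input = Lambda(lambda x: K.expand_dims(x, 2))(conv_input)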

Wow, yes, this worked perfectly once I wrapped **all** of my keras.backend function calls in Keras Lambda layers. Thanks for the help! – qiubs
