
I'm having a problem wiring up my VGG19 network in TensorFlow. Here is the entire graph:

class VGG(object): 
    def __init__(self, n_classes=252): 

     self.input_data = tf.placeholder(dtype=tf.float32, name='input_data', shape=(5, 224, 224, 3)) 

     with tf.variable_scope("group1"): 
      with tf.variable_scope("conv1"): 
       self.weights1 = tf.get_variable('weights',[3, 3, 3, 64], initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias1 = tf.get_variable('bias', [64], initializer=tf.constant_initializer(0.0)) 
       self.convolve1 = tf.nn.conv2d(self.input_data, self.weights1, [1, 1, 1, 1], padding="SAME") 
       self.conv1 = tf.nn.elu(tf.nn.bias_add(self.convolve1, self.bias1)) 
      with tf.variable_scope("conv2"): 
       self.weights2 = tf.get_variable('weights', [3, 3, 64, 64], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias2 = tf.get_variable('bias', [64], initializer=tf.constant_initializer(0.0)) 
       self.convolve2 = tf.nn.conv2d(self.conv1, self.weights2, [1, 1, 1, 1], padding="SAME") 
       self.conv2 = tf.nn.elu(tf.nn.bias_add(self.convolve2, self.bias2)) 

      self.pool1 = tf.nn.max_pool(self.conv2, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME', name='pool1') 

     with tf.variable_scope("group2"): 
      with tf.variable_scope("conv3"): 
       self.weights3 = tf.get_variable('weights', [3, 3, 64, 128], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias3 = tf.get_variable('bias', [128], initializer=tf.constant_initializer(0.0)) 
       self.convolve3 = tf.nn.conv2d(self.pool1, self.weights3, [1, 1, 1, 1], padding="SAME") 
       self.conv3 = tf.nn.elu(tf.nn.bias_add(self.convolve3, self.bias3)) 
      with tf.variable_scope("conv4"): 
       self.weights4 = tf.get_variable('weights', [3, 3, 128, 128], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias4 = tf.get_variable('bias', [128], initializer=tf.constant_initializer(0.0)) 
       self.convolve4 = tf.nn.conv2d(self.conv3, self.weights4, [1, 1, 1, 1], padding="SAME") 
       self.conv4 = tf.nn.elu(tf.nn.bias_add(self.convolve4, self.bias4)) 
      self.pool2 = tf.nn.max_pool(self.conv4, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME', name='pool2') 

     with tf.variable_scope("group3"): 
      with tf.variable_scope("conv5"): 
       self.weights5 = tf.get_variable('weights', [3, 3, 128, 256], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias5 = tf.get_variable('bias', [256], initializer=tf.constant_initializer(0.0)) 
       self.convolve5 = tf.nn.conv2d(self.pool2, self.weights5, [1, 1, 1, 1], padding="SAME") 
       self.conv5 = tf.nn.elu(tf.nn.bias_add(self.convolve5, self.bias5)) 
      with tf.variable_scope("conv6"): 
       self.weights6 = tf.get_variable('weights', [3, 3, 256, 256], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias6 = tf.get_variable('bias', [256], initializer=tf.constant_initializer(0.0)) 
       self.convolve6 = tf.nn.conv2d(self.conv5, self.weights6, [1, 1, 1, 1], padding="SAME") 
       self.conv6 = tf.nn.elu(tf.nn.bias_add(self.convolve6, self.bias6)) 
      with tf.variable_scope("conv7"): 
       self.weights7 = tf.get_variable('weights', [3, 3, 256, 256], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias7 = tf.get_variable('bias', [256], initializer=tf.constant_initializer(0.0)) 
       self.convolve7 = tf.nn.conv2d(self.conv6, self.weights7, [1, 1, 1, 1], padding="SAME") 
       self.conv7 = tf.nn.elu(tf.nn.bias_add(self.convolve7, self.bias7)) 
      with tf.variable_scope("conv8"): 
       self.weights8 = tf.get_variable('weights', [3, 3, 256, 256], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias8 = tf.get_variable('bias', [256], initializer=tf.constant_initializer(0.0)) 
       self.convolve8 = tf.nn.conv2d(self.conv7, self.weights8, [1, 1, 1, 1], padding="SAME") 
       self.conv8 = tf.nn.elu(tf.nn.bias_add(self.convolve8, self.bias8)) 
      self.pool3 = tf.nn.max_pool(self.conv8, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME', name='pool3') 

     with tf.variable_scope("group4"): 
      with tf.variable_scope("conv9"): 
       self.weights9 = tf.get_variable('weights', [3, 3, 256, 512], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias9 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0)) 
       self.convolve9 = tf.nn.conv2d(self.pool3, self.weights9, [1, 1, 1, 1], padding="SAME") 
       self.conv9 = tf.nn.elu(tf.nn.bias_add(self.convolve9, self.bias9)) 
      with tf.variable_scope("conv10"): 
       self.weights10 = tf.get_variable('weights', [3, 3, 512, 512], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias10 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0)) 
       self.convolve10 = tf.nn.conv2d(self.conv9, self.weights10, [1, 1, 1, 1], padding="SAME") 
       self.conv10 = tf.nn.elu(tf.nn.bias_add(self.convolve10, self.bias10)) 
      with tf.variable_scope("conv11"): 
       self.weights11 = tf.get_variable('weights', [3, 3, 512, 512], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias11 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0)) 
       self.convolve11 = tf.nn.conv2d(self.conv10, self.weights11, [1, 1, 1, 1], padding="SAME") 
       self.conv11 = tf.nn.elu(tf.nn.bias_add(self.convolve11, self.bias11)) 
      with tf.variable_scope("conv12"): 
       self.weights12 = tf.get_variable('weights', [3, 3, 512, 512], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias12 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0)) 
       self.convolve12 = tf.nn.conv2d(self.conv11, self.weights12, [1, 1, 1, 1], padding="SAME") 
       self.conv12 = tf.nn.elu(tf.nn.bias_add(self.convolve12, self.bias12)) 
      self.pool4 = tf.nn.max_pool(self.conv12, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME', name="pool4") 

     with tf.variable_scope("group5"): 
      with tf.variable_scope("conv13"): 
       self.weights13 = tf.get_variable('weights', [3, 3, 512, 512], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias13 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0)) 
       self.convolve13 = tf.nn.conv2d(self.pool4, self.weights13, [1, 1, 1, 1], padding="SAME") 
       self.conv13 = tf.nn.elu(tf.nn.bias_add(self.convolve13, self.bias13)) 

      with tf.variable_scope("conv14"): 
       self.weights14 = tf.get_variable('weights', [3, 3, 512, 512], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias14 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0)) 
       self.convolve14 = tf.nn.conv2d(self.conv13, self.weights14, [1, 1, 1, 1], padding="SAME") 
       self.conv14 = tf.nn.elu(tf.nn.bias_add(self.convolve14, self.bias14)) 
      with tf.variable_scope("conv15"): 
       self.weights15 = tf.get_variable('weights', [3, 3, 512, 512], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias15 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0)) 
       self.convolve15 = tf.nn.conv2d(self.conv14, self.weights15, [1, 1, 1, 1], padding="SAME") 
       self.conv15 = tf.nn.elu(tf.nn.bias_add(self.convolve15, self.bias15)) 
      with tf.variable_scope("conv16"): 
       self.weights16 = tf.get_variable('weights', [3, 3, 512, 512], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias16 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0)) 
       self.convolve16 = tf.nn.conv2d(self.conv15, self.weights16, [1, 1, 1, 1], padding="SAME") 
       self.conv16 = tf.nn.elu(tf.nn.bias_add(self.convolve16, self.bias16)) 
      self.pool5 = tf.nn.max_pool(self.conv16, [1, 2, 2, 1], [1, 2, 2, 1], "SAME", name="pool5") 

     with tf.variable_scope("group6"): 
      self.fc6 = tf.reshape(self.pool5, [-1, 4096], 'fc6') 
      with tf.variable_scope("fc7"): 
       self.weights17 = tf.get_variable('weights', [4096, 4096], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias17 = tf.get_variable('bias', [4096], initializer=tf.constant_initializer(0.0)) 
       self.fc7 = tf.nn.elu(tf.nn.bias_add(tf.matmul(self.fc6, self.weights17), self.bias17)) 
      with tf.variable_scope("logits"): 
       self.weights17 = tf.get_variable('weights', [4096, n_classes], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias17 = tf.get_variable('bias', [n_classes], initializer=tf.constant_initializer(0.0)) 
       self.logits = tf.nn.elu(tf.nn.bias_add(tf.matmul(self.fc6, self.weights17), self.bias17)) 

     self.softmax = tf.nn.softmax(self.logits, 'softmax') 

I've gone through and re-read the paper repeatedly and searched other TensorFlow implementations. At this point I think I've just gone blind to the cause. When reshaping self.pool5 into self.fc6 I get a shape mismatch error: self.pool5 ends up with shape (?, 7, 7, 512) when it should be (?, 8, 8, 512). Any help would be appreciated.

Thanks.

Answers


I think your problem is that you are trying to reshape pool5 to 4096, the size of the fully connected layer. But that 4096 is only the number of neurons the fc layer has, not the number of its inputs. The input to the first fc layer is the flattened pool5, which for a 224x224 input is 7 x 7 x 512 = 25088. I've tried to change your code accordingly.

 with tf.variable_scope("conv16"): 
       self.weights16 = tf.get_variable('weights', [3, 3, 512, 512], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias16 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0)) 
       self.convolve16 = tf.nn.conv2d(self.conv15, self.weights16, [1, 1, 1, 1], padding="SAME") 
       self.conv16 = tf.nn.elu(tf.nn.bias_add(self.convolve16, self.bias16)) 
      self.pool5 = tf.nn.max_pool(self.conv16, [1, 2, 2, 1], [1, 2, 2, 1], "SAME", name="pool5") 
      self.pool5_flatten = tf.reshape(self.pool5, [-1, 25088]) 

     with tf.variable_scope("group6"): 
      with tf.variable_scope("fc7"): 
       self.weights17 = tf.get_variable('weights', [25088, 4096], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias17 = tf.get_variable('bias', [4096], initializer=tf.constant_initializer(0.0)) 
       self.fc7 = tf.nn.elu(tf.nn.bias_add(tf.matmul(self.pool5_flatten, self.weights17), self.bias17)) 
      with tf.variable_scope("logits"): 
       self.weights17 = tf.get_variable('weights', [4096, n_classes], 
               initializer=tf.random_normal_initializer(stddev=1e-2)) 
       self.bias17 = tf.get_variable('bias', [n_classes], initializer=tf.constant_initializer(0.0)) 
       self.logits = tf.nn.elu(tf.nn.bias_add(tf.matmul(self.fc7, self.weights17), self.bias17)) 

     self.softmax = tf.nn.softmax(self.logits, 'softmax') 

So as long as fc7 comes out as 4096, pool5 doesn't get reshaped to 4096? I have a function in there that computes that link: reduce(lambda x, y: x * y, self.pool5.get_shape().as_list()[1:]). Thanks for your help. – Andrew
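
For reference, a minimal sketch of that dynamic-flattening idea, so the 25088 doesn't have to be hard-coded. It assumes pool5 has a fully defined static shape (true here, since the placeholder shape is fixed) and is meant to slot into __init__ right after self.pool5 is defined:

from functools import reduce

# Multiply the static height, width and channel dimensions, skipping the batch dimension.
flat_dim = reduce(lambda x, y: x * y, self.pool5.get_shape().as_list()[1:])  # 7 x 7 x 512 = 25088
self.pool5_flatten = tf.reshape(self.pool5, [-1, flat_dim])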


This is perfectly normal, and your implementation looks fine.

VGG uses 5 max pools with stride 2 and padding='SAME', each of which halves the feature map in height and width (exactly, when the size is divisible by 2).

You specify an input image of size 224x224:

self.input_data = tf.placeholder(dtype=tf.float32, name='input_data', shape=(5, 224, 224, 3)) 

So the feature map size gets divided by 2^5 = 32, and you end up with an output of shape [?, 7, 7, feature_size] (224/32 = 7).


If you really want an output of shape [?, 8, 8, feature_size] with the VGG architecture, you should start with images of size 256 x 256.
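
As a quick sanity check of that arithmetic, here is a small sketch (plain Python, no TensorFlow needed) that walks an input size through the five stride-2, 'SAME'-padded max pools:

import math

def pooled_size(input_size, n_pools=5):
    # Each 2x2 / stride-2 max pool with 'SAME' padding does a ceiling division by 2.
    size = input_size
    for _ in range(n_pools):
        size = math.ceil(size / 2)
    return size

print(pooled_size(224))  # 7 -> flattened pool5 size: 7 x 7 x 512 = 25088
print(pooled_size(256))  # 8 -> flattened pool5 size: 8 x 8 x 512 = 32768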


Thanks for this, I'll experiment to see how it affects training overall. – Andrew