
Take a look at the tutorial Deep MNIST for Experts and its source code, mnist_deep.py:

# Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28 image
# is down to 7x7x64 feature maps -- maps this to 1024 features.
with tf.name_scope('fc1'): 
    W_fc1 = weight_variable([7 * 7 * 64, 1024]) 
    b_fc1 = bias_variable([1024]) 

    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64]) 
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) 

# Dropout - controls the complexity of the model, prevents co-adaptation of 
# features. 
with tf.name_scope('dropout'): 
    keep_prob = tf.placeholder(tf.float32) 
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) 

# Map the 1024 features to 10 classes, one for each digit 
with tf.name_scope('fc2'): 
    W_fc2 = weight_variable([1024, 10]) 
    b_fc2 = bias_variable([10]) 

    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 
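
Note that weight_variable and bias_variable are not TensorFlow ops; they are small helper functions defined earlier in mnist_deep.py, along these lines:

def weight_variable(shape):
    # Initialize weights with a little noise to break symmetry
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    # Slightly positive bias to avoid "dead" ReLU neurons
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)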

Alternatively, if you want to use tf.contrib.layers.fully_connected, you can do something like this:

# Flatten the 7x7x64 feature maps into one vector per example
h_pool2_flatten = tf.contrib.layers.flatten(h_pool2)
# Fully connected layer 1: 1024 outputs, ReLU activation by default
h_fc1 = tf.contrib.layers.fully_connected(h_pool2_flatten, 1024, scope='fc1')
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.contrib.layers.dropout(h_fc1, keep_prob=keep_prob)
# Fully connected layer 2: map the 1024 features to the 10 digit classes;
# no activation here, since the softmax is applied together with the loss
y_conv = tf.contrib.layers.fully_connected(h_fc1_drop, 10, activation_fn=None, scope='fc2')
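
In both versions keep_prob is a placeholder, so the dropout strength is chosen at run time. A minimal sketch of how it is fed, assuming the train_step, accuracy, x, y_, batch, and mnist names used elsewhere in the tutorial:

# Training step: drop half of the fc1 activations
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

# Evaluation: a keep_prob of 1.0 disables dropout
print(accuracy.eval(feed_dict={x: mnist.test.images,
                               y_: mnist.test.labels,
                               keep_prob: 1.0}))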