
Error training a Cifar-10 model in Tensorflow - accuracy is 0, it won't optimize, and no loss is reported

I am currently trying to train my model to classify the cifar-10 dataset. This is how I read in the data:

import numpy as np
import tensorflow as tf
from sklearn.preprocessing import OneHotEncoder

def convert_images(raw):
    # Reshape the flat CIFAR-10 rows into [num_images, 32, 32, 3] (channels last).
    raw_float = np.array(raw, dtype=float)
    images = raw_float.reshape([-1, 3, 32, 32])
    images = images.transpose([0, 2, 3, 1])
    return images

def load_data(filename):
    # Read one pickled batch file and split it into images and labels.
    data = unpickle(filename)
    raw_images = data[b'data']
    labels = np.array(data[b'labels'])
    images = convert_images(raw_images)
    return images, labels

def load_training_data():
    # Concatenate the five training batches into one 50000-image array.
    images = np.zeros(shape=[50000, 32, 32, 3], dtype=float)
    labels = np.zeros(shape=[50000], dtype=int)
    begin = 0
    for i in range(5):
        filename = "data_batch_" + str(i+1)
        images_batch, labels_batch = load_data(filename)
        num_images = len(images_batch)
        end = begin + num_images
        images[begin:end, :] = images_batch
        labels[begin:end] = labels_batch
        begin = end
    return images, labels, OneHotEncoder(categorical_features=labels, n_values=10)
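
(The unpickle helper called above is not shown in the post; presumably it is the standard CIFAR-10 reader, roughly along these lines:)

import pickle

def unpickle(filename):
    # Each CIFAR-10 batch file is a pickled dict with keys like b'data'
    # (uint8 array of shape [10000, 3072]) and b'labels' (list of 10000 ints).
    with open(filename, 'rb') as f:
        return pickle.load(f, encoding='bytes')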

What this does is reshape the data so that it becomes a four-dimensional array of 32x32x3 pixel/RGB values. I define my model like this (I first reshape X into a row vector, because the four-dimensional array produced errors):

X = tf.placeholder(tf.float32, [None,32,32,3]) 
Y_labeled = tf.placeholder(tf.int32, [None]) 
data = load_training_data() 

with tf.name_scope('dnn'): 
    XX = tf.reshape(X, [-1,3072]) 
    hidden1 = tf.layers.dense(XX, 300, name = 'hidden1', activation = tf.nn.relu) 
    hidden2 = tf.layers.dense(hidden1, 200, name = 'hidden2', activation = tf.nn.relu) 
    hidden3 = tf.layers.dense(hidden2, 200, name = 'hidden3', activation = tf.nn.relu) 
    hidden4 = tf.layers.dense(hidden3, 100, name = 'hidden4', activation = tf.nn.relu) 
    logits = tf.layers.dense(hidden4, 10, name = 'outputs') 

with tf.name_scope('loss'): 
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = (Y_labeled), logits = logits) 
    loss = tf.reduce_mean(cross_entropy, name = 'loss') 

learning_rate = 0.01 

with tf.name_scope('train'): 
    optimizer = tf.train.GradientDescentOptimizer(learning_rate) 
    training_op = optimizer.minimize(loss) 

with tf.name_scope('eval'): 
    correct = tf.nn.in_top_k(logits,Y_labeled, 1) 
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32)) 

init = tf.global_variables_initializer() 

batch_size = 100 
n_epochs = 50 

with tf.Session() as sess: 
    init.run() 
    for epoch in range(n_epochs): 
        for iteration in range(50000 // batch_size): 
            X_batch = data[0][iteration*batch_size:(iteration+1)*batch_size] 
            y_batch = data[1][iteration*batch_size:(iteration+1)*batch_size] 
            #X_batch, y_batch = data.train.next_batch(batch_size) 
            sess.run(training_op, feed_dict = {X: X_batch, Y_labeled: y_batch}) 
        acc_train = accuracy.eval(feed_dict = {X: X_batch, Y_labeled: y_batch}) 
        print(epoch, "train accuracy:", acc_train, "loss", loss) 

I want to define a simple model with 4 hidden layers. When I run it, it compiles without errors and starts "training", but the accuracy stays at 0.0 and no loss is ever printed. I am not sure whether the mistake is in how I compute the accuracy and loss or in the definition of the model itself.

Answer


The problem seems to be in how you feed the labels. When you create the placeholder Y_labeled = tf.placeholder(tf.int32, [None, 10]) it expects a vector of size 10 per example, but later, when you create the numpy label tensor with labels = np.zeros(shape = [50000], dtype = int), each label is a scalar.

That is why you get this error: the placeholder needs to be fed a tensor of shape (batch_size, 10), but you are feeding it one of shape (batch_size,).
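
In other words, the placeholder shape and the loss function have to agree with the shape of the label array you feed in. A rough sketch of the two consistent combinations (variable names follow the question; which one to use is a design choice):

# Option 1: integer class ids of shape (batch_size,) with the sparse loss.
Y_labeled = tf.placeholder(tf.int32, [None])
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y_labeled, logits=logits)

# Option 2: one-hot labels of shape (batch_size, 10) with the dense loss.
Y_onehot = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=Y_onehot, logits=logits)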


I managed to fix my error by changing how I define y_labeled, giving it shape (None) instead of (None, 10). I also reshaped my x into a vector of length 3072 using tf.reshape(x, [-1,3072]), and I took out tf.squeeze(y_labels) and replaced it with y_labels in the cross entropy. That lets my model run, but it keeps giving me an accuracy of 0 and never displays the loss, which leads me to believe those are defined incorrectly.
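
For what it's worth, the training loop in the question prints the loss tensor object itself (print(..., "loss", loss)), so no numeric value can ever appear; the loss has to be fetched through sess.run or eval. A sketch of the inner loop with that change, plus scaling the raw 0-255 pixels to [0, 1] (an assumption that often helps plain SGD converge at this learning rate):

# Fetch the loss value in the same run call as the training op.
_, loss_val = sess.run([training_op, loss],
                       feed_dict={X: X_batch / 255.0, Y_labeled: y_batch})

acc_train = accuracy.eval(feed_dict={X: X_batch / 255.0, Y_labeled: y_batch})
print(epoch, "train accuracy:", acc_train, "loss:", loss_val)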
