I took the simple neural-network example from the TensorFlow GitHub examples and tried to split it into two parts. The first part does training + testing; the second part is the separated-out testing part, which needs to restore the model. The restore seems to work, but the predict function cannot be found. This concerns saving and restoring a model built on the TensorFlow Estimator template.

Here is the first part:

from __future__ import print_function 

from tensorflow.python.saved_model import builder as saved_model_builder 

# Import MNIST data 
from tensorflow.examples.tutorials.mnist import input_data 
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 

import tensorflow as tf 
import matplotlib 
import matplotlib.pyplot as plt 
import numpy as np 
import shutil 

matplotlib.use('TkAgg') 

# Parameters 
learning_rate = 0.1 
num_steps = 1000 
batch_size = 128 
display_step = 100 

# Network Parameters 
n_hidden_1 = 256 # 1st layer number of neurons 
n_hidden_2 = 256 # 2nd layer number of neurons 
num_input = 784 # MNIST data input (img shape: 28*28) 
num_classes = 10 # MNIST total classes (0-9 digits) 

#init = tf.initialize_all_variables() 

sess = tf.Session() 

# Define the input function for training 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': mnist.train.images}, y=mnist.train.labels, 
    batch_size=batch_size, num_epochs=None, shuffle=True) 

# Define the neural network 
def neural_net(x_dict): 
    # TF Estimator input is a dict, in case of multiple inputs 
    x = x_dict['images'] 
    # Hidden fully connected layer with 256 neurons 
    layer_1 = tf.layers.dense(x, n_hidden_1, name="layer_1") 
    # Hidden fully connected layer with 256 neurons 
    layer_2 = tf.layers.dense(layer_1, n_hidden_2, name="layer_2") 
    # Output fully connected layer with a neuron for each class 
    out_layer = tf.layers.dense(layer_2, num_classes, name="out_layer") 
    return out_layer 

# Define the model function (following TF Estimator Template) 
def model_fn(features, labels, mode): 
    # Build the neural network 
    logits = neural_net(features) 

    # Predictions 
    pred_classes = tf.argmax(logits, axis=1) 
    pred_probas = tf.nn.softmax(logits) 

    # If prediction mode, early return 
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)

    # Define loss and optimizer 
    loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=tf.cast(labels, dtype=tf.int32)))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 
    train_op = optimizer.minimize(loss_op, global_step=tf.train.get_global_step()) 

    # Evaluate the accuracy of the model 
    acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes) 

    # TF Estimators require an EstimatorSpec that specifies
    # the different ops for training, evaluating, ...
    estim_specs = tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=pred_classes,
        loss=loss_op,
        train_op=train_op,
        eval_metric_ops={'accuracy': acc_op})

    return estim_specs 

# Build the Estimator 
model = tf.estimator.Estimator(model_fn) 

# Train the Model 
model.train(input_fn, steps=num_steps) 

# Evaluate the Model 
# Define the input function for evaluating 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': mnist.test.images}, y=mnist.test.labels, 
    batch_size=batch_size, shuffle=False) 
# Use the Estimator 'evaluate' method 
model.evaluate(input_fn) 

#model.export_savedmodel(".", input_fn) 

init = tf.global_variables_initializer() 
sess.run(init) 

tf.add_to_collection("nn_model", model) 

# Add ops to save and restore all the variables. 
#saver = tf.train.Saver() 

#save_path = saver.save(sess, "model/model.ckpt") 

try: 
    shutil.rmtree("model") 
except: 
    pass 

builder = saved_model_builder.SavedModelBuilder("model") 
builder.add_meta_graph_and_variables(sess, ["nn"]) 
builder.save() 

print("Model saved in file") 

# Predict single images 
n_images = 4 
# Get images from test set 
test_images = mnist.test.images[:n_images] 
# Prepare the input data 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': test_images}, shuffle=False) 
# Use the model to predict the images class 
preds = list(model.predict(input_fn)) 

# Display 
for i in range(n_images): 
    plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray') 
    plt.show() 
    print("Model prediction:", preds[i]) 

The program above works fine. It appears to save the model (I can't be certain, but I see all the directories being created), although it does give a warning:

WARNING:tensorflow:Error encountered when serializing nn_model. Type is unsupported, or the types of the items don't match field type in CollectionDef. 'Estimator' object has no attribute 'name'

Here is the restore-and-apply program, which fails at the predict() line:

import tensorflow as tf 

# Import MNIST data 
from tensorflow.examples.tutorials.mnist import input_data 
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 

sess=tf.Session() 
#First let's load meta graph and restore weights 
#saver = tf.train.import_meta_graph('model/model.ckpt.meta') 
#saver.restore(sess,tf.train.latest_checkpoint('nn_model')) 
tf.saved_model.loader.load(sess, ["nn"], "model") 

model = tf.get_collection('nn_model') 

# Predict single images 
n_images = 4 
# Get images from test set 
test_images = mnist.test.images[:n_images] 
# Prepare the input data 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': test_images}, shuffle=False) 
# Use the model to predict the images class 
preds = list(model.predict(input_fn)) 

# Display 
for i in range(n_images): 
    plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray') 
    plt.show() 
    print("Model prediction:", preds[i]) 

It gives the error:

Traceback (most recent call last):
  File "applynn.py", line 35, in <module>
    preds = list(model.predict(input_fn))
AttributeError: 'module' object has no attribute 'predict'

So what is missing here?

Answer

This problem is now fixed. Here is what I had to do to solve it.

The first part is:

from __future__ import print_function 

from tensorflow.python.saved_model import builder as saved_model_builder 

# Import MNIST data 
from tensorflow.examples.tutorials.mnist import input_data 
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 

import tensorflow as tf 
import matplotlib 
import matplotlib.pyplot as plt 
import numpy as np 
import shutil 

matplotlib.use('TkAgg') 

# Parameters 
learning_rate = 0.1 
num_steps = 1000 
batch_size = 128 
display_step = 100 

# Network Parameters 
n_hidden_1 = 256 # 1st layer number of neurons 
n_hidden_2 = 256 # 2nd layer number of neurons 
num_input = 784 # MNIST data input (img shape: 28*28) 
num_classes = 10 # MNIST total classes (0-9 digits) 

#init = tf.initialize_all_variables() 

sess = tf.Session() 

# Define the input function for training 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': mnist.train.images}, y=mnist.train.labels, 
    batch_size=batch_size, num_epochs=None, shuffle=True) 

# Define the neural network 
def neural_net(x_dict): 
    # TF Estimator input is a dict, in case of multiple inputs 
    x = x_dict['images'] 
    # Hidden fully connected layer with 256 neurons 
    layer_1 = tf.layers.dense(x, n_hidden_1, name="layer_1") 
    # Hidden fully connected layer with 256 neurons 
    layer_2 = tf.layers.dense(layer_1, n_hidden_2, name="layer_2") 
    # Output fully connected layer with a neuron for each class 
    out_layer = tf.layers.dense(layer_2, num_classes, name="out_layer") 
    return out_layer 

# Define the model function (following TF Estimator Template) 
def model_fn(features, labels, mode): 
    # Build the neural network 
    logits = neural_net(features) 

    # Predictions 
    pred_classes = tf.argmax(logits, axis=1) 
    pred_probas = tf.nn.softmax(logits) 

    # If prediction mode, early return 
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)

    # Define loss and optimizer 
    loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=tf.cast(labels, dtype=tf.int32)))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 
    train_op = optimizer.minimize(loss_op, global_step=tf.train.get_global_step()) 

    # Evaluate the accuracy of the model 
    acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes) 

    # TF Estimators require an EstimatorSpec that specifies
    # the different ops for training, evaluating, ...
    estim_specs = tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=pred_classes,
        loss=loss_op,
        train_op=train_op,
        eval_metric_ops={'accuracy': acc_op})

    return estim_specs 

# Build the Estimator 
estimator = tf.estimator.Estimator(model_fn, model_dir='estimator') 

# Train the Model 
estimator.train(input_fn, steps=num_steps) 

# Evaluate the Model 
# Define the input function for evaluating 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': mnist.test.images}, y=mnist.test.labels, 
    batch_size=batch_size, shuffle=False) 
# Use the Estimator 'evaluate' method 
estimator.evaluate(input_fn) 

#model.export_savedmodel(".", input_fn) 

init = tf.global_variables_initializer() 
sess.run(init) 

tf.add_to_collection("nn_model", estimator) 

# Add ops to save and restore all the variables. 
#saver = tf.train.Saver() 

#save_path = saver.save(sess, "model/model.ckpt") 

try: 
    shutil.rmtree("model") 
except: 
    pass 

builder = saved_model_builder.SavedModelBuilder("model") 
builder.add_meta_graph_and_variables(sess, ["nn"]) 
builder.save() 

print("Model saved in file") 

# Predict single images 
n_images = 4 
# Get images from test set 
test_images = mnist.test.images[:n_images] 
# Prepare the input data 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': test_images}, shuffle=False) 
# Use the model to predict the images class 
preds = list(estimator.predict(input_fn)) 

# Display 
for i in range(n_images): 
    plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray') 
    plt.show() 
    print("Model prediction:", preds[i]) 

The second part is:

import tensorflow as tf 
import matplotlib.pyplot as plt 
import numpy as np 

# Import MNIST data 
from tensorflow.examples.tutorials.mnist import input_data 
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) 

# Parameters (model_fn below references learning_rate, even though only PREDICT mode is exercised here)
learning_rate = 0.1

# Network Parameters
n_hidden_1 = 256 # 1st layer number of neurons
n_hidden_2 = 256 # 2nd layer number of neurons
num_classes = 10 # MNIST total classes (0-9 digits)

# Define the neural network 
def neural_net(x_dict): 
    # TF Estimator input is a dict, in case of multiple inputs 
    x = x_dict['images'] 
    # Hidden fully connected layer with 256 neurons 
    layer_1 = tf.layers.dense(x, n_hidden_1, name="layer_1") 
    # Hidden fully connected layer with 256 neurons 
    layer_2 = tf.layers.dense(layer_1, n_hidden_2, name="layer_2") 
    # Output fully connected layer with a neuron for each class 
    out_layer = tf.layers.dense(layer_2, num_classes, name="out_layer") 
    return out_layer 

# Define the model function (following TF Estimator Template) 
def model_fn(features, labels, mode): 
    # Build the neural network 
    logits = neural_net(features) 

    # Predictions 
    pred_classes = tf.argmax(logits, axis=1) 
    pred_probas = tf.nn.softmax(logits) 

    # If prediction mode, early return 
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)

    # Define loss and optimizer 
    loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=tf.cast(labels, dtype=tf.int32)))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) 
    train_op = optimizer.minimize(loss_op, global_step=tf.train.get_global_step()) 

    # Evaluate the accuracy of the model 
    acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes) 

    # TF Estimators require an EstimatorSpec that specifies
    # the different ops for training, evaluating, ...
    estim_specs = tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=pred_classes,
        loss=loss_op,
        train_op=train_op,
        eval_metric_ops={'accuracy': acc_op})

    return estim_specs 


sess=tf.Session() 

estimator = tf.estimator.Estimator(model_fn, model_dir='estimator') 

# Predict single images 
n_images = 4 
# Get images from test set 
test_images = mnist.test.images[:n_images] 
# Prepare the input data 
input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': test_images}, shuffle=False) 
# Use the model to predict the images class 
preds = list(estimator.predict(input_fn)) 

# Display 
for i in range(n_images): 
    plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray') 
    plt.show() 
    print("Model prediction:", preds[i]) 

Note that I renamed the variable from model to estimator, since it really is an Estimator. I also pass a model_dir so that the estimator is serialized separately from the other variables. In addition, I had to make sure the second Python file has access to the two functions (neural_net and model_fn) and any variables they depend on. A few other small fixes were made to the code.
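
For completeness: the commented-out export_savedmodel call in the first part hints at the more standard way to persist a trained Estimator as a SavedModel, via a serving input receiver. The sketch below is only illustrative, assuming the same TF 1.x Estimator API used above; the serving_input_fn helper and the 'export' directory name are mine, not part of the original code.

# Sketch: export the trained Estimator as a SavedModel (assumes TF 1.x, as above).
def serving_input_fn():
    # Placeholder matching the 'images' feature dict used by numpy_input_fn (784 = 28*28).
    images = tf.placeholder(tf.float32, shape=[None, 784], name='images')
    return tf.estimator.export.ServingInputReceiver(
        features={'images': images},
        receiver_tensors={'images': images})

# 'estimator' is the trained tf.estimator.Estimator from the first part.
export_dir = estimator.export_savedmodel('export', serving_input_fn)
print("SavedModel exported to", export_dir)

The exported directory could then be loaded in the second script with tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], export_dir) and run through its prediction signature, but re-creating the Estimator from model_dir as shown above is the simpler route for this example.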