
I am developing a TensorFlow Serving client/server application based on the chatbot-retrieval project, and the served model fails with the error "You must feed a value for placeholder tensor 'input_example_tensor' with dtype string and shape [1]".

My code has two parts: the serving part and the client part.

Below are code snippets for the serving part.

# imports used by the serving snippets below 
import tensorflow as tf 
from tensorflow.contrib.session_bundle import exporter 

def get_features(context, utterance): 

    context_len = 50 
    utterance_len = 50 

    features = { 
        "context": context, 
        "context_len": tf.constant(context_len, shape=[1,1], dtype=tf.int64), 
        "utterance": utterance, 
        "utterance_len": tf.constant(utterance_len, shape=[1,1], dtype=tf.int64), 
    } 

    return features 


def my_input_fn(estimator, input_example_tensor): 
     feature_configs = { 
       'context':tf.FixedLenFeature(shape=[50], dtype=tf.int64), 
       'utterance':tf.FixedLenFeature(shape=[50], dtype=tf.int64) 
       } 
     tf_example = tf.parse_example(input_example_tensor, feature_configs) 
     context = tf.identity(tf_example['context'], name='context') 
     utterance = tf.identity(tf_example['utterance'], name='utterance') 
     features = get_features(context, utterance) 
     return features 

def my_signature_fn(input_example_tensor, features, predictions): 
    feature_configs = { 
      'context':tf.FixedLenFeature(shape=[50], dtype=tf.int64), 
      'utterance':tf.FixedLenFeature(shape=[50], dtype=tf.int64) 
      } 

    tf_example = tf.parse_example(input_example_tensor, feature_configs) 
    tf_context = tf.identity(tf_example['context'], name='tf_context_utterance') 
    tf_utterance = tf.identity(tf_example['utterance'], name='tf_utterance') 

    default_graph_signature = exporter.regression_signature(
       input_tensor=input_example_tensor, 
       output_tensor=tf.identity(predictions) 
      ) 

    named_graph_signatures = { 
       'inputs':exporter.generic_signature(
        { 
         'context':tf_context, 
         'utterance':tf_utterance 
        } 
       ), 
       'outputs':exporter.generic_signature(
        { 
         'scores':predictions 
        } 
       ) 
       } 

    return default_graph_signature, named_graph_signatures 

def main(): 
     ##preliminary codes here## 

     estimator.fit(input_fn=input_fn_train, steps=100, monitors=[eval_monitor]) 

     estimator.export(
       export_dir = FLAGS.export_dir, 
       input_fn = my_input_fn, 
       use_deprecated_input_fn = True, 
       signature_fn = my_signature_fn, 
       exports_to_keep = 1 
      ) 

Below are code snippets for the client part.

# imports used by the client snippets below 
import numpy as np 
import tensorflow as tf 

def tokenizer_fn(iterator): 
    return (x.split(" ") for x in iterator) 

# restore the vocabulary processor that was used during training 
vp = tf.contrib.learn.preprocessing.VocabularyProcessor.restore(FLAGS.vocab_processor_file) 

input_context = "biz banka kart farkli bir banka atmsinde para" 
input_utterance = "farkli banka kart biz banka atmsinde para" 

context_feature = np.array(list(vp.transform([input_context]))) 
utterance_feature = np.array(list(vp.transform([input_utterance]))) 

context_tensor = tf.contrib.util.make_tensor_proto(context_feature, shape=[1, context_feature.size]) 
utterance_tensor = tf.contrib.util.make_tensor_proto(utterance_feature, shape=[1, utterance_feature.size]) 

request.inputs['context'].CopyFrom(context_tensor) 
request.inputs['utterance'].CopyFrom(utterance_tensor) 

# the following lines come from inside the client's inference function; 
# request, stub, result_counter, label and _create_rpc_callback are defined elsewhere 
result_counter.throttle() 
result_future = stub.Predict.future(request, 5.0)  # 5 seconds timeout 
result_future.add_done_callback(
    _create_rpc_callback(label[0], result_counter)) 
return result_counter.get_error_rate() 

Both the serving and the client parts build without errors. After running the serving application and then the client application, the following strange error is returned to the client when the RPC call completes:

AbortionError(code=StatusCode.INVALID_ARGUMENT, details="You must feed a value for placeholder tensor 'input_example_tensor' with dtype string and shape [1] 
     [[Node: input_example_tensor = Placeholder[_output_shapes=[[1]], dtype=DT_STRING, shape=[1], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]") 

The error is strange because there seems to be no way to feed that placeholder from the client application at all, and yet this is the error I get.

How can I feed data to the placeholder 'input_example_tensor' when I access the model through TensorFlow Serving?
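
For reference, my_input_fn feeds input_example_tensor into tf.parse_example, so the exported default signature expects a single serialized tf.train.Example proto, i.e. a string tensor of shape [1]. The rough sketch below (reusing vp, input_context, input_utterance and request from the client snippet above) shows one way such a value could be built and attached to the request; the key 'examples' is only an assumed name and depends on how the exported signature actually exposes that input.

# Sketch only: build the serialized tf.train.Example that 
# 'input_example_tensor' expects (dtype string, shape [1]). 
context_ids = np.array(list(vp.transform([input_context])), dtype=np.int64).ravel() 
utterance_ids = np.array(list(vp.transform([input_utterance])), dtype=np.int64).ravel() 

example = tf.train.Example(features=tf.train.Features(feature={ 
    'context': tf.train.Feature(int64_list=tf.train.Int64List(value=context_ids.tolist())), 
    'utterance': tf.train.Feature(int64_list=tf.train.Int64List(value=utterance_ids.tolist())), 
})) 
serialized = example.SerializeToString() 

# dtype string and shape [1], exactly what the placeholder asks for 
example_tensor = tf.contrib.util.make_tensor_proto(serialized, shape=[1]) 
request.inputs['examples'].CopyFrom(example_tensor)  # 'examples' is an assumed key name 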

ANSWER: (I am posting my answer here because I cannot post it as a proper answer owing to my lack of StackOverflow badges. Anyone who volunteers to submit it as an answer to the question is more than welcome.)

I was able to solve the problem by using the option use_deprecated_input_fn = False in the estimator.export function and changing the input signatures accordingly. With use_deprecated_input_fn = False, the input_fn takes no arguments and returns (features, None), and the serialized-example placeholder has to be exposed in the features dict under the key passed as input_feature_key.

Below is the final code, which runs without any problems.

def get_features(input_example_tensor, context, utterance): 
    context_len = 50 
    utterance_len = 50 
    features = { 
        "my_input_example_tensor": input_example_tensor, 
        "context": context, 
        "context_len": tf.constant(context_len, shape=[1,1], dtype=tf.int64), 
        "utterance": utterance, 
        "utterance_len": tf.constant(utterance_len, shape=[1,1], dtype=tf.int64), 
    } 

    return features 

def my_input_fn(): 
    input_example_tensor = tf.placeholder(tf.string, name='tf_example_placeholder') 

    feature_configs = { 
      'context':tf.FixedLenFeature(shape=[50], dtype=tf.int64), 
      'utterance':tf.FixedLenFeature(shape=[50], dtype=tf.int64) 
      } 
    tf_example = tf.parse_example(input_example_tensor, feature_configs) 
    context = tf.identity(tf_example['context'], name='context') 
    utterance = tf.identity(tf_example['utterance'], name='utterance') 
    features = get_features(input_example_tensor, context, utterance) 

    return features, None 

def my_signature_fn(input_example_tensor, features, predictions): 
    default_graph_signature = exporter.regression_signature(
       input_tensor=input_example_tensor, 
       output_tensor=predictions 
      ) 

    named_graph_signatures = { 
       'inputs':exporter.generic_signature(
        { 
         'context':features['context'], 
         'utterance':features['utterance'] 
        } 
       ), 
       'outputs':exporter.generic_signature(
        { 
         'scores':predictions 
        } 
       ) 
       } 

    return default_graph_signature, named_graph_signatures 

def main(): 
    ##preliminary codes here## 

    estimator.fit(input_fn=input_fn_train, steps=100, monitors=[eval_monitor]) 

    estimator._targets_info = tf.contrib.learn.estimators.tensor_signature.TensorSignature(tf.constant(0, shape=[1,1])) 

    estimator.export(
      export_dir = FLAGS.export_dir, 
      input_fn = my_input_fn, 
      input_feature_key ="my_input_example_tensor", 
      use_deprecated_input_fn = False, 
      signature_fn = my_signature_fn, 
      exports_to_keep = 1 
     ) 

Were you able to get a stack trace for the error? It is not clear what the dtype and shape of the value actually being fed are, which is what triggers the error.


I was able to solve the problem by using the option use_deprecated_input_fn = False in the estimator.export function and changing the input signatures accordingly. Since I am new to the site, StackOverflow will not accept an answer posted from my account, and this question has been downvoted.


Could you please upvote this question so that I can post the working code as an answer?

Answers


The OP solved this themselves but could not post a self-answer, so here is their answer:

The problem was solved by using the option use_deprecated_input_fn = False in the estimator.export function and changing the input signatures accordingly:

def my_signature_fn(input_example_tensor, features, predictions): 
    default_graph_signature = exporter.regression_signature(
     input_tensor=input_example_tensor, 
     output_tensor=predictions 
    ) 

    named_graph_signatures = { 
     'inputs':exporter.generic_signature(
      { 
      'context':features['context'], 
      'utterance':features['utterance'] 
      } 
     ), 
     'outputs':exporter.generic_signature(
      { 
      'scores':predictions 
      } 
     ) 
     } 

    return default_graph_signature, named_graph_signatures 

def main(): 
    ##preliminary codes here## 

    estimator.fit(input_fn=input_fn_train, steps=100, monitors=[eval_monitor]) 

    estimator._targets_info = tf.contrib.learn.estimators.tensor_signature.TensorSignature(tf.constant(0, shape=[1,1])) 

    estimator.export(
     export_dir = FLAGS.export_dir, 
     input_fn = my_input_fn, 
     input_feature_key ="my_input_example_tensor", 
     use_deprecated_input_fn = False, 
     signature_fn = my_signature_fn, 
     exports_to_keep = 1 
    )
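
For completeness, below is a rough end-to-end sketch of a client call against this export, feeding the 'context' and 'utterance' inputs of the named signature directly as int64 tensors, as in the client snippet from the question. The host, port and model name ('chatbot') are assumed placeholders, and vp, input_context and input_utterance are defined as in that snippet.

from grpc.beta import implementations 
import numpy as np 
import tensorflow as tf 
from tensorflow_serving.apis import predict_pb2 
from tensorflow_serving.apis import prediction_service_pb2 

channel = implementations.insecure_channel('localhost', 9000)   # assumed host/port 
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel) 

request = predict_pb2.PredictRequest() 
request.model_spec.name = 'chatbot'   # assumed name given to the model server 

context_feature = np.array(list(vp.transform([input_context])), dtype=np.int64) 
utterance_feature = np.array(list(vp.transform([input_utterance])), dtype=np.int64) 

request.inputs['context'].CopyFrom(
    tf.contrib.util.make_tensor_proto(context_feature, shape=[1, context_feature.size])) 
request.inputs['utterance'].CopyFrom(
    tf.contrib.util.make_tensor_proto(utterance_feature, shape=[1, utterance_feature.size])) 

result = stub.Predict(request, 5.0)   # 5 second timeout 
print(result.outputs['scores']) 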