Saving and loading TensorFlow models, and deploying with TensorFlow Serving + gRPC + Docker

Saving and loading TensorFlow models

TensorFlow offers two ways to save and load a model: the first uses tf.train.Saver(), the second uses SavedModel. Below I walk through both, using code from my own project as the example.

The model code from the project:

class TensorFlowDKT(object):
    def __init__(self, config, batch_size):
        # Import the configured hyperparameters
        self.hiddens = hiddens = config.modelConfig.hidden_layers
        self.num_skills = num_skills = config.num_skills
        self.input_size = input_size = config.input_size
        self.keep_prob_value = config.modelConfig.dropout_keep_prob

        # Define the placeholders that will be fed to the model
        self.max_steps = tf.placeholder(tf.int32, name="max_steps")  # longest sequence in the current batch
        self.input_data = tf.placeholder(tf.float32, [None, None, input_size], name="input_x")
        self.sequence_len = tf.placeholder(tf.int32, [None], name="sequence_len")
        self.keep_prob = tf.placeholder(tf.float32, name="keep_prob")  # dropout keep prob
        self.target_id = tf.placeholder(tf.int32, [None, None], name="target_id")
        self.target_correctness = tf.placeholder(tf.float32, [None, None], name="target_correctness")
        self.flat_target_correctness = None
        self.batch_size = tf.placeholder(tf.int32, name="batch_size")

        # Build the LSTM stack
        hidden_layers = []
        for idx, hidden_size in enumerate(hiddens):
            lstm_layer = tf.nn.rnn_cell.LSTMCell(num_units=hidden_size, state_is_tuple=True)
            hidden_layer = tf.nn.rnn_cell.DropoutWrapper(cell=lstm_layer,
                                                         output_keep_prob=self.keep_prob)
            hidden_layers.append(hidden_layer)
        self.hidden_cell = tf.nn.rnn_cell.MultiRNNCell(cells=hidden_layers, state_is_tuple=True)

        # Use a dynamic RNN so the input sequence lengths can vary
        outputs, self.current_state = tf.nn.dynamic_rnn(cell=self.hidden_cell,
                                                        inputs=self.input_data,
                                                        sequence_length=self.sequence_len,
                                                        dtype=tf.float32)

        # Hidden-to-output weights: [units in the last hidden layer, num_skills]
        output_w = tf.get_variable("W", [hiddens[-1], num_skills])
        output_b = tf.get_variable("b", [num_skills])
        self.output = tf.reshape(outputs, [-1, hiddens[-1]])
        # Because the weights are shared across time steps, b is added to every
        # row of the [batch_size * self.max_steps, num_skills] matrix
        self.logits = tf.matmul(self.output, output_w) + output_b
        self.mat_logits = tf.reshape(self.logits, [-1, self.max_steps, num_skills])
        # Apply a sigmoid to every value at every time step of every sequence in
        # the batch; each value is the mastery of one skill, and every time step
        # outputs the mastery of all skills
        self.pred_all = tf.sigmoid(self.mat_logits, name="pred_all")

        # Compute the loss
        flat_logits = tf.reshape(self.logits, [-1])
        flat_target_correctness = tf.reshape(self.target_correctness, [-1])
        self.flat_target_correctness = flat_target_correctness
        # flat_logits has length batch_size * num_steps * num_skills; the
        # per-step target_id is used to cut it down to batch_size * num_steps
        flat_base_target_index = tf.range(self.batch_size * self.max_steps) * num_skills
        flat_base_target_id = tf.reshape(self.target_id, [-1])
        flat_target_id = flat_base_target_id + flat_base_target_index
        # tf.gather slices a subset out of a tensor
        flat_target_logits = tf.gather(flat_logits, flat_target_id)
        # Apply a sigmoid to the gathered logits
        self.pred = tf.sigmoid(tf.reshape(flat_target_logits, [-1, self.max_steps]), name="pred")
        # Binarize the sigmoid output to 0 or 1
        self.binary_pred = tf.cast(tf.greater_equal(self.pred, 0.5), tf.float32, name="binary_pred")

        # Define the loss function
        with tf.name_scope("loss"):
            # flat_target_logits_sigmoid = tf.nn.log_softmax(flat_target_logits)
            # self.loss = -tf.reduce_mean(flat_target_correctness * flat_target_logits_sigmoid)
            self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=flat_target_correctness,
                                                                               logits=flat_target_logits))
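The flattened-index arithmetic feeding tf.gather is the least obvious part: flat_logits has length batch_size * max_steps * num_skills, so for flattened position i = b * max_steps + t the logit of skill k sits at index i * num_skills + k. A tiny numeric sketch of that bookkeeping (the sizes are made up purely for illustration):

import numpy as np

# Illustrative sizes only: batch_size = 2, max_steps = 3, num_skills = 4
batch_size, max_steps, num_skills = 2, 3, 4
flat_logits = np.arange(batch_size * max_steps * num_skills)  # stands in for the real logits
target_id = np.array([[1, 3, 0],
                      [2, 2, 1]])  # skill asked at each time step

flat_base_target_index = np.arange(batch_size * max_steps) * num_skills  # [0 4 8 12 16 20]
flat_target_id = target_id.reshape(-1) + flat_base_target_index          # [1 7 8 14 18 21]
print(flat_logits[flat_target_id])  # numpy fancy indexing mimics tf.gather here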

At prediction time I need to feed input_data, max_steps, sequence_len, keep_prob, target_id and batch_size, and the output I want is pred_all.

First, the tf.train.Saver() way of saving a model:

with tf.Graph().as_default():
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
    session_conf = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False,
        gpu_options=gpu_options
    )
    sess = tf.Session(config=session_conf)
    ......
    saver = tf.train.Saver(tf.global_variables())
    sess.run(tf.global_variables_initializer())
    print("Initialization done, training starts")
    for i in range(config.trainConfig.epochs):
        np.random.shuffle(train_seqs)
        for params in dataGen.next_batch(train_seqs):
            # Fetch the training set batch by batch and train the model
            self.train_step(params, train_op)
            current_step = tf.train.global_step(sess, global_step)
            # Record the evaluation results periodically
            if current_step % config.trainConfig.evaluate_every == 0:
                print("\nEvaluation:")
                # Evaluate on the test data
                losses = []
                accuracys = []
                aucs = []
                for params in dataGen.next_batch(test_seqs):
                    loss, accuracy, auc = self.dev_step(params)
                    losses.append(loss)
                    accuracys.append(accuracy)
                    aucs.append(auc)
                time_str = datetime.datetime.now().isoformat()
                print("dev: {}, step: {}, loss: {}, acc: {}, auc: {}".
                      format(time_str, current_step, mean(losses), mean(accuracys), mean(aucs)))
            if current_step % config.trainConfig.checkpoint_every == 0:
                path = saver.save(sess, "model/my-model", global_step=current_step)
                print("Saved model checkpoint to {}\n".format(path))

Saving a model with tf.train.Saver() is very simple: only two statements in the code above matter, creating the saver with tf.train.Saver(tf.global_variables()) and writing the checkpoint with saver.save().
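For reference, those two calls in isolation; the max_to_keep argument is my own addition (not in the project code) and simply caps how many recent checkpoints are kept on disk:

saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)  # max_to_keep=5 is an assumption
......
path = saver.save(sess, "model/my-model", global_step=current_step)  # writes model/my-model-<step>.*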

Code for loading the model:

graph = tf.Graph()
with graph.as_default():
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_options)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        checkpoint_file = tf.train.latest_checkpoint("model/")
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        saver.restore(sess, checkpoint_file)

        # Fetch the input tensors that the prediction depends on
        input_x = graph.get_operation_by_name("test/dkt/input_x").outputs[0]
        target_id = graph.get_operation_by_name("test/dkt/target_id").outputs[0]
        keep_prob = graph.get_operation_by_name("test/dkt/keep_prob").outputs[0]
        max_steps = graph.get_operation_by_name("test/dkt/max_steps").outputs[0]
        sequence_len = graph.get_operation_by_name("test/dkt/sequence_len").outputs[0]
        batch_size = graph.get_operation_by_name("test/dkt/batch_size").outputs[0]

        # Fetch the output tensor
        pred_all = graph.get_tensor_by_name("test/dkt/pred_all:0")

        for params in dataGen.next_batch(train_seqs):
            print("step: {}".format(step))
            target_correctness = params['target_correctness']
            pred_all_1 = sess.run([pred_all], feed_dict={input_x: params["input_x"],
                                                         target_id: params["target_id"],
                                                         keep_prob: 1.0,
                                                         max_steps: params["max_len"],
                                                         sequence_len: params["seq_len"],
                                                         batch_size: len(params["seq_len"])})
            print(params["seq_len"])
            sequence_lens.append(params["seq_len"])
            studentSkillMasterProbs.append(pred_all_1)
            studentTargetId.append(params["target_id"])
            studentTargetCorrectness.append(params["target_correctness"])

When loading, we have to pull the input and output tensors we need out of the graph by name, for example with get_tensor_by_name(), which takes the tensor's name; this is why the model class assigns a name argument to each of these tensors. After that, sess.run() is called directly to make predictions.
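The two lookup styles are interchangeable: get_operation_by_name() returns the op, whose first output is the tensor we want, while get_tensor_by_name() takes the tensor name with an explicit output index. A minimal sketch with the names from this model:

# Both lines resolve to the same placeholder tensor; the ":0" suffix means
# "output 0 of the op named test/dkt/input_x"
input_x = graph.get_operation_by_name("test/dkt/input_x").outputs[0]
input_x = graph.get_tensor_by_name("test/dkt/input_x:0")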

This approach produces four files: a bookkeeping file named simply checkpoint, plus xxx.meta, xxx.index and xxx.data-00000-of-00001 for each saved step.
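To see what a checkpoint actually stores, the saved variables can be listed directly; a small sketch, with "model/" being the checkpoint directory used above:

import tensorflow as tf

ckpt = tf.train.latest_checkpoint("model/")  # resolved via the checkpoint bookkeeping file
for name, shape in tf.train.list_variables(ckpt):
    print(name, shape)  # prints each saved variable's name and shape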

The second way of saving a model:

with tf.Graph().as_default():
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
    session_conf = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False,
        gpu_options=gpu_options
    )
    sess = tf.Session(config=session_conf)
    self.sess = sess
    with sess.as_default():
        # Instantiate the DKT model object
        ......
        builder = tf.saved_model.builder.SavedModelBuilder("./builder")
        sess.run(tf.global_variables_initializer())
        print("Initialization done, training starts")
        for i in range(config.trainConfig.epochs):
            ......
        inputs = {"input_x": tf.saved_model.utils.build_tensor_info(self.train_dkt.input_data),
                  "target_id": tf.saved_model.utils.build_tensor_info(self.train_dkt.target_id),
                  "max_steps": tf.saved_model.utils.build_tensor_info(self.train_dkt.max_steps),
                  "sequence_len": tf.saved_model.utils.build_tensor_info(self.train_dkt.sequence_len),
                  "keep_prob": tf.saved_model.utils.build_tensor_info(self.train_dkt.keep_prob),
                  "batch_size": tf.saved_model.utils.build_tensor_info(self.train_dkt.batch_size)}
        outputs = {"pred_all": tf.saved_model.utils.build_tensor_info(self.train_dkt.pred_all)}
        prediction_signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs=inputs, outputs=outputs,
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
        legacy_init_op = tf.group(tf.tables_initializer(), name="legacy_init_op")
        builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING],
                                             signature_def_map={"predict": prediction_signature},
                                             legacy_init_op=legacy_init_op)
        builder.save()

saved_model can likewise capture all of the model's variables, but the more usual form is to name the input and output tensors explicitly and save them as a signature. Besides a predict signature, signatures for training, classification and so on can be saved as well (a sketch follows below).
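For instance, signature_def_map can carry several entries at once. A sketch (registering the signature under the default serving key as well is my addition; the project itself only registers "predict"):

builder.add_meta_graph_and_variables(
    sess, [tf.saved_model.tag_constants.SERVING],
    signature_def_map={
        # the key our clients ask for explicitly
        "predict": prediction_signature,
        # also answering to "serving_default" is an assumption, not in the
        # original project code
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            prediction_signature,
    },
    legacy_init_op=legacy_init_op)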

Code for loading the model:

graph = tf.Graph()
with graph.as_default():
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_options)
    # This key must match the one used when saving, i.e. the
    # signature_def_map={"predict": prediction_signature} passed to
    # builder.add_meta_graph_and_variables()
    signature_key = "predict"
    with tf.Session(graph=graph, config=session_conf) as sess:
        meta_graph_def = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], "./builder")
        signature = meta_graph_def.signature_def

        input_x = sess.graph.get_tensor_by_name(signature[signature_key].inputs["input_x"].name)
        keep_prob = sess.graph.get_tensor_by_name(signature[signature_key].inputs["keep_prob"].name)
        target_id = sess.graph.get_tensor_by_name(signature[signature_key].inputs["target_id"].name)
        max_steps = sess.graph.get_tensor_by_name(signature[signature_key].inputs["max_steps"].name)
        sequence_len = sess.graph.get_tensor_by_name(signature[signature_key].inputs["sequence_len"].name)
        batch_size = sess.graph.get_tensor_by_name(signature[signature_key].inputs["batch_size"].name)
        pred_all = sess.graph.get_tensor_by_name(signature[signature_key].outputs["pred_all"].name)

        for params in dataGen.next_batch(train_seqs):
            print("step: {}".format(step))
            target_correctness = params['target_correctness']
            pred_all_1 = sess.run([pred_all], feed_dict={input_x: params["input_x"],
                                                         target_id: params["target_id"],
                                                         keep_prob: 1.0,
                                                         max_steps: params["max_len"],
                                                         sequence_len: params["seq_len"],
                                                         batch_size: len(params["seq_len"])})
            print(params["seq_len"])
            sequence_lens.append(params["seq_len"])
            studentSkillMasterProbs.append(pred_all_1)
            studentTargetId.append(params["target_id"])
            studentTargetCorrectness.append(params["target_correctness"])
            step += 1

Loading works much like loading a model saved with tf.train.Saver(), except that we no longer need the tensors' full names inside the graph; the signature maps our short keys to them.

This approach saves one saved_model.pb file plus a variables folder, which holds variables.data-00000-of-00001 (the exact numbers may differ) and variables.index.
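If TensorFlow is installed on the machine, the export can be sanity-checked with the bundled saved_model_cli tool, which prints the tag sets, signature names and the input/output tensors behind them:

  saved_model_cli show --dir ./builder --all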

TensorFlow Serving deployment

A model saved the second way above can back a TensorFlow Serving service. Concretely, the server side runs TensorFlow Serving inside Docker, and the client connects to it over gRPC. The full procedure:

Server OS: Ubuntu 16.04

1. Install docker-ce

  See the official website for the detailed installation steps.

2. Pull the official TensorFlow Serving image; you can take the latest tag or pick a specific version

  docker pull tensorflow/serving:latest-devel

3. Start the container and map the gRPC port

  docker run -p 8500:8500 --name grpc -it tensorflow/serving:latest-devel

  Port 8500: the gRPC port

  Port 8501: the RESTful API port

  Besides gRPC, the service can also be called through the RESTful API (a sketch follows after step 5).

4. Copy your model files into the container

  Local model path: ~/builder/00000123. The model name is builder, and 00000123 is the version number (a numeric version folder is required; since SavedModelBuilder above wrote straight to ./builder, move the export into such a folder first). Inside the 00000123 folder sit the saved_model.pb file and the variables folder.
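  The expected on-disk layout is therefore:

builder/
└── 00000123/
    ├── saved_model.pb
    └── variables/
        ├── variables.data-00000-of-00001
        └── variables.index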

  docker cp ~/builder grpc:/online_model. Here grpc is our container's name; because /online_model does not yet exist, docker cp creates it and copies the contents of the local builder directory into it, so the version folder ends up at /online_model/00000123.

5. Start the TensorFlow Serving service inside Docker

  Enter the container: docker exec -it grpc bash

  Start the service: tensorflow_model_server --port=8500 --model_name=builder --model_base_path=/online_model (add --rest_api_port=8501 to also expose the RESTful API from step 3)

  With that, the server side of TensorFlow Serving is running.
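As mentioned in step 3, the deployed model can also be called over the RESTful API instead of gRPC. A minimal sketch, assuming the container was started with -p 8501:8501 and tensorflow_model_server with --rest_api_port=8501 (neither is in the commands above), and that input_x, target_id and sequence_len are numpy arrays prepared exactly as in the client code of step 6:

import json
import requests

payload = {
    "signature_name": "predict",  # must match the key saved in signature_def_map
    "inputs": {
        "input_x": input_x.tolist(),            # [1, max_steps, input_size]
        "target_id": target_id.tolist(),        # [1, max_steps]
        "max_steps": int(max_steps),
        "keep_prob": 1.0,
        "sequence_len": sequence_len.tolist(),  # [1]
        "batch_size": 1,
    },
}
resp = requests.post("http://192.168.39.39:8501/v1/models/builder:predict",
                     data=json.dumps(payload))
pred_all = resp.json()["outputs"]  # nested lists of shape [1, max_steps, num_skills]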

6. Client code

import grpc
import numpy as np
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc

dataGen = DataGenerator(fileName, config)  # the data preprocessing class
dataGen.gen_attr()
test_seqs = dataGen.test_seqs
params = dataGen.format_data(test_seqs)
input_x = params["input_x"][:1]
max_steps = params["max_len"]
batch_size = 1
keep_prob = 1.0
target_id = params["target_id"][:1]
sequence_len = params["seq_len"][:1]

# Connect to the server over gRPC
channel = grpc.insecure_channel("192.168.39.39:8500")
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = predict_pb2.PredictRequest()
# The model name, as passed to tensorflow_model_server via --model_name
request.model_spec.name = "builder"
# The signature name, as used in signature_def_map when saving
request.model_spec.signature_name = "predict"

# Only one record is sent per prediction request; the dtypes and shapes of
# the inputs must match the placeholders defined in the model
request.inputs['input_x'].CopyFrom(tf.contrib.util.make_tensor_proto(input_x, dtype=tf.float32, shape=[input_x.shape[0], input_x.shape[1], input_x.shape[2]]))
request.inputs['target_id'].CopyFrom(tf.contrib.util.make_tensor_proto(target_id, dtype=tf.int32, shape=[target_id.shape[0], target_id.shape[1]]))
request.inputs['max_steps'].CopyFrom(tf.contrib.util.make_tensor_proto(max_steps, dtype=tf.int32))
request.inputs['keep_prob'].CopyFrom(tf.contrib.util.make_tensor_proto(keep_prob, dtype=tf.float32))
request.inputs["sequence_len"].CopyFrom(tf.contrib.util.make_tensor_proto(sequence_len, dtype=tf.int32, shape=[1]))
request.inputs["batch_size"].CopyFrom(tf.contrib.util.make_tensor_proto(batch_size, dtype=tf.int32))

# The response comes back in protobuf format
response = stub.Predict.future(request)
# Extract the predicted values; for a many-to-many LSTM the output holds many
# values, and float_val reads them out as a flat list
res_list = response.result().outputs["pred_all"].float_val
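float_val flattens the [batch_size, max_steps, num_skills] output row-major, so to recover per-step, per-skill probabilities it can be reshaped; a sketch, assuming max_steps here is the plain integer fed into the request:

pred_all = np.array(res_list).reshape(batch_size, max_steps, -1)
# pred_all[0, t, k] is the predicted mastery of skill k after time step t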

That is the whole workflow for standing up a service with TensorFlow Serving. A GPU build of TensorFlow Serving also exists, but it requires installing nvidia-docker, starting nvidia-container-runtime and so on, which is all rather involved; for NLP tasks, predicting on CPU works perfectly well.
