TensorRT 7.0 Study Notes (3) --- Converting an ONNX Model with the Python API and Serializing It

import tensorrt as trt
# Build a logger
trt_logger = trt.Logger(trt.Logger.WARNING)

# Create a network
# Option 1: parse a Caffe model with CaffeParser
datatype = trt.float32
with trt.Builder(trt_logger) as builder, \
        builder.create_network() as network, \
        trt.CaffeParser() as parser:

    # parse() returns an IBlobNameToTensor mapping Caffe blob names to TensorRT tensors
    model_tensors = parser.parse(
        deploy="sample.prototxt",
        model="sample_weight.caffemodel",
        network=network,
        dtype=datatype)
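
Before an engine can be built from the parsed Caffe network, its output tensor(s) still have to be marked. A minimal sketch, assuming the prototxt's output blob is called "prob" (a hypothetical name, replace it with your own):

    # Mark the network output; "prob" is a placeholder for the real output blob name
    network.mark_output(model_tensors.find("prob"))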

# Option 2: parse an ONNX model with OnnxParser
# TensorRT 7 requires an explicit-batch network when using the ONNX parser
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
model_path = "sample.onnx"  # placeholder path to the ONNX model

with trt.Builder(trt_logger) as builder, \
        builder.create_network(EXPLICIT_BATCH) as network, \
        trt.OnnxParser(network, trt_logger) as parser:
    with open(model_path, 'rb') as model:
        if not parser.parse(model.read()):
            # Report every parsing error before giving up
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Create the engine
    config = builder.create_builder_config()
    config.max_workspace_size = 1 << 20  # 1 MiB of build workspace
    with builder.build_engine(network, config) as engine:
        # Serialize the engine and write it to disk
        with open("sample.engine", "wb") as f:
            f.write(engine.serialize())
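
To reuse the serialized engine later, deserialize it with a Runtime. A minimal sketch ("sample.engine" is the file written above; the rest is standard TensorRT 7 Python API):

with open("sample.engine", "rb") as f, trt.Runtime(trt_logger) as runtime:
    # Rebuild the engine from the serialized bytes
    engine = runtime.deserialize_cuda_engine(f.read())
    # The execution context holds per-inference state; allocate input/output
    # buffers and call context.execute_v2(bindings) to actually run inference
    context = engine.create_execution_context()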


 
