netrans model conversion examples

This commit is contained in:
xujiao 2025-04-07 11:32:42 +08:00
parent b39abda2fa
commit 5efdea39c5
10 changed files with 0 additions and 692 deletions

View File

@@ -1,175 +0,0 @@
# TensorFlow Model Conversion Example
This document uses lenet as an example to show how to convert a TensorFlow model with Netrans.
Netrans supports models saved via tf.io.write_graph() from TensorFlow 1.4.x, 2.0.x, 2.3.x, 2.6.x, 2.8.x, 2.10.x, and 2.12.x.
## Installing Netrans
1. First locate your Netrans download directory, then add Netrans to your shell environment with the command below. Remember to replace the placeholder with your actual Netrans download path.
```bash
export NETRANS_PATH=<your Netrans download directory>/bin
```
2. Install netrans_py:
```bash
cd netrans_py
pip3 install -e .
```
## Data Preparation
To convert a TensorFlow model, the model project directory must contain the following files (example contents shown below):
- .pb file: the frozen-graph model file
- inputs_outputs.txt: input/output node definition file
- dataset.txt: calibration data path file
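For the lenet example, both text files are one-liners:
```bash
$ cat lenet/dataset.txt
0.jpg
$ cat lenet/inputs_outputs.txt
--inputs input/x-input --outputs output --input-size-list "28,28,1"
```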
Data preparation is already done for our example; enter its directory with:
```bash
cd netrans/
cd examples/tensorflow
```
The directory now looks like this:
```bash
lenet/
├── 0.jpg              # calibration image
├── dataset.txt        # calibration data path file
├── inputs_outputs.txt # input/output node definition file
└── lenet.pb           # frozen-graph model file
```
## Using the netrans_cli Command-Line Tools
Before using netrans_cli, copy the command-line scripts into the current directory:
```bash
cp ../../netrans_cli/*sh ./
```
The directory now looks like this:
```bash
tensorflow/
├── export.sh
├── gen_inputmeta.sh
├── import_model.sh
├── infer.sh
├── lenet
│ ├── 0.jpg
│ ├── dataset.txt
│ ├── inputs_outputs.txt
│ └── lenet.pb
└── quantize.sh
```
### Model Import
```bash
./import_model.sh lenet
```
This step generates the network structure file (ending in .json) and the weight data file (ending in .data).
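import_model.sh wraps a single converter call; a rough sketch for this example, mirroring the script (run from inside lenet/):
```bash
# Import the frozen graph, passing the node names from inputs_outputs.txt.
$NETRANS_PATH/pnnacc import tensorflow \
    --model lenet.pb \
    --output-data lenet.data \
    --output-model lenet.json \
    $(cat inputs_outputs.txt)
```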
The lenet directory now looks like this:
```bash
lenet/
├── 0.jpg
├── dataset.txt
├── inputs_outputs.txt
├── lenet.data
├── lenet.json
└── lenet.pb
```
### Inputmeta File Generation
Input data normally goes through preprocessing before inference. To make sure the model receives its input data correctly, a matching inputmeta configuration file must be generated.
```bash
./gen_inputmeta.sh lenet
```
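The script wraps a single pnnacc call; a rough sketch, mirroring gen_inputmeta.sh (run from inside lenet/):
```bash
# Generate the inputmeta template from the imported network structure.
$NETRANS_PATH/pnnacc generate inputmeta \
    --model lenet.json \
    --separated-database
```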
The lenet directory now looks like this:
```bash
lenet/
├── 0.jpg
├── dataset.txt
├── inputs_outputs.txt
├── lenet.data
├── lenet_inputmeta.yml
├── lenet.json
└── lenet.pb
```
### Model Quantization
To improve inference efficiency and speed, quantize the model with the command below.
Quantization takes two arguments: the model directory name and the quantization type. Supported types are float, int16, int8, and uint8.
```bash
./quantize.sh lenet uint8
```
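For uint8 the script selects the asymmetric_affine quantizer; a rough sketch of the underlying call, mirroring quantize.sh (run from inside lenet/):
```bash
# Quantize to uint8 using the calibration data referenced by the inputmeta file.
$NETRANS_PATH/pnnacc quantize \
    --batch-size 1 \
    --qtype uint8 \
    --rebuild \
    --quantizer asymmetric_affine \
    --model-quantize lenet_asymmetric_affine.quantize \
    --model lenet.json \
    --model-data lenet.data \
    --with-input-meta lenet_inputmeta.yml \
    --device CPU
```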
The lenet directory now looks like this:
```bash
lenet/
├── 0.jpg
├── dataset.txt
├── inputs_outputs.txt
├── lenet_asymmetric_affine.quantize
├── lenet.data
├── lenet_inputmeta.yml
├── lenet.json
└── lenet.pb
```
### Model Export
Finally, use export.sh to export the model to NBG format and generate an application project. Export takes the same two arguments as quantization: the model directory name and the quantization type (float, int16, int8, or uint8). The quantization type must match the one passed to quantize.sh.
```bash
./export.sh lenet uint8
```
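For uint8 the output lands under wksp/asymmetric_affine; a rough sketch of the underlying call, mirroring export.sh (run from inside lenet/):
```bash
# Export OpenVX C code and pack an NBG binary for the quantized model.
$NETRANS_PATH/pnnacc export ovxlib \
    --model lenet.json \
    --model-data lenet.data \
    --dtype quantized \
    --pack-nbg-viplite \
    --model-quantize lenet_asymmetric_affine.quantize \
    --with-input-meta lenet_inputmeta.yml \
    --optimize 'VIP8000NANOQI_PLUS_PID0XB1' \
    --target-ide-project 'linux64' \
    --viv-sdk $NETRANS_PATH/pnna_sdk \
    --output-path wksp/asymmetric_affine/lenet_asymmetric_affine
```
The script then copies network_binary.nb out of the generated *_nbg_viplite side directory into wksp/asymmetric_affine/.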
The lenet directory now looks like this:
```bash
lenet/
├── 0.jpg
├── dataset.txt
├── inputs_outputs.txt
├── lenet_asymmetric_affine.quantize
├── lenet.data
├── lenet_inputmeta.yml
├── lenet.json
├── lenet.pb
└── wksp
└── asymmetric_affine
├── BUILD
├── dump_core_graph.json
├── graph.json
├── lenetasymmetricaffine.2012.vcxproj
├── lenet_asymmetric_affine.export.data
├── lenetasymmetricaffine.vcxproj
├── main.c
├── makefile.linux
├── network_binary.nb
├── vnn_global.h
├── vnn_lenetasymmetricaffine.c
├── vnn_lenetasymmetricaffine.h
├── vnn_post_process.c
├── vnn_post_process.h
├── vnn_pre_process.c
└── vnn_pre_process.h
```
## Using the netrans_py Python API
### Install netrans_py
If netrans_py was already installed during setup above, skip this step.
```bash
cd netrans_py
pip3 install -e .
```
### Prepare the Example Script
```bash
cd ../examples/tensorflow
cp ../../netrans_py/example.py ./
```
### Run the Example Script
```bash
python3 example.py lenet -q uint8
```

View File

@@ -1,137 +0,0 @@
#!/bin/bash
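# Usage: ./export.sh <model_dir> <float|uint8|int8|int16>
# Exports an imported (and, unless float, quantized) model to OpenVX C code
# and an NBG binary under <model_dir>/wksp/.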
if [ -z "$NETRANS_PATH" ]; then
echo "Need to set enviroment variable NETRANS_PATH"
exit 1
fi
OVXGENERATOR=$NETRANS_PATH/pnnacc
OVXGENERATOR="$OVXGENERATOR export ovxlib"
DATASET=dataset.txt
VERIFY='FALSE'
function export_network()
{
NAME=$1
pushd $NAME
QUANTIZED=$2
if [ ${QUANTIZED} = 'float' ]; then
TYPE=float;
quantization_type="none_quantized"
generate_path='./wksp/none_quantized'
elif [ ${QUANTIZED} = 'uint8' ]; then
quantization_type="asymmetric_affine"
generate_path='./wksp/asymmetric_affine'
TYPE=quantized;
elif [ ${QUANTIZED} = 'int8' ]; then
quantization_type="dynamic_fixed_point-8"
generate_path='./wksp/dynamic_fixed_point-8'
TYPE=quantized;
elif [ ${QUANTIZED} = 'int16' ]; then
quantization_type="dynamic_fixed_point-16"
generate_path='./wksp/dynamic_fixed_point-16'
TYPE=quantized;
else
echo "=========== wrong quantization_type ! ( float / uint8 / int8 / int16 )==========="
exit -1
fi
echo " ======================================================================="
echo " =========== Start Generate $NAME ovx C code with type of ${quantization_type} ==========="
echo " ======================================================================="
mkdir -p "${generate_path}"
# To import the generated C code into a Windows IDE, change the
# --target-ide-project parameter from 'linux64' to 'win32'.
if [ ${QUANTIZED} = 'float' ]; then
cmd="$OVXGENERATOR \
--model ${NAME}.json \
--model-data ${NAME}.data \
--model-quantize ${NAME}.quantize \
--dtype ${TYPE} \
--pack-nbg-viplite \
--model-quantize ${NAME}_${quantization_type}.quantize \
--with-input-meta ${NAME}_inputmeta.yml\
--optimize 'VIP8000NANOQI_PLUS_PID0XB1'\
#--optimize None\
--target-ide-project 'linux64' \
--viv-sdk ${NETRANS_PATH}/pnna_sdk \
--output-path ${generate_path}/${NAME}_${quantization_type}"
else
if [ -f ${NAME}_${quantization_type}.quantize ]; then
echo -e "\033[31m using ${NAME}_${quantization_type}.quantize \033[0m"
else
echo -e "\033[31m Can not find ${NAME}_${quantization_type}.quantize \033[0m"
exit -1;
fi
cmd="$OVXGENERATOR \
--model ${NAME}.json \
--model-data ${NAME}.data \
--model-quantize ${NAME}.quantize \
--dtype ${TYPE} \
--pack-nbg-viplite \
--model-quantize ${NAME}_${quantization_type}.quantize \
--with-input-meta ${NAME}_inputmeta.yml\
--optimize 'VIP8000NANOQI_PLUS_PID0XB1'\
--target-ide-project 'linux64' \
--viv-sdk ${NETRANS_PATH}/pnna_sdk \
--output-path ${generate_path}/${NAME}_${quantization_type}"
fi
if [ "${VERIFY}" = 'TRUE' ]; then
echo $cmd
fi
eval $cmd
# copy input file into source code folder
# sourcefile="`cat ${DATASET}`"
# cpcmd="cp -fr $sourcefile ${generate_path}/"
# echo $cpcmd
# eval $cpcmd
# temp='wksp/temp'
# mkcmd="mkdir -p ${temp}"
# eval $mkcmd
# sourcefile="`cat ${DATASET}`"
# cpcmd="cp -fr $sourcefile ${temp}/"
# echo $cpcmd
# eval $cpcmd
cpcmd="cp ${generate_path}_nbg_viplite/network_binary.nb ${generate_path}/"
eval $cpcmd
delcmd="rm -rf ${generate_path}_nbg_viplite"
eval $delcmd
# rm -rf ${generate_path}
# mvcmd="mv ${temp} ${generate_path}"
# eval $mvcmd
echo " ======================================================================="
echo " =========== End Generate $NAME ovx C code with type of ${quantization_type} ==========="
echo " ======================================================================="
popd
}
if [ "$#" -lt 2 ]; then
echo "Input a network name and quantized type ( float / uint8 / int8 / int16 )"
exit -1
fi
if [ ! -e "${1%/}" ]; then
echo "Directory ${1%/} does not exist !"
exit -2
fi
export_network ${1%/} ${2%/}

View File

@@ -1,28 +0,0 @@
#!/bin/sh
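# Usage: ./gen_inputmeta.sh <model_dir>
# Generates the <model>_inputmeta.yml preprocessing config for an imported model.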
if [ -z "$NETRANS_PATH" ]; then
echo "Need to set enviroment variable NETRANS_PATH"
exit 1
fi
if [ "$#" -ne 1 ]; then
echo "Enter a network name !"
exit 2
fi
if [ ! -e "${1%/}" ]; then
echo "Directory ${1%/} does not exist !"
exit 3
fi
netrans=$NETRANS_PATH/pnnacc
NAME=${1%/}
cd $NAME
$netrans generate \
inputmeta \
--model ${NAME}.json \
--separated-database

View File

@@ -1,209 +0,0 @@
#!/bin/bash
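# Usage: ./import_model.sh <model_dir>
# Detects the model format (Caffe / TensorFlow / ONNX / TFLite / Darknet / PyTorch)
# by file extension and converts it to <model>.json plus <model>.data.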
if [ -z "$NETRANS_PATH" ]; then
echo "Need to set enviroment variable NETRANS_PATH"
exit 1
fi
function import_caffe_network()
{
NAME=$1
CONVERTCAFFE=$NETRANS_PATH/pnnacc
CONVERTCAFFE="$CONVERTCAFFE import caffe"
if [ -f ${NAME}.json ]; then
echo -e "\033[31m rm ${NAME}.json \033[0m"
rm ${NAME}.json
fi
if [ -f ${NAME}.data ]; then
echo -e "\033[31m rm ${NAME}.data \033[0m"
rm ${NAME}.data
fi
echo "=========== Converting $NAME Caffe model ==========="
if [ -f ${NAME}.caffemodel ]; then
cmd="$CONVERTCAFFE \
--model ${NAME}.prototxt \
--weights ${NAME}.caffemodel \
--output-model ${NAME}.json \
--output-data ${NAME}.data"
else
echo "=========== fake Caffe model data file==========="
cmd="$CONVERTCAFFE \
--model ${NAME}.prototxt \
--output-model ${NAME}.json \
--output-data ${NAME}.data"
fi
}
function import_tensorflow_network()
{
NAME=$1
CONVERTF=$NETRANS_PATH/pnnacc
CONVERTF="$CONVERTF import tensorflow"
if [ -f ${NAME}.json ]; then
echo -e "\033[31m rm ${NAME}.json \033[0m"
rm ${NAME}.json
fi
if [ -f ${NAME}.data ]; then
echo -e "\033[31m rm ${NAME}.data \033[0m"
rm ${NAME}.data
fi
echo "=========== Converting $NAME Tensorflow model ==========="
cmd="$CONVERTF \
--model ${NAME}.pb \
--output-data ${NAME}.data \
--output-model ${NAME}.json \
$(cat inputs_outputs.txt)"
}
function import_onnx_network()
{
NAME=$1
CONVERTONNX=$NETRANS_PATH/pnnacc
CONVERTONNX="$CONVERTONNX import onnx"
if [ -f ${NAME}.json ]; then
echo -e "\033[31m rm ${NAME}.json \033[0m"
rm ${NAME}.json
fi
if [ -f ${NAME}.data ]; then
echo -e "\033[31m rm ${NAME}.data \033[0m"
rm ${NAME}.data
fi
echo "=========== Converting $NAME ONNX model ==========="
cmd="$CONVERTONNX \
--model ${NAME}.onnx \
--output-model ${NAME}.json \
--output-data ${NAME}.data"
}
function import_tflite_network()
{
NAME=$1
CONVERTTFLITE=$NETRANS_PATH/pnnacc
CONVERTTFLITE="$CONVERTTFLITE import tflite"
if [ -f ${NAME}.json ]; then
echo -e "\033[31m rm ${NAME}.json \033[0m"
rm ${NAME}.json
fi
if [ -f ${NAME}.data ]; then
echo -e "\033[31m rm ${NAME}.data \033[0m"
rm ${NAME}.data
fi
echo "=========== Converting $NAME TFLite model ==========="
cmd="$CONVERTTFLITE \
--model ${NAME}.tflite \
--output-model ${NAME}.json \
--output-data ${NAME}.data"
}
function import_darknet_network()
{
NAME=$1
CONVERTDARKNET=$NETRANS_PATH/pnnacc
CONVERTDARKNET="$CONVERTDARKNET import darknet"
if [ -f ${NAME}.json ]; then
echo -e "\033[31m rm ${NAME}.json \033[0m"
rm ${NAME}.json
fi
if [ -f ${NAME}.data ]; then
echo -e "\033[31m rm ${NAME}.data \033[0m"
rm ${NAME}.data
fi
echo "=========== Converting $NAME darknet model ==========="
cmd="$CONVERTDARKNET \
--model ${NAME}.cfg \
--weight ${NAME}.weights \
--output-model ${NAME}.json \
--output-data ${NAME}.data"
}
function import_pytorch_network()
{
NAME=$1
CONVERTPYTORCH=$NETRANS_PATH/pnnacc
CONVERTPYTORCH="$CONVERTPYTORCH import pytorch"
if [ -f ${NAME}.json ]; then
echo -e "\033[31m rm ${NAME}.json \033[0m"
rm ${NAME}.json
fi
if [ -f ${NAME}.data ]; then
echo -e "\033[31m rm ${NAME}.data \033[0m"
rm ${NAME}.data
fi
echo "=========== Converting $NAME pytorch model ==========="
cmd="$CONVERTPYTORCH \
--model ${NAME}.pt \
--output-model ${NAME}.json \
--output-data ${NAME}.data \
$(cat input_size.txt)"
}
function import_network()
{
NAME=$1
pushd $NAME
if [ -f ${NAME}.prototxt ]; then
import_caffe_network ${1%/}
elif [ -f ${NAME}.pb ]; then
import_tensorflow_network ${1%/}
elif [ -f ${NAME}.onnx ]; then
import_onnx_network ${1%/}
elif [ -f ${NAME}.tflite ]; then
import_tflite_network ${1%/}
elif [ -f ${NAME}.weights ]; then
import_darknet_network ${1%/}
elif [ -f ${NAME}.pt ]; then
import_pytorch_network ${1%/}
else
echo "=========== can not find suitable model files ==========="
fi
echo $cmd
eval $cmd
if [ -f ${NAME}.data ] && [ -f ${NAME}.json ]; then
echo -e "\033[31m SUCCESS \033[0m"
else
echo -e "\033[31m ERROR ! \033[0m"
fi
popd
}
if [ "$#" -ne 1 ]; then
echo "Input a network name !"
exit -1
fi
if [ ! -e "${1%/}" ]; then
echo "Directory ${1%/} does not exist !"
exit -2
fi
import_network ${1%/}

View File

@@ -1,65 +0,0 @@
#!/bin/bash
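# Usage: ./infer.sh <model_dir> <float|uint8|int8|int16>
# Runs inference with the converted model on CPU and writes outputs to <model_dir>/inf/.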
if [ -z "$NETRANS_PATH" ]; then
echo "Need to set enviroment variable NETRANS_PATH"
exit 1
fi
TENSORZONX=$NETRANS_PATH/pnnacc
TENSORZONX="$TENSORZONX inference"
DATASET=./dataset.txt
function inference_network()
{
NAME=$1
pushd $NAME
QUANTIZED=$2
inf_path='./inf'
if [ ${QUANTIZED} = 'float' ]; then
TYPE=float32;
quantization_type="float32"
elif [ ${QUANTIZED} = 'uint8' ]; then
quantization_type="asymmetric_affine"
TYPE=quantized;
elif [ ${QUANTIZED} = 'int8' ]; then
quantization_type="dynamic_fixed_point-8"
TYPE=quantized;
elif [ ${QUANTIZED} = 'int16' ]; then
quantization_type="dynamic_fixed_point-16"
TYPE=quantized;
else
echo "=========== wrong quantization_type ! ( float / uint8 / int8 / int16 )==========="
exit -1
fi
cmd="$TENSORZONX \
--dtype ${TYPE} \
--batch-size 1 \
--model-quantize ${NAME}_${quantization_type}.quantize \
--model ${NAME}.json \
--model-data ${NAME}.data \
--output-dir ${inf_path} \
--with-input-meta ${NAME}_inputmeta.yml \
--device CPU"
echo $cmd
eval $cmd
echo "=========== End inference $NAME model ==========="
popd
}
if [ "$#" -lt 2 ]; then
echo "Input a network name and quantized type ( float / uint8 / int8 / int16 )"
exit -1
fi
if [ ! -e "${1%/}" ]; then
echo "Directory ${1%/} does not exist !"
exit -2
fi
inference_network ${1%/} ${2%/}

Binary file not shown.


View File

@@ -1 +0,0 @@
0.jpg

View File

@@ -1 +0,0 @@
--inputs input/x-input --outputs output --input-size-list "28,28,1"

Binary file not shown.

View File

@@ -1,76 +0,0 @@
#!/bin/bash
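# Usage: ./quantize.sh <model_dir> <uint8|int8|int16>
# Produces the <model>_<quantization_type>.quantize table from the calibration data.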
if [ -z "$NETRANS_PATH" ]; then
echo "Need to set enviroment variable NETRANS_PATH"
exit 1
fi
TENSORZONEX=$NETRANS_PATH/pnnacc
TENSORZONEX="$TENSORZONEX quantize"
DATASET=./dataset.txt
function quantize_network()
{
NAME=$1
pushd $NAME
QUANTIZED=$2
if [ ${QUANTIZED} = 'float' ]; then
echo "=========== do not need quantied==========="
exit -1
elif [ ${QUANTIZED} = 'uint8' ]; then
quantization_type="asymmetric_affine"
elif [ ${QUANTIZED} = 'int8' ]; then
quantization_type="dynamic_fixed_point-8"
elif [ ${QUANTIZED} = 'int16' ]; then
quantization_type="dynamic_fixed_point-16"
else
echo "=========== wrong quantization_type ! ( uint8 / int8 / int16 )==========="
exit -1
fi
echo " ======================================================================="
echo " ==== Start Quantizing $NAME model with type of ${quantization_type} ==="
echo " ======================================================================="
if [ -f ${NAME}_${quantization_type}.quantize ]; then
echo -e "\033[31m rm ${NAME}_${quantization_type}.quantize \033[0m"
rm ${NAME}_${quantization_type}.quantize
fi
cmd="$TENSORZONEX \
--batch-size 1 \
--qtype ${QUANTIZED} \
--rebuild \
--quantizer ${quantization_type%-*} \
--model-quantize ${NAME}_${quantization_type}.quantize \
--model ${NAME}.json \
--model-data ${NAME}.data \
--with-input-meta ${NAME}_inputmeta.yml \
--device CPU"
echo $cmd
eval $cmd
if [ -f ${NAME}_${quantization_type}.quantize ]; then
echo -e "\033[31m SUCCESS \033[0m"
else
echo -e "\033[31m ERROR ! \033[0m"
fi
popd
}
if [ "$#" -lt 2 ]; then
echo "Input a network name and quantized type ( uint8 / int8 / int16 )"
exit -1
fi
if [ ! -e "${1%/}" ]; then
echo "Directory ${1%/} does not exist !"
exit -2
fi
quantize_network ${1%/} ${2%/}