### HowTo Wiki for some very useful TensorFlow scripts
## Change paths according to your system
## Make sure to do the protobuf compilation and add the Python path before anything else
# From tensorflow/models/research
protoc object_detection/protos/*.proto --python_out=.
# From tensorflow/models/research
export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim
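# Optional sanity check: a minimal Python sketch (assumes TensorFlow 1.x); if the
# protoc step and the PYTHONPATH export above worked, these imports succeed
import tensorflow as tf
from object_detection.protos import pipeline_pb2  # generated by the protoc step above

print(tf.__version__)
print(pipeline_pb2.TrainEvalPipelineConfig())  # empty config proto; proves the import works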
## Export a new frozen model from a pre-trained checkpoint
# From tensorflow/models/research
python object_detection/export_inference_graph.py \
--input_type image_tensor \
--pipeline_config_path ../stuff/ssd_mobilenet_v11_coco.config \
--trained_checkpoint_prefix ../stuff/ssd_mobilenet_checkpoints/model.ckpt \
--output_directory ../models/ssd_mobilenet_v11_coco
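# Quick check that the export worked: a minimal TF 1.x sketch that loads the frozen
# graph and runs the standard detection tensors on a dummy input (the 300x300 shape
# is an assumption; replace it with a real image batch)
import numpy as np
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('../models/ssd_mobilenet_v11_coco/frozen_inference_graph.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

graph = tf.Graph()
with graph.as_default():
    tf.import_graph_def(graph_def, name='')

with tf.Session(graph=graph) as sess:
    image_tensor = graph.get_tensor_by_name('image_tensor:0')
    boxes = graph.get_tensor_by_name('detection_boxes:0')
    scores = graph.get_tensor_by_name('detection_scores:0')
    dummy = np.zeros((1, 300, 300, 3), dtype=np.uint8)  # stand-in for a real image
    out_boxes, out_scores = sess.run([boxes, scores], feed_dict={image_tensor: dummy})
    print(out_boxes.shape, out_scores.shape)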
## Optimize the frozen model for inference
python -m tensorflow.python.tools.optimize_for_inference \
--input ../models/ssd_mobilenet_v11_coco/frozen_inference_graph.pb \
--output ../models/ssd_mobilenet_v11_coco/optimized_inference_graph.pb \
--input_names=image_tensor \
--output_names=detection_boxes,detection_scores,num_detections,detection_classes
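# Verify the optimized graph still contains the named input/output nodes
# (a minimal sketch; the node names match the flags above)
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('../models/ssd_mobilenet_v11_coco/optimized_inference_graph.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

wanted = {'image_tensor', 'detection_boxes', 'detection_scores',
          'num_detections', 'detection_classes'}
present = {node.name for node in graph_def.node} & wanted
print('missing:', wanted - present)  # should print an empty set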
### MAYBE DEPRECATED (Not tested yet)
# Summarize the graph to get information about the model's contents
bazel build tensorflow/tools/graph_transforms:summarize_graph && \
bazel-bin/tensorflow/tools/graph_transforms/summarize_graph \
--in_graph=/tmp/tensorflow_inception_graph.pb
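# If bazel is not set up, a rough Python stand-in for summarize_graph:
# counts op types and lists placeholder inputs (not a full replacement)
import collections
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('/tmp/tensorflow_inception_graph.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

print(collections.Counter(node.op for node in graph_def.node).most_common(10))
print([node.name for node in graph_def.node if node.op == 'Placeholder'])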
# Transform the graph and strip unused nodes to improve inference performance
bazel run tensorflow/tools/graph_transforms:transform_graph -- \
--in_graph=tensorflow_inception_graph.pb \
--out_graph=optimized_inception_graph.pb --inputs='Mul' --outputs='softmax' \
--transforms='
strip_unused_nodes(type=float, shape="1,299,299,3")
fold_constants(ignore_errors=true)
fold_batch_norms
fold_old_batch_norms'
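# The same transforms can also be applied from Python, assuming the
# tensorflow.tools.graph_transforms binding is available in your TF 1.x build
import tensorflow as tf
from tensorflow.tools.graph_transforms import TransformGraph

graph_def = tf.GraphDef()
with tf.gfile.GFile('tensorflow_inception_graph.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

transformed = TransformGraph(
    graph_def, ['Mul'], ['softmax'],
    ['strip_unused_nodes(type=float, shape="1,299,299,3")',
     'fold_constants(ignore_errors=true)',
     'fold_batch_norms',
     'fold_old_batch_norms'])

with tf.gfile.GFile('optimized_inception_graph.pb', 'wb') as f:
    f.write(transformed.SerializeToString())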
# Quantize the graph
bazel build tensorflow/tools/graph_transforms:transform_graph && \
bazel-bin/tensorflow/tools/graph_transforms/transform_graph \
--in_graph=/tmp/tensorflow_inception_optimized.pb \
--out_graph=/tmp/tensorflow_inception_quantized.pb \
--inputs='Mul:0' --outputs='softmax:0' --transforms='quantize_weights'
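# Quick confirmation that quantize_weights took effect: since float32 weights
# become 8-bit, the quantized file should be roughly a quarter of the size
import os
for path in ('/tmp/tensorflow_inception_optimized.pb',
             '/tmp/tensorflow_inception_quantized.pb'):
    print(path, round(os.path.getsize(path) / 1e6, 1), 'MB')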
# How to profile your model
bazel build -c opt tensorflow/tools/benchmark:benchmark_model && \
bazel-bin/tensorflow/tools/benchmark/benchmark_model \
--graph=/tmp/tensorflow_inception_graph.pb --input_layer="Mul" \
--input_layer_shape="1,299,299,3" --input_layer_type="float" \
--output_layer="softmax:0" --show_run_order=false --show_time=false \
--show_memory=false --show_summary=true --show_flops=true --logtostderr