(flappbird) luo@luo-All-Series:~/MyFile/tf-faster-rcnn_box$ ./experiments/scripts/test_faster_rcnn.sh 0 pascal_voc_0712 res101
+ set -e
+ export PYTHONUNBUFFERED=True
+ PYTHONUNBUFFERED=True
+ GPU_ID=0
+ DATASET=pascal_voc_0712
+ NET=res101
+ array=($@)
+ len=3
+ EXTRA_ARGS=
+ EXTRA_ARGS_SLUG=
+ case ${DATASET} in
+ TRAIN_IMDB=voc_2007_trainval+voc_2012_trainval
+ TEST_IMDB=voc_2007_test
+ ITERS=110000
+ ANCHORS='[8,16,32]'
+ RATIOS='[0.5,1,2]'
++ date +%Y-%m-%d_%H-%M-%S
+ LOG=experiments/logs/test_res101_voc_2007_trainval+voc_2012_trainval_.txt.2019-05-17_07-34-16
+ exec
++ tee -a experiments/logs/test_res101_voc_2007_trainval+voc_2012_trainval_.txt.2019-05-17_07-34-16
+ echo Logging output to experiments/logs/test_res101_voc_2007_trainval+voc_2012_trainval_.txt.2019-05-17_07-34-16
Logging output to experiments/logs/test_res101_voc_2007_trainval+voc_2012_trainval_.txt.2019-05-17_07-34-16
+ set +x
+ [[ ! -z '' ]]
+ CUDA_VISIBLE_DEVICES=0
+ time python ./tools/test_net.py --imdb voc_2007_test --model output/res101/voc_2007_trainval+voc_2012_trainval/default/res101_faster_rcnn_iter_110000.ckpt --cfg experiments/cfgs/res101.yml --net res101 --set ANCHOR_SCALES '[8,16,32]' ANCHOR_RATIOS '[0.5,1,2]'
Called with args:
Namespace(cfg_file='experiments/cfgs/res101.yml', comp_mode=False, imdb_name='voc_2007_test', max_per_image=100, model='output/res101/voc_2007_trainval+voc_2012_trainval/default/res101_faster_rcnn_iter_110000.ckpt', net='res101', set_cfgs=['ANCHOR_SCALES', '[8,16,32]', 'ANCHOR_RATIOS', '[0.5,1,2]'], tag='')
Using config:
{'ANCHOR_RATIOS': [0.5, 1, 2],
'ANCHOR_SCALES': [8, 16, 32],
'DATA_DIR': '/home/luo/MyFile/tf-faster-rcnn_box/data',
'EXP_DIR': 'res101',
'MATLAB': 'matlab',
'MOBILENET': {'DEPTH_MULTIPLIER': 1.0,
'FIXED_LAYERS': 5,
'REGU_DEPTH': False,
'WEIGHT_DECAY': 4e-05},
'PIXEL_MEANS': array([[[102.9801, 115.9465, 122.7717]]]),
'POOLING_MODE': 'crop',
'POOLING_SIZE': 7,
'RESNET': {'FIXED_BLOCKS': 1, 'MAX_POOL': False},
'RNG_SEED': 3,
'ROOT_DIR': '/home/luo/MyFile/tf-faster-rcnn_box',
'RPN_CHANNELS': 512,
'TEST': {'BBOX_REG': True,
'HAS_RPN': True,
'MAX_SIZE': 1000,
'MODE': 'nms',
'NMS': 0.3,
'PROPOSAL_METHOD': 'gt',
'RPN_NMS_THRESH': 0.7,
'RPN_POST_NMS_TOP_N': 300,
'RPN_PRE_NMS_TOP_N': 6000,
'RPN_TOP_N': 5000,
'SCALES': [600],
'SVM': False},
'TRAIN': {'ASPECT_GROUPING': False,
'BATCH_SIZE': 256,
'BBOX_INSIDE_WEIGHTS': [1.0, 1.0, 1.0, 1.0],
'BBOX_NORMALIZE_MEANS': [0.0, 0.0, 0.0, 0.0],
'BBOX_NORMALIZE_STDS': [0.1, 0.1, 0.2, 0.2],
'BBOX_NORMALIZE_TARGETS': True,
'BBOX_NORMALIZE_TARGETS_PRECOMPUTED': True,
'BBOX_REG': True,
'BBOX_THRESH': 0.5,
'BG_THRESH_HI': 0.5,
'BG_THRESH_LO': 0.0,
'BIAS_DECAY': False,
'DISPLAY': 20,
'DOUBLE_BIAS': False,
'FG_FRACTION': 0.25,
'FG_THRESH': 0.5,
'GAMMA': 0.1,
'HAS_RPN': True,
'IMS_PER_BATCH': 1,
'LEARNING_RATE': 0.001,
'MAX_SIZE': 640,
'MOMENTUM': 0.9,
'PROPOSAL_METHOD': 'gt',
'RPN_BATCHSIZE': 256,
'RPN_BBOX_INSIDE_WEIGHTS': [1.0, 1.0, 1.0, 1.0],
'RPN_CLOBBER_POSITIVES': False,
'RPN_FG_FRACTION': 0.5,
'RPN_NEGATIVE_OVERLAP': 0.3,
'RPN_NMS_THRESH': 0.7,
'RPN_POSITIVE_OVERLAP': 0.7,
'RPN_POSITIVE_WEIGHT': -1.0,
'RPN_POST_NMS_TOP_N': 2000,
'RPN_PRE_NMS_TOP_N': 12000,
'SCALES': [600],
'SNAPSHOT_ITERS': 500,
'SNAPSHOT_KEPT': 3,
'SNAPSHOT_PREFIX': 'res101_faster_rcnn',
'STEPSIZE': [3000],
'SUMMARY_INTERVAL': 10,
'TRUNCATED': False,
'USE_ALL_GT': True,
'USE_FLIPPED': True,
'USE_GT': False,
'WEIGHT_DECAY': 0.0001},
'USE_E2E_TF': True,
'USE_GPU_NMS': False}
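Note that the ANCHOR_SCALES and ANCHOR_RATIOS passed on the command line via --set are exactly what show up at the top of the dumped config above. As a minimal sketch of how such KEY VALUE pairs are merged into the config (assuming the usual tf-faster-rcnn cfg_from_list pattern of parsing values with ast.literal_eval; the helper name set_cfgs below is only for illustration and the actual code in this fork may differ):

import ast

def set_cfgs(cfg, pairs):
    # Merge --set style KEY VALUE pairs into a config dict, e.g.
    # ['ANCHOR_SCALES', '[8,16,32]', 'ANCHOR_RATIOS', '[0.5,1,2]'].
    assert len(pairs) % 2 == 0, 'keys and values must come in pairs'
    for key, raw in zip(pairs[0::2], pairs[1::2]):
        try:
            value = ast.literal_eval(raw)   # '[8,16,32]' -> [8, 16, 32]
        except (ValueError, SyntaxError):
            value = raw                     # leave plain strings as-is
        cfg[key] = value
    return cfg

cfg = {'ANCHOR_SCALES': [4, 8, 16], 'ANCHOR_RATIOS': [1]}
set_cfgs(cfg, ['ANCHOR_SCALES', '[8,16,32]', 'ANCHOR_RATIOS', '[0.5,1,2]'])
print(cfg)   # {'ANCHOR_SCALES': [8, 16, 32], 'ANCHOR_RATIOS': [0.5, 1, 2]}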
2019-05-17 07:34:17.363980: I tensorflow/core/platform/cpu_feature_guard.cc:140] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
Loading model check point from output/res101/voc_2007_trainval+voc_2012_trainval/default/res101_faster_rcnn_iter_110000.ckpt
Loaded.
im_detect: 1/388 5.542s 0.000s
im_detect: 2/388 5.418s 0.000s
im_detect: 3/388 5.362s 0.000s
im_detect: 4/388 5.249s 0.000s
im_detect: 5/388 5.173s 0.000s
im_detect: 6/388 5.245s 0.000s
im_detect: 7/388 5.317s 0.000s
im_detect: 8/388 5.317s 0.000s
im_detect: 9/388 5.284s 0.000s
im_detect: 10/388 5.313s 0.000s
im_detect: 11/388 5.270s 0.000s
im_detect: 12/388 5.272s 0.000s
im_detect: 13/388 5.260s 0.000s
im_detect: 14/388 5.284s 0.000s
im_detect: 15/388 5.258s 0.000s
im_detect: 16/388 5.254s 0.000s
im_detect: 17/388 5.262s 0.000s
im_detect: 18/388 5.272s 0.000s
im_detect: 19/388 5.275s 0.000s
im_detect: 20/388 5.273s 0.000s
im_detect: 21/388 5.285s 0.000s
im_detect: 22/388 5.275s 0.000s
im_detect: 23/388 5.291s 0.000s
im_detect: 24/388 5.316s 0.000s
im_detect: 25/388 5.321s 0.000s
im_detect: 26/388 5.327s 0.000s
im_detect: 27/388 5.314s 0.000s
im_detect: 28/388 5.305s 0.000s
im_detect: 29/388 5.307s 0.000s
im_detect: 30/388 5.312s 0.000s
im_detect: 31/388 5.315s 0.000s
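The log above stops at image 31 of 388, but the per-image time already tells the story: with 'USE_GPU_NMS': False in the config and TensorFlow warning that the binary was not compiled with AVX2/FMA support, detection is running on the CPU at roughly 5.3 s per image. A rough back-of-the-envelope estimate of the full test pass, assuming the mean time stays near the values printed above (numbers taken from this log, not from the repo):

per_image_s = 5.3    # approximate im_detect time from the lines above
num_images = 388     # total test images reported by the imdb
print('estimated total: %.1f min' % (per_image_s * num_images / 60.0))
# -> about 34 minutes for the whole voc_2007_test pass on this CPU-only setup

With a GPU build of TensorFlow, the same test would typically run far faster per image.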