Errors when fine-tuning Faster R-CNN
1. KeyError: 'max_overlaps'
File "./tools/train_net.py", line 112, in
max_iters=args.max_iters)
File "/usr/local/caffes/xlw/faster-rcnn-third/tools/../lib/fast_rcnn/train.py", line 155, in train_net
roidb = filter_roidb(roidb)
File "/usr/local/caffes/xlw/faster-rcnn-third/tools/../lib/fast_rcnn/train.py", line 145, in filter_roidb
filtered_roidb = [entry for entry in roidb if is_valid(entry)]
File "/usr/local/caffes/xlw/faster-rcnn-third/tools/../lib/fast_rcnn/train.py", line 134, in is_valid
overlaps = entry['max_overlaps']
KeyError: 'max_overlaps'
Solution:
Delete the cache files under the $FRCN_ROOT/data/cache/ directory. The roidb cached there was built by an earlier run (or for a different dataset) and does not contain the 'max_overlaps' field; it is regenerated automatically the next time training starts.
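A minimal shell sketch, assuming $FRCN_ROOT is the root of your py-faster-rcnn checkout (the exact .pkl names depend on the imdb you train on):
cd $FRCN_ROOT
ls data/cache/            # e.g. voc_2007_trainval_gt_roidb.pkl
rm -f data/cache/*.pkl    # the roidb cache is rebuilt on the next training run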
2. AssertionError: assert (boxes[:, 2] >= boxes[:, 0]).all()
File "/py-faster-rcnn/tools/../lib/datasets/imdb.py", line 108, in append_flipped_images
    assert (boxes[:, 2] >= boxes[:, 0]).all()
AssertionError
This happens with custom datasets in which some bounding boxes touch the image border, so one of the coordinates (xmin or ymin) is 0. Faster R-CNN's annotation loader subtracts 1 from Xmin, Ymin, Xmax, Ymax to make the pixel indexes 0-based; the resulting -1 wraps around in the unsigned box array, and the horizontally flipped box then violates boxes[:, 2] >= boxes[:, 0].
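A tiny Python illustration of the wrap-around (the box array in the stock loader is np.uint16; the coordinates here are made up):
import numpy as np

# xmin == 0 on the image border; the loader's 0-based adjustment turns it into -1
x1 = 0 - 1
row = np.array([x1, 0, 50, 60]).astype(np.uint16)
print(row)   # [65535 0 50 60]: with x1 stored as 65535, the flipped box no longer
             # satisfies boxes[:, 2] >= boxes[:, 0], so the assertion fires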
Solution
In lib/datasets/imdb.py, inside the append_flipped_images() function, add the following lines directly below the line boxes[:, 2] = widths[i] - oldx1 - 1:
for b in range(len(boxes)):
    if boxes[b][2] < boxes[b][0]:
        boxes[b][0] = 0
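For placement context, a sketch of the patched loop body, assuming the stock py-faster-rcnn append_flipped_images() (variable names as in that file):
boxes = self.roidb[i]['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = widths[i] - oldx2 - 1
boxes[:, 2] = widths[i] - oldx1 - 1
# added guard: if the flipped x1 ended up past x2 (border box / wrapped value), clamp it to 0
for b in range(len(boxes)):
    if boxes[b][2] < boxes[b][0]:
        boxes[b][0] = 0
assert (boxes[:, 2] >= boxes[:, 0]).all()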
In lib/datasets/pascal_voc.py, inside the _load_pascal_annotation() function, remove the "- 1" subtraction from Xmin, Ymin, Xmax and Ymax so the block reads:
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
    bbox = obj.find('bndbox')
    # Keep the pixel indexes as they are (the original "- 1" is removed)
    x1 = float(bbox.find('xmin').text)
    y1 = float(bbox.find('ymin').text)
    x2 = float(bbox.find('xmax').text)
    y2 = float(bbox.find('ymax').text)
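The rest of the loop is unchanged in the stock loader (a sketch, assuming the standard pascal_voc.py); after editing this file, clear $FRCN_ROOT/data/cache/ again so the annotations are re-read:
    cls = self._class_to_ind[obj.find('name').text.lower().strip()]
    boxes[ix, :] = [x1, y1, x2, y2]
    gt_classes[ix] = cls
    overlaps[ix, cls] = 1.0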
3. Waiting for Faster-RCNN_TF/output/faster_rcnn_end2end/voc_2007_trainval/VGGnet_fast_rcnn_iter_70000.ckpt to exist...
Solution
Newer TensorFlow releases save each snapshot as several files (.data-00000-of-00001, .index, .meta) plus a plain-text 'checkpoint' index, so a file literally named VGGnet_fast_rcnn_iter_70000.ckpt never appears and test_net.py waits forever. Two files need to change.
1. Modify experiments/scripts/faster_rcnn_end2end.sh
#!/bin/bash
# Usage:
# ./experiments/scripts/faster_rcnn_end2end.sh GPU NET DATASET [options args to {train,test}_net.py]
# DATASET is either pascal_voc or coco.
#
# Example:
# ./experiments/scripts/faster_rcnn_end2end.sh 0 VGG_CNN_M_1024 pascal_voc \
# --set EXP_DIR foobar RNG_SEED 42 TRAIN.SCALES "[400, 500, 600, 700]"
set -x
set -e
export PYTHONUNBUFFERED="True"
DEV=$1
DEV_ID=$2
NET=$3
DATASET=$4
array=( $@ )
len=${#array[@]}
EXTRA_ARGS=${array[@]:4:$len}
EXTRA_ARGS_SLUG=${EXTRA_ARGS// /_}
case $DATASET in
  pascal_voc)
    TRAIN_IMDB="voc_2007_trainval"
    TEST_IMDB="voc_2007_test"
    PT_DIR="pascal_voc"
    ITERS=70000
    ;;
  coco)
    # This is a very long and slow training schedule
    # You can probably use fewer iterations and reduce the
    # time to the LR drop (set in the solver to 350,000 iterations).
    TRAIN_IMDB="coco_2014_train"
    TEST_IMDB="coco_2014_minival"
    PT_DIR="coco"
    ITERS=490000
    ;;
  *)
    echo "No dataset given"
    exit
    ;;
esac
LOG="experiments/logs/faster_rcnn_end2end_${NET}_${EXTRA_ARGS_SLUG}.txt.`date +'%Y-%m-%d_%H-%M-%S'`"
exec &> >(tee -a "$LOG")
echo Logging output to "$LOG"
# The model has already been trained, so the training step below stays commented out.
#time python ./tools/train_net.py --device ${DEV} --device_id ${DEV_ID} \
# --weights data/pretrain_model/VGG_imagenet.npy \
# --imdb ${TRAIN_IMDB} \
# --iters ${ITERS} \
# --cfg experiments/cfgs/faster_rcnn_end2end.yml \
# --network VGGnet_train \
# ${EXTRA_ARGS}
set +x
NET_FINAL=`grep -B 1 "done solving" ${LOG} | grep "Wrote snapshot" | awk '{print $4}'`
set -x
# Point --weights at the checkpoint output directory, not at a single .ckpt file
time python ./tools/test_net.py --device ${DEV} --device_id ${DEV_ID} \
  --weights yourPath/Faster-RCNN_TF/output/faster_rcnn_end2end/voc_2007_trainval \
  --imdb ${TEST_IMDB} \
  --cfg experiments/cfgs/faster_rcnn_end2end.yml \
  --network VGGnet_test \
  ${EXTRA_ARGS}
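The modified script is then invoked as before; a hedged example (the argument order DEV DEV_ID NET DATASET matches how the variables are read at the top of the script, and NET only affects the log file name here):
cd Faster-RCNN_TF
./experiments/scripts/faster_rcnn_end2end.sh gpu 0 VGGnet pascal_voc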
2. Modify tools/test_net.py
#!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image database."""
import _init_paths
from fast_rcnn.test import test_net
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
from networks.factory import get_network
import argparse
import pprint
import time, os, sys
import tensorflow as tf
def parse_args():
    """
    Parse input arguments
    """
    parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
    parser.add_argument('--device', dest='device', help='device to use',
                        default='cpu', type=str)
    parser.add_argument('--device_id', dest='device_id', help='device id to use',
                        default=0, type=int)
    parser.add_argument('--def', dest='prototxt',
                        help='prototxt file defining the network',
                        default=None, type=str)
    parser.add_argument('--weights', dest='model',
                        help='model to test',
                        default=None, type=str)
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file', default=None, type=str)
    parser.add_argument('--wait', dest='wait',
                        help='wait until net file exists',
                        default=True, type=bool)
    parser.add_argument('--imdb', dest='imdb_name',
                        help='dataset to test',
                        default='voc_2007_test', type=str)
    parser.add_argument('--comp', dest='comp_mode', help='competition mode',
                        action='store_true')
    parser.add_argument('--network', dest='network_name',
                        help='name of the network',
                        default=None, type=str)

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    return args
if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)

    print('Using config:')
    pprint.pprint(cfg)

    # The original wait on args.model is disabled: with newer TF no single
    # *.ckpt file is ever written, so this loop would never finish.
    # while not os.path.exists(args.model) and args.wait:
    #     print('Waiting for {} to exist...'.format(args.model))
    #     time.sleep(10)

    imdb = get_imdb(args.imdb_name)
    imdb.competition_mode(args.comp_mode)

    device_name = '/{}:{:d}'.format(args.device, args.device_id)
    print device_name

    network = get_network(args.network_name)
    print 'Use network `{:s}` in training'.format(args.network_name)

    if args.device == 'gpu':
        cfg.USE_GPU_NMS = True
        cfg.GPU_ID = args.device_id
    else:
        cfg.USE_GPU_NMS = False

    # start a session
    saver = tf.train.Saver()
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # Newer TF saves each .ckpt snapshot as three files (.data/.index/.meta) plus a
    # 'checkpoint' index file, so wait for the latest snapshot recorded in the
    # output directory and restore from that instead of a single .ckpt path.
    checkpoint_dir = '/home/nfdw/nfdw/Faster-RCNN_TF/output/faster_rcnn_end2end/voc_2007_trainval'
    while True:
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            break
        else:
            print('Waiting for checkpoint in directory {} to exist...'.format(checkpoint_dir))
            time.sleep(10)

    # restore from the discovered checkpoint path instead of args.model
    saver.restore(sess, ckpt.model_checkpoint_path)
    print ('Loading model weights from {:s}').format(ckpt.model_checkpoint_path)

    # derive weights_filename from the checkpoint path (used for the results directory name)
    weights_filename = os.path.splitext(os.path.basename(ckpt.model_checkpoint_path))[0]

    test_net(sess, network, imdb, weights_filename)
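As a quick check of the wait loop above (a sketch, assuming TF 1.x and the default output layout): tf.train.get_checkpoint_state() parses the plain-text 'checkpoint' file inside the directory and returns the newest snapshot path, even though no single *.ckpt file exists on disk:
import tensorflow as tf

ckpt = tf.train.get_checkpoint_state(
    'output/faster_rcnn_end2end/voc_2007_trainval')    # a directory, not a file
if ckpt is not None:
    print(ckpt.model_checkpoint_path)                  # e.g. .../VGGnet_fast_rcnn_iter_70000.ckpt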
4. Display error (matplotlib fails on a machine without a display)
Solution: switch matplotlib to the non-interactive Agg backend before pyplot is imported:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
Matplotlib's own warning applies here: matplotlib.use() must be called before pylab, matplotlib.pyplot, or matplotlib.backends is imported for the first time.
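For example (a sketch; the file names are placeholders), in a visualization script such as tools/demo.py the backend switch has to be the very first matplotlib-related import, and figures are written to disk instead of shown:
import matplotlib
matplotlib.use('Agg')              # must run before the first pyplot import anywhere
import matplotlib.pyplot as plt

# ... build the figure as usual ...
plt.savefig('detections.png')      # plt.show() needs a display; savefig() does not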