[Object Detection] An analysis of faster-rcnn demo.py
This post walks through the py-faster-rcnn/tools/demo.py script. It is run as ./demo.py --net vgg16.
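Before stepping through the file, here is a minimal sketch of how the --net vgg16 choice is turned into the prototxt and caffemodel paths the script loads. The directory values below are only stand-ins for cfg.MODELS_DIR and cfg.DATA_DIR (assumed to be the usual py-faster-rcnn defaults); the real script builds the same paths in its __main__ block.

import os

NETS = {'vgg16': ('VGG16', 'VGG16_faster_rcnn_final.caffemodel')}
models_dir = 'models/pascal_voc'  # stand-in for cfg.MODELS_DIR
data_dir = 'data'                 # stand-in for cfg.DATA_DIR

net_name, weights = NETS['vgg16']
prototxt = os.path.join(models_dir, net_name, 'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
caffemodel = os.path.join(data_dir, 'faster_rcnn_models', weights)
print(prototxt)    # models/pascal_voc/VGG16/faster_rcnn_alt_opt/faster_rcnn_test.pt
print(caffemodel)  # data/faster_rcnn_models/VGG16_faster_rcnn_final.caffemodel

With that mapping in mind, the annotated script follows.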
#!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt # matplotlib: plotting / drawing the detection results
import numpy as np # numpy: numerical array operations
import scipy.io as sio # scipy.io: reading and writing MATLAB .mat files
import caffe, os, sys, cv2
import argparse # argparse: command-line argument parsing
# CLASSES = ('__background__',
# '10', '16', '17', '20',
# '22', '23', '30')
CLASSES = ('__background__', # background class plus the object classes
'car', 'truck')
''' To run the demo with VGG16: ./demo.py --net vgg16
NETS maps each supported network name to its model directory name and the
trained .caffemodel weights file.
'''
NETS = {'vgg16': ('VGG16',
'VGG16_faster_rcnn_final.caffemodel'),
'vgg_m': ('VGG_CNN_M_1024',
'VGG_CNN_M_1024_faster_rcnn_final.caffemodel'),
'zf': ('ZF',
'ZF_faster_rcnn_final.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0] # indices of detections whose score passes the threshold
if len(inds) == 0:
return
im = im[:, :, (2, 1, 0)] # convert OpenCV's BGR channel order to RGB for matplotlib
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4] # (Xmin,Ymin,Xmax,Ymax)
score = dets[i, -1] # detection confidence score
# bbox[0]:x, bbox[1]:y, bbox[2]:x+w, bbox[3]:y+h
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
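# (Annotation added for this walkthrough, not part of the original demo.py:
#  vis_detections expects dets as an (N, 5) float array whose rows are
#  [xmin, ymin, xmax, ymax, score]; only rows with score >= thresh are drawn.)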
def demo(net, image_name):
# Run detection on one image and visualize the per-class results
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
# im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name) # original behaviour: load the image from data/demo
im_file = image_name
# print(im_file)
# print('\n')
im = cv2.imread(im_file) # read the image with OpenCV (BGR order)
# cv2.imshow("1",im)
# cv2.waitKey()
# Detect all object classes and regress object bounds
timer = Timer() # lightweight timer from utils/timer.py, built on time.time()
timer.tic() # start timing
scores, boxes = im_detect(net, im) # forward pass: class scores and regressed boxes for every proposal
timer.toc() # stop timing and accumulate the elapsed time
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.5 # minimum class score for a detection to be drawn
# CONF_THRESH = 0.7
NMS_THRESH = 0.2 # IoU threshold for non-maximum suppression
for cls_ind, cls in enumerate(CLASSES[1:]): # enumerate: yields (index, class name) pairs
cls_ind += 1 # because we skipped background; cls_ind: class index, cls: class name
cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)] # columns 4k:4(k+1) hold this class's regressed boxes
cls_scores = scores[:, cls_ind] # this class's score for every proposal
dets = np.hstack((cls_boxes, # hstack: append the score column to the box coordinates
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
vis_detections(im, cls, dets, thresh=CONF_THRESH) # draw the detections that survive NMS and the score threshold
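# (Annotation added for this walkthrough: im_detect returns scores with shape
#  (num_proposals, num_classes) and boxes with shape (num_proposals, 4 * num_classes),
#  so class k's regressed boxes live in columns 4k:4(k+1), which is exactly the
#  slice taken above.)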
def parse_args(): # parse the command-line options for demo.py
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int) # GPU 0 is used by default
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
choices=NETS.keys(), default='vgg16') # vgg16 is the default network
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
# Build the paths to the test prototxt and the trained caffemodel
prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_faster_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(prototxt, caffemodel, caffe.TEST) # load the network definition and weights in TEST mode
print '\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _ = im_detect(net, im) # outputs are discarded; these passes only warm up the network
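# (Annotation added for this walkthrough: the two forward passes on a dummy
#  300x500 gray image warm up Caffe/CUDA, so the per-image times printed for
#  the real images are not inflated by one-time initialization cost.)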
# Read the list of image names from a text file instead of using the bundled demo images
f = open("./resize.txt")
lines = f.readlines()
for line in lines:
line = line[:-2] + ".jpg" # the list was written on Windows, so each line ends with \r\n; drop the last 2 characters
# line = line[:-1]
line = os.path.join("/home/txl/Data/resize", line) # full path to the resized image
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for {}'.format(line)
demo(net, line)
plt.show()
"""
im_names = ['1.jpg', '2.jpg','3.jpg','4.jpg','5.jpg','6.jpg','DJI_0269.JPG','DJI_0178.JPG']
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/{}'.format(im_name)
demo(net, im_name)
plt.show()
"""