Feature extraction with pretrained Inception-v3 weights (image recognition)
......
with tf.Session() as sess:
    # Some useful tensors:
    # 'softmax:0': A tensor containing the normalized prediction across
    #   1000 labels.
    # 'pool_3:0': A tensor containing the next-to-last layer containing 2048
    #   float description of the image.
    # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
    #   encoding of the image.
    # Runs the softmax tensor by feeding the image_data as input to the graph.
    softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
.......
The original code is the one introduced here; it exposes three useful tensors:
'softmax:0': A tensor containing the normalized prediction across 1000 labels.
'pool_3:0': A tensor containing the next-to-last layer containing 2048 float description of the image.
'DecodeJpeg/contents:0': A tensor containing a string providing JPEG encoding of the image.
For prediction, feed the raw JPEG bytes into 'DecodeJpeg/contents:0' and fetch 'softmax:0'; that alone is enough to test image recognition.
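A minimal prediction sketch, assuming the Inception-v3 graph has already been imported with create_graph() (defined in the full script further down) and that 'test.jpg' stands in for an actual image path:

import numpy as np
import tensorflow as tf

with tf.Session() as sess:
    softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
    image_data = tf.gfile.FastGFile('test.jpg', 'rb').read()  # hypothetical image path
    predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})
    predictions = np.squeeze(predictions)          # one score per class
    top_5 = predictions.argsort()[-5:][::-1]       # node IDs of the 5 highest scores
    print(top_5, predictions[top_5])
    # The NodeLookup class in the full script below can map these IDs to label strings.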
To extract features instead, fetch 'pool_3:0':
fc_tensor = sess.graph.get_tensor_by_name('pool_3:0')
pool_1 = sess.run(fc_tensor, {'DecodeJpeg/contents:0': image_data})
The resulting features can then be saved to a CSV or .mat file.
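For example, assuming pool_1 as computed in the snippet above (shape 1x1x1x2048) and hypothetical output file names, either format can be written like this (the full script below uses scipy.io.savemat):

import numpy as np
import scipy.io as scio

feature = np.squeeze(pool_1)                                 # (2048,) float vector
scio.savemat('feature.mat', {"pool_1": feature})             # MATLAB-readable .mat file
np.savetxt('feature.csv', feature[None, :], delimiter=',')   # single-row CSV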
import tensorflow as tf
import numpy as np
import os
from PIL import Image
import matplotlib.pyplot as plt
import scipy.io as scio

model_dir = 'F:/fqh/models-master/tutorials/image/imagenet/2015'
image = 'F:/fqh/models-master/tutorials/image/imagenet/data_set/face/faces95_72_20_180-200jpgfar-close/'
target_path = image + 'wjhugh/'


class NodeLookup(object):
    """Maps integer node IDs from the graph to human-readable ImageNet labels."""

    def __init__(self, label_lookup_path=None, uid_lookup_path=None):
        if not label_lookup_path:
            label_lookup_path = os.path.join(
                model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
        if not uid_lookup_path:
            uid_lookup_path = os.path.join(
                model_dir, 'imagenet_synset_to_human_label_map.txt')
        self.node_lookup = self.load(label_lookup_path, uid_lookup_path)

    def load(self, label_lookup_path, uid_lookup_path):
        if not tf.gfile.Exists(uid_lookup_path):
            tf.logging.fatal('File does not exist %s', uid_lookup_path)
        if not tf.gfile.Exists(label_lookup_path):
            tf.logging.fatal('File does not exist %s', label_lookup_path)
        # Map each synset UID to its human-readable string.
        proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
        uid_to_human = {}
        for line in proto_as_ascii_lines:
            line = line.strip('\n')
            parse_items = line.split('\t')
            uid = parse_items[0]
            human_string = parse_items[1]
            uid_to_human[uid] = human_string
        # Map each node ID used by the graph to its synset UID.
        proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
        node_id_to_uid = {}
        for line in proto_as_ascii:
            if line.startswith('  target_class:'):
                target_class = int(line.split(': ')[1])
            if line.startswith('  target_class_string:'):
                target_class_string = line.split(': ')[1]
                node_id_to_uid[target_class] = target_class_string[1:-2]
        # Combine both maps: node ID -> human-readable label.
        node_id_to_name = {}
        for key, val in node_id_to_uid.items():
            if val not in uid_to_human:
                tf.logging.fatal('Failed to locate: %s', val)
            name = uid_to_human[val]
            node_id_to_name[key] = name
        return node_id_to_name

    def id_to_string(self, node_id):
        if node_id not in self.node_lookup:
            return ''
        return self.node_lookup[node_id]


def create_graph():
    # Load the frozen Inception-v3 graph definition from the .pb file.
    with tf.gfile.FastGFile(os.path.join(
            model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')


create_graph()

with tf.Session() as sess:
    softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')  # retrieved but not used below; only pool_3 features are extracted
    for root, dirs, files in os.walk(target_path):
        for file in files:
            # print(file)
            img_path = target_path + file
            image_data = tf.gfile.FastGFile(img_path, 'rb').read()
            fc_tensor = sess.graph.get_tensor_by_name('pool_3:0')
            pool_1 = sess.run(fc_tensor, {'DecodeJpeg/contents:0': image_data})
            # print(pool_1)
            # Strip the file extension and save the 1x1x1x2048 feature next to the image.
            img_path = img_path[:len(img_path) - 4]
            # print(img_path)
            scio.savemat(img_path + '.mat', {"pool_1": pool_1})
A senior labmate needed to extract image features from his own dataset, which is why it is written this way; adding one more loop is enough to walk the entire dataset. My machine is limited, so I kept it this simple. (My modified original code + weights.)
Updated version:
import tensorflow as tf
import numpy as np
import os
from PIL import Image
import matplotlib.pyplot as plt
import scipy.io as scio

model_dir = 'F:/fqh/models-master/tutorials/image/imagenet/2015'
image = 'F:/fqh/models-master/tutorials/image/imagenet/data_set/face/faces96_152_20_180-200jpgview-depth/'
target_path = image + 'wjhugh/'


class NodeLookup(object):
    """Maps integer node IDs from the graph to human-readable ImageNet labels."""

    def __init__(self, label_lookup_path=None, uid_lookup_path=None):
        if not label_lookup_path:
            label_lookup_path = os.path.join(
                model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
        if not uid_lookup_path:
            uid_lookup_path = os.path.join(
                model_dir, 'imagenet_synset_to_human_label_map.txt')
        self.node_lookup = self.load(label_lookup_path, uid_lookup_path)

    def load(self, label_lookup_path, uid_lookup_path):
        if not tf.gfile.Exists(uid_lookup_path):
            tf.logging.fatal('File does not exist %s', uid_lookup_path)
        if not tf.gfile.Exists(label_lookup_path):
            tf.logging.fatal('File does not exist %s', label_lookup_path)
        # Map each synset UID to its human-readable string.
        proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
        uid_to_human = {}
        for line in proto_as_ascii_lines:
            line = line.strip('\n')
            parse_items = line.split('\t')
            uid = parse_items[0]
            human_string = parse_items[1]
            uid_to_human[uid] = human_string
        # Map each node ID used by the graph to its synset UID.
        proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
        node_id_to_uid = {}
        for line in proto_as_ascii:
            if line.startswith('  target_class:'):
                target_class = int(line.split(': ')[1])
            if line.startswith('  target_class_string:'):
                target_class_string = line.split(': ')[1]
                node_id_to_uid[target_class] = target_class_string[1:-2]
        # Combine both maps: node ID -> human-readable label.
        node_id_to_name = {}
        for key, val in node_id_to_uid.items():
            if val not in uid_to_human:
                tf.logging.fatal('Failed to locate: %s', val)
            name = uid_to_human[val]
            node_id_to_name[key] = name
        return node_id_to_name

    def id_to_string(self, node_id):
        if node_id not in self.node_lookup:
            return ''
        return self.node_lookup[node_id]


def create_graph():
    # Load the frozen Inception-v3 graph definition from the .pb file.
    with tf.gfile.FastGFile(os.path.join(
            model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')


create_graph()

# Collect the per-person sub-directory names under the dataset root.
list0 = []
for root, dirs, files in os.walk(image):
    list0.append(dirs)
# print(list0[0])
img_list = []
# print(img_list)
for ii in list0[0]:
    img_list.append(ii)
list_img_name = np.array(img_list)
list_img_name.sort()
# print(list_img_name[0])

with tf.Session() as sess:
    softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')  # retrieved but not used below
    for jj in range(0, len(list_img_name)):  # len(list_img_name)
        target_path = image + list_img_name[jj] + '/'
        for root, dirs, files in os.walk(target_path):
            for file in files:
                img_path = target_path + file
                image_data = tf.gfile.FastGFile(img_path, 'rb').read()
                fc_tensor = sess.graph.get_tensor_by_name('pool_3:0')
                pool_1 = sess.run(fc_tensor, {'DecodeJpeg/contents:0': image_data})
                # Flatten the 1x1x1x2048 output to a plain 2048-dim vector.
                pool_2 = pool_1[0, 0, 0, :]
                img_path = img_path[:len(img_path) - 4]
                scio.savemat(img_path + '.mat', {"pool_2": pool_2})
        # Print traversal progress as a percentage.
        pi = (jj / (len(list_img_name) - 1)) * 100
        print("%4.2f %%" % pi)
This version flattens the feature vector and walks the entire dataset.
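Once all the .mat files exist, they can be reloaded and stacked into a single feature matrix for later use. A sketch, assuming the same dataset layout as above (one sub-directory per person under the `image` root, with a 2048-dim "pool_2" vector saved per image):

import os
import numpy as np
import scipy.io as scio

features, labels = [], []
for person in sorted(os.listdir(image)):
    person_dir = os.path.join(image, person)
    if not os.path.isdir(person_dir):
        continue
    for name in os.listdir(person_dir):
        if name.endswith('.mat'):
            mat = scio.loadmat(os.path.join(person_dir, name))
            features.append(mat['pool_2'].ravel())    # back to a (2048,) vector
            labels.append(person)

X = np.vstack(features)                               # (num_images, 2048)
print(X.shape, len(labels))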