Face Recognition with a Convolutional Neural Network in Python
1. Overall Design of the Face Recognition System
Client-server interaction flow diagram:
2. Server Code
import socket

sk = socket.socket()
# sk.bind(address): with AF_INET, address is a (host, port) tuple.
sk.bind(("172.29.25.11", 8007))
# Start listening for incoming connections.
sk.listen(5)
while True:
    for i in range(100):
        # accept() returns (conn, address): conn is a socket for this client, address is the client's address.
        conn, address = sk.accept()
        # Save each received image under a sequential file name.
        path = str(i + 1) + '.jpg'
        # Receive the file size announced by the client (a string with a 2-character prefix).
        size = conn.recv(1024)
        size_str = str(size, encoding="utf-8")
        size_str = size_str[2:]
        file_size = int(size_str)
        # Acknowledge so the client starts sending the image data.
        conn.sendall(bytes('finish', encoding="utf-8"))
        # has_size counts the bytes received so far.
        has_size = 0
        # Write the incoming bytes to the image file.
        f = open(path, "wb")
        while True:
            # Stop once the whole file has arrived.
            if file_size == has_size:
                break
            data = conn.recv(1024)
            f.write(data)
            has_size += len(data)
        f.close()
        # Shrink the image if it is very large.
        resize(path)
        # cut_img(path): returns True if a face was aligned and cropped, False otherwise.
        if cut_img(path):
            yuchuli()
            result = test('test.jpg')
            conn.sendall(bytes(result, encoding="utf-8"))
        else:
            print('failure')
            conn.sendall(bytes('Recognition failed, please try again', encoding="utf-8"))
        conn.close()
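The article never shows the client side. Below is a minimal sketch of what it might look like, assuming the server code above: the client announces the file size with a 2-character prefix (the 's:' prefix here is a guess matching the server's size_str[2:] slice), waits for the 'finish' acknowledgement, streams the image bytes, and reads back the recognition result.
import os
import socket

sk = socket.socket()
sk.connect(("172.29.25.11", 8007))
path = 'face.jpg'  # hypothetical image to send
# Announce the size; 's:' is an assumed 2-character prefix the server strips.
sk.sendall(bytes('s:' + str(os.path.getsize(path)), encoding="utf-8"))
# Wait for the server's 'finish' acknowledgement before sending data.
sk.recv(1024)
with open(path, 'rb') as f:
    sk.sendall(f.read())
# Read the recognition result sent back by the server.
print(str(sk.recv(1024), encoding="utf-8"))
sk.close()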
3. Image Preprocessing
1) Image Resizing
import cv2

# Shrink large images by an integer factor so later processing is faster.
def resize(path):
    image = cv2.imread(path, 0)
    row, col = image.shape
    if row >= 2500:
        x, y = int(row / 5), int(col / 5)
    elif row >= 2000:
        x, y = int(row / 4), int(col / 4)
    elif row >= 1500:
        x, y = int(row / 3), int(col / 3)
    elif row >= 1000:
        x, y = int(row / 2), int(col / 2)
    else:
        x, y = row, col
    # cv2.resize expects the target size as (width, height), hence (y, x).
    res = cv2.resize(image, (y, x), interpolation=cv2.INTER_CUBIC)
    cv2.imwrite(path, res)
2) Histogram Equalization and Median Filtering
# Histogram equalization spreads the intensity distribution to improve contrast.
eq = cv2.equalizeHist(img)
# A 3x3 median filter suppresses noise while preserving edges.
lbimg = cv2.medianBlur(eq, 3)
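The server calls a preprocessing helper named yuchuli() that the article never lists. A minimal sketch of what it might look like, assuming it applies the two steps above to the cropped test.jpg written by cut_img():
import cv2

def yuchuli():
    # Assumed reconstruction: equalize and denoise the cropped face image in place.
    img = cv2.imread('test.jpg', 0)
    eq = cv2.equalizeHist(img)
    lbimg = cv2.medianBlur(eq, 3)
    cv2.imwrite('test.jpg', lbimg)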
3) Eye Detection
# -*- coding: utf-8 -*-
# Detect both eyes with a Haar cascade; their centers are used later for alignment.
import numpy as np
import cv2

def eye_test(path):
    # Path of the image to test.
    imagepath = path
    # Load the pretrained eye cascade (handles eyes with and without glasses).
    eyeglasses_cascade = cv2.CascadeClassifier('haarcascade_eye_tree_eyeglasses.xml')
    # Read the image and convert it to grayscale for detection.
    img = cv2.imread(imagepath)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Run the cascade over the grayscale image.
    eyeglasses = eyeglasses_cascade.detectMultiScale(gray)
    # Proceed only if exactly two eyes were detected.
    if len(eyeglasses) == 2:
        num = 0
        for (e_gx, e_gy, e_gw, e_gh) in eyeglasses:
            cv2.rectangle(img, (e_gx, e_gy), (e_gx + int(e_gw / 2), e_gy + int(e_gh / 2)), (0, 0, 255), 2)
            if num == 0:
                x1, y1 = e_gx + int(e_gw / 2), e_gy + int(e_gh / 2)
            else:
                x2, y2 = e_gx + int(e_gw / 2), e_gy + int(e_gh / 2)
            num += 1
        print('eye_test')
        return x1, y1, x2, y2
    else:
        return False
4) Eye Alignment and Cropping
# -*- coding: utf-8 -*-
# Align the face using the detected eye positions, then crop and resize it.
# Usage:
# CropFace(image, eye_left, eye_right, offset_pct, dest_sz)
# eye_left is the position of the left eye
# eye_right is the position of the right eye
# offset_pct is the percent of the image you want to keep next to the eyes (horizontal, vertical direction)
# dest_sz is the size of the output image
import sys, math
from PIL import Image
from eye_test import eye_test

# Euclidean distance between two points.
def Distance(p1, p2):
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return math.sqrt(dx * dx + dy * dy)

# Rotate the image around a center point, with optional scaling and translation.
def ScaleRotateTranslate(image, angle, center=None, new_center=None, scale=None, resample=Image.BICUBIC):
    if (scale is None) and (center is None):
        return image.rotate(angle=angle, resample=resample)
    nx, ny = x, y = center
    sx = sy = 1.0
    if new_center:
        (nx, ny) = new_center
    if scale:
        (sx, sy) = (scale, scale)
    cosine = math.cos(angle)
    sine = math.sin(angle)
    a = cosine / sx
    b = sine / sx
    c = x - nx * a - ny * b
    d = -sine / sy
    e = cosine / sy
    f = y - nx * d - ny * e
    return image.transform(image.size, Image.AFFINE, (a, b, c, d, e, f), resample=resample)

# Rotate the face so the eyes are horizontal, scale it, and crop around the eyes.
def CropFace(image, eye_left=(0, 0), eye_right=(0, 0), offset_pct=(0.2, 0.2), dest_sz=(70, 70)):
    # calculate offsets in original image
    offset_h = math.floor(float(offset_pct[0]) * dest_sz[0])
    offset_v = math.floor(float(offset_pct[1]) * dest_sz[1])
    # get the direction between the eyes
    eye_direction = (eye_right[0] - eye_left[0], eye_right[1] - eye_left[1])
    # calc rotation angle in radians
    rotation = -math.atan2(float(eye_direction[1]), float(eye_direction[0]))
    # distance between the eyes
    dist = Distance(eye_left, eye_right)
    # calculate the reference eye-width
    reference = dest_sz[0] - 2.0 * offset_h
    # scale factor
    scale = float(dist) / float(reference)
    # rotate original around the left eye
    image = ScaleRotateTranslate(image, center=eye_left, angle=rotation)
    # crop the rotated image
    crop_xy = (eye_left[0] - scale * offset_h, eye_left[1] - scale * offset_v)
    crop_size = (dest_sz[0] * scale, dest_sz[1] * scale)
    image = image.crop((int(crop_xy[0]), int(crop_xy[1]), int(crop_xy[0] + crop_size[0]), int(crop_xy[1] + crop_size[1])))
    # resize it to the destination size
    image = image.resize(dest_sz, Image.ANTIALIAS)
    return image
def cut_img(path):
    image = Image.open(path)
    # eye_test returns the two eye centers on success, or False if it did not find exactly two eyes.
    eyes = eye_test(path)
    if eyes:
        print('cut_img')
        leftx, lefty, rightx, righty = eyes
        # Make sure the left eye is the one with the smaller x coordinate.
        if leftx > rightx:
            leftx, lefty, rightx, righty = rightx, righty, leftx, lefty
        # Align the face by the eyes, crop to 92x112, and save it as test.jpg.
        CropFace(image, eye_left=(leftx, lefty), eye_right=(rightx, righty), offset_pct=(0.30, 0.30), dest_sz=(92, 112)).save('test.jpg')
        return True
    else:
        print('failure')
        return False
4. Training the Convolutional Neural Network
# -*- coding: utf-8 -*-
from numpy import *
import cv2
import tensorflow as tf

# Pixels per image (112 x 92).
TYPE = 112 * 92
# Number of people (classes).
PEOPLENUM = 42
# Training images per person.
TRAINNUM = 15  # (train_face_num)
# Total images per person (test_face_num + train_face_num).
EACH = 21

# Flatten a 2-D image into a 1-D row vector.
def img2vector1(filename):
    img = cv2.imread(filename, 0)
    row, col = img.shape
    vector1 = zeros((1, row * col))
    vector1 = reshape(img, (1, row * col))
    return vector1
# Read the images and build one-hot labels for the training and test sets.
def ReadData(k):
    path = 'face_flip/'
    train_face = zeros((PEOPLENUM * k, TYPE), float32)
    train_face_num = zeros((PEOPLENUM * k, PEOPLENUM))
    test_face = zeros((PEOPLENUM * (EACH - k), TYPE), float32)
    test_face_num = zeros((PEOPLENUM * (EACH - k), PEOPLENUM))
    # Loop over all 42 people.
    for i in range(PEOPLENUM):
        # Person IDs are 1-based.
        people_num = i + 1
        for j in range(k):
            # Path of this person's j-th training image.
            filename = path + 's' + str(people_num) + '/' + str(j + 1) + '.jpg'
            # Flatten the 2-D image into a 1-D vector.
            img = img2vector1(filename)
            # train_face: scaled pixels; train_face_num: one-hot label.
            train_face[i * k + j, :] = img / 255
            train_face_num[i * k + j, people_num - 1] = 1
        for j in range(k, EACH):
            # Path of this person's j-th test image.
            filename = path + 's' + str(people_num) + '/' + str(j + 1) + '.jpg'
            img = img2vector1(filename)
            # test_face: scaled pixels; test_face_num: one-hot label.
            test_face[i * (EACH - k) + (j - k), :] = img / 255
            test_face_num[i * (EACH - k) + (j - k), people_num - 1] = 1
    return train_face, train_face_num, test_face, test_face_num

# Load the images and their labels.
train_face, train_face_num, test_face, test_face_num = ReadData(TRAINNUM)
# Compute classification accuracy on a labeled set.
def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
    return result

# Weight initialization: truncated normal.
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

# Bias initialization: small positive constant.
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# 2-D convolution with stride 1 and SAME padding.
def conv2d(x, W):
    # stride [1, x_movement, y_movement, 1]
    # Must have strides[0] = strides[3] = 1
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

# 2x2 max pooling halves both spatial dimensions.
def max_pool_2x2(x):
    # stride [1, x_movement, y_movement, 1]
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# define placeholder for inputs to network
# (note: the feed data was already divided by 255 in ReadData, so this second
# division is redundant but kept, since inference applies the same scaling)
xs = tf.placeholder(tf.float32, [None, 10304]) / 255.  # 112*92
ys = tf.placeholder(tf.float32, [None, PEOPLENUM])  # 42 classes
keep_prob = tf.placeholder(tf.float32)
x_image = tf.reshape(xs, [-1, 112, 92, 1])
# print(x_image.shape)  # [n_samples, 112, 92, 1]
# First convolutional layer.
W_conv1 = weight_variable([5, 5, 1, 32])  # patch 5x5, in size 1, out size 32
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # output size 112x92x32
h_pool1 = max_pool_2x2(h_conv1)  # output size 56x46x32
# Second convolutional layer.
W_conv2 = weight_variable([5, 5, 32, 64])  # patch 5x5, in size 32, out size 64
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  # output size 56x46x64
h_pool2 = max_pool_2x2(h_conv2)  # output size 28x23x64
# First fully connected layer, with dropout.
W_fc1 = weight_variable([28 * 23 * 64, 1024])
b_fc1 = bias_variable([1024])
# [n_samples, 28, 23, 64] ->> [n_samples, 28*23*64]
h_pool2_flat = tf.reshape(h_pool2, [-1, 28 * 23 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Output layer: softmax over the 42 classes.
W_fc2 = weight_variable([1024, PEOPLENUM])
b_fc2 = bias_variable([PEOPLENUM])
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# Cross-entropy loss on the logits.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=tf.matmul(h_fc1_drop, W_fc2) + b_fc2, labels=ys))
regularizers = tf.nn.l2_loss(W_fc1) + tf.nn.l2_loss(b_fc1) + tf.nn.l2_loss(W_fc2) + tf.nn.l2_loss(b_fc2)
# Add L2 regularization on the fully connected layers.
cost += 5e-4 * regularizers
# Adam optimizer with a small learning rate.
train_step = tf.train.AdamOptimizer(1e-4).minimize(cost)
sess = tf.Session()
init = tf.global_variables_initializer()
saver = tf.train.Saver()
sess.run(init)
# Train for 1000 iterations; report test accuracy every 50.
for i in range(1000):
    sess.run(train_step, feed_dict={xs: train_face, ys: train_face_num, keep_prob: 0.5})
    if i % 50 == 0:
        print(sess.run(prediction[0], feed_dict={xs: test_face, ys: test_face_num, keep_prob: 1}))
        print(compute_accuracy(test_face, test_face_num))
# Save the trained weights.
save_path = saver.save(sess, 'my_data/save_net.ckpt')
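As a sanity check on the fully connected layer's input size: each 2x2 max pool halves both spatial dimensions, so 112x92 becomes 56x46 and then 28x23, giving 28*23*64 = 41216 flattened features. A quick way to confirm this after building the graph above:
# Optional shape check (assumes the graph above is in scope).
print(h_pool2.get_shape())       # expect (?, 28, 23, 64)
print(h_pool2_flat.get_shape())  # expect (?, 41216)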
5. Testing New Data with the Convolutional Neural Network
# -*- coding: utf-8 -*-
# Rebuild the same network, restore the trained weights, and classify a new face.
from numpy import *
import cv2
import tensorflow as tf

# Number of people (classes).
PEOPLENUM = 42

# Flatten a 2-D image into a 1-D row vector.
def img2vector1(img):
    row, col = img.shape
    vector1 = zeros((1, row * col), float32)
    vector1 = reshape(img, (1, row * col))
    return vector1
# Weight initialization: truncated normal.
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

# Bias initialization: small positive constant.
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# 2-D convolution with stride 1 and SAME padding.
def conv2d(x, W):
    # stride [1, x_movement, y_movement, 1]
    # Must have strides[0] = strides[3] = 1
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

# 2x2 max pooling halves both spatial dimensions.
def max_pool_2x2(x):
    # stride [1, x_movement, y_movement, 1]
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 10304]) / 255.  # 112*92
ys = tf.placeholder(tf.float32, [None, PEOPLENUM])  # 42 classes
keep_prob = tf.placeholder(tf.float32)
x_image = tf.reshape(xs, [-1, 112, 92, 1])
# print(x_image.shape)  # [n_samples, 112, 92, 1]
# First convolutional layer.
W_conv1 = weight_variable([5, 5, 1, 32])  # patch 5x5, in size 1, out size 32
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # output size 112x92x32
h_pool1 = max_pool_2x2(h_conv1)  # output size 56x46x32
# Second convolutional layer.
W_conv2 = weight_variable([5, 5, 32, 64])  # patch 5x5, in size 32, out size 64
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  # output size 56x46x64
h_pool2 = max_pool_2x2(h_conv2)  # output size 28x23x64
# First fully connected layer, with dropout.
W_fc1 = weight_variable([28 * 23 * 64, 1024])
b_fc1 = bias_variable([1024])
# [n_samples, 28, 23, 64] ->> [n_samples, 28*23*64]
h_pool2_flat = tf.reshape(h_pool2, [-1, 28 * 23 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Output layer: softmax over the 42 classes.
W_fc2 = weight_variable([1024, PEOPLENUM])
b_fc2 = bias_variable([PEOPLENUM])
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
sess = tf.Session()
init = tf.global_variables_initializer()
# Restore the weights saved after training.
saver = tf.train.Saver()
saver.restore(sess, 'my_data/save_net.ckpt')

# Map a predicted class index to a person's name.
def find_people(people_num):
    if people_num == 41:
        return ' '
    elif people_num == 42:
        return 'LZT'
    else:
        return 'another people'
def test(path):
    # Read the image as grayscale and scale pixel values to [0, 1].
    img = cv2.imread(path, 0) / 255
    test_face = img2vector1(img)
    print('true_test')
    # Run the network; prediction1 holds the softmax probabilities.
    prediction1 = sess.run(prediction, feed_dict={xs: test_face, keep_prob: 1})
    prediction1 = prediction1[0].tolist()
    people_num = prediction1.index(max(prediction1)) + 1
    result = max(prediction1) / sum(prediction1)
    print(result, find_people(people_num))
    # Accept the match only if the normalized top probability exceeds 0.5.
    if result > 0.50:
        # Mark this person as present in the sign-in sheet.
        qiandaobiao = load('save.npy')
        qiandaobiao[people_num - 1] = 1
        save('save.npy', qiandaobiao)
        # Report "<name> checked in".
        print(find_people(people_num) + ' checked in')
        result = find_people(people_num) + ' checked in'
    else:
        result = 'recognition failed'
    return result
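test() assumes the attendance array save.npy already exists on disk; the article never shows how it is created. A one-off initialization sketch under that assumption:
from numpy import zeros, save

# Assumed one-time setup: an all-zero sign-in sheet with one slot per person.
save('save.npy', zeros(42))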
That concludes this introductory walkthrough of face recognition with a convolutional neural network. I hope it helps with your studies, and thank you for your support.