-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest.py
More file actions
173 lines (160 loc) · 6.75 KB
/
test.py
File metadata and controls
173 lines (160 loc) · 6.75 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
# -*- coding: utf-8 -*-
"""
Created on Sat May 26 00:12:50 2018
@author: Administrator
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 5 14:42:06 2018
@author: Administrator
"""
import cv2
import matplotlib.pyplot as pplt
import numpy as np
from skimage import transform
import tensorflow as tf
import matplotlib.image as plb
import matplotlib.pyplot as plt
classifier_path = "H:/Anaconda/Anaconda_exercise/RLJC/haarcascade_frontalface_default.xml"
#sess = tf.Session()
#new_saver = tf.train.import_meta_graph('trained_variables.ckpt.meta')
#new_saver.restore(sess, tf.train.latest_checkpoint('./'))
#graph = tf.get_default_graph()
#x = graph.get_tensor_by_name('x:0')
#y_ = graph.get_tensor_by_name('y_:0')
#new_y = graph.get_tensor_by_name('new_y:0')
#keep_prob = graph.get_tensor_by_name('keep_prob:0')
# NOTE(review): the pasted source lost all indentation; the structure below is
# reconstructed from the program logic, the inline comments, and the
# commented-out duplicate of the same algorithm — confirm against the original.
xc = 1
if xc == 1:
    # --- Still-image mode: detect a face in one photo and normalise it. ---
    # "kuang" (frame/border) mask template, converted to grayscale with the
    # standard ITU-R 601 luma weights.
    img_1 = plb.imread("C:/Users/Administrator/Desktop/face/kuang.jpg")
    img_1 = np.dot(img_1[..., :3], [0.299, 0.587, 0.114])
    img_raw, img_col = np.shape(img_1)
    imgs = []
    classfier = cv2.CascadeClassifier(classifier_path)
    color = (0, 255, 0)  # green, used for drawn annotations
    frame = plb.imread("C:/Users/Administrator/Desktop/qwe/2 (1).jpg")
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # 1.2 = image pyramid scale step, 5 = min neighbours for a stable hit.
    faceRects = classfier.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(30, 30),
        flags=0)
    if len(faceRects) > 0:  # at least one face detected
        for faceRect in faceRects:  # process every detected face
            x, y, w, h = faceRect
            # Crop with a 10 px margin around the detected bounding box.
            image = frame[y - 10: y + h + 10, x - 10: x + w + 10]
            plt.imshow(gray, cmap='gray'), plt.show()
            image = np.dot(image[..., :3], [0.299, 0.587, 0.114])
            img_2 = transform.resize(image, (100, 100))
            # Cut the mask's interior window (offset 3/13, size
            # (img_raw-11) x (img_col-27)) out of the resized face.
            new_img = np.zeros((img_raw - 11, img_col - 27))
            new_img_raw, new_img_col = np.shape(new_img)
            for i in range(new_img_raw):
                for j in range(new_img_col):
                    new_img[i][j] = img_2[i + 3][j + 13]
            images = transform.resize(new_img, (88, 72, 1))
            plt.imshow(new_img, cmap='gray'), plt.show()
            cv2.imwrite("C:/Users/Administrator/Desktop/qwe/2 (12).jpg", new_img)
else:
    def catch_video(window_name, camera_idx):
        """Show a live feed, outline detected faces, and capture on 'x'.

        Parameters
        ----------
        window_name : str
            Title of the OpenCV preview window.
        camera_idx : int or str
            Video source: a device index for a USB camera, or a file path
            for a pre-recorded video.

        Returns
        -------
        (imgs, images)
            ``imgs`` is the list of captured, mask-cropped, (44, 36, 1)
            resized face images; ``images`` is the most recent capture, or
            None if 'x' was never pressed while a face was visible.
        """
        img_1 = plb.imread("C:/Users/Administrator/Desktop/face/kuang.jpg")
        img_1 = np.dot(img_1[..., :3], [0.299, 0.587, 0.114])
        img_raw, img_col = np.shape(img_1)
        imgs = []
        images = None    # fix: was unbound at `return` if nothing was captured
        faceRect = None  # fix: last detection; x/y/w/h were unbound on an
                         # 'x' press before any face had been detected
        cv2.namedWindow(window_name)
        cap = cv2.VideoCapture(camera_idx)
        classfier = cv2.CascadeClassifier(classifier_path)
        color = (0, 255, 0)  # colour of the face bounding box
        while cap.isOpened():
            ok, frame = cap.read()  # grab one frame
            if not ok:
                break
            # Detector wants a grayscale image.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # 1.2 = pyramid scale step, 5 = min neighbours.
            faceRects = classfier.detectMultiScale(
                gray,
                scaleFactor=1.2,
                minNeighbors=5,
                minSize=(30, 30),
                flags=0)
            if len(faceRects) > 0:  # at least one face detected
                for faceRect in faceRects:  # outline every face
                    x, y, w, h = faceRect
                    cv2.rectangle(frame, (x - 10, y - 10),
                                  (x + w + 10, y + h + 10), color, 2)
                    # The +26 offsets compensate for the rectangle line width.
                    cv2.ellipse(frame,
                                (int(x + w / 2), int(y + h / 2 - 0.025 * h)),
                                (int(74 * (w + 26) / 200),
                                 int(90 * (h + 26) / 200)),
                                0, 0, 360, 255, 1)
            cv2.imshow(window_name, frame)
            c = cv2.waitKey(10)
            # Press 'x' to crop the most recently detected face.
            if c & 0xFF == ord('x') and faceRect is not None:
                x, y, w, h = faceRect
                image = frame[y - 10: y + h + 10, x - 10: x + w + 10]
                image = np.dot(image[..., :3], [0.299, 0.587, 0.114])
                img_2 = transform.resize(image, (100, 100))
                # Zero out face pixels wherever the mask template is black.
                # NOTE(review): assumes the mask is no larger than 100x100,
                # otherwise img_2 indexing goes out of range — confirm.
                for i in range(img_raw):
                    for j in range(img_col):
                        if img_1[i][j] == 0:
                            img_2[i, j] = 0
                new_img = np.zeros((img_raw - 11, img_col - 27))
                new_img_raw, new_img_col = np.shape(new_img)
                for i in range(new_img_raw):
                    for j in range(new_img_col):
                        new_img[i][j] = img_2[i + 3][j + 13]
                pplt.imshow(new_img, cmap='gray')
                images = transform.resize(new_img, (44, 36, 1))
                imgs.append(images)
                break
        # Release the capture device and close all preview windows.
        cap.release()
        cv2.destroyAllWindows()
        return imgs, images

    imgs, img = catch_video("Get Video Stream", 0)
#
#a = np.array([8])
#new_y = sess.run([new_y],feed_dict = {x:imgs,y_:a,keep_prob:1.0})
#position = np.argmax(new_y)
#name = ['薛义权','宋锦涛','蓝高杰','陆权忠','覃绍彬','周俊成','黄凯','潘俊伟','陈东良']
#print('result :',name[position])
#print(new_y)