Skip to content

Commit 32d7d64

Browse files
committed
Update 22-08-2020
1 parent 78d5c3b commit 32d7d64

File tree

181 files changed

+901
-1466
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

181 files changed

+901
-1466
lines changed

Drowsiness/Notebook.ipynb

Lines changed: 310 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,310 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "code",
5+
"execution_count": null,
6+
"metadata": {},
7+
"outputs": [],
8+
"source": [
9+
# Driver-drowsiness detector: imports, alarm player, Haar cascades, and
# shared state for the capture/inference/display threads.
import cv2
import torch.hub
import os
import model
from PIL import Image
from torchvision import transforms
from grad_cam import BackPropagation
import time
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from IPython import display
import threading
import vlc

# import smbus
# import requests
# from twilio.rest import Client
# import urllib.request
# import json

# Alarm sound file
file = 'alarm.mp3'
# Sound player (reuse the configured file name instead of repeating the literal)
p = vlc.MediaPlayer(file)

"""
class MMA7455():
    bus = smbus.SMBus(1)
    def __init__(self):
        self.bus.write_byte_data(0x1D, 0x16, 0x55) # Setup the Mode
        self.bus.write_byte_data(0x1D, 0x10, 0) # Calibrate
        self.bus.write_byte_data(0x1D, 0x11, 0) # Calibrate
        self.bus.write_byte_data(0x1D, 0x12, 0) # Calibrate
        self.bus.write_byte_data(0x1D, 0x13, 0) # Calibrate
        self.bus.write_byte_data(0x1D, 0x14, 0) # Calibrate
        self.bus.write_byte_data(0x1D, 0x15, 0) # Calibrate
    def getValueX(self):
        return self.bus.read_byte_data(0x1D, 0x06)
    def getValueY(self):
        return self.bus.read_byte_data(0x1D, 0x07)
    def getValueZ(self):
        return self.bus.read_byte_data(0x1D, 0x08)


# Crash Sensibility
sens=30

# Sending SMS if Crash Detected


def send():
    # Your Account SID from twilio.com/console
    account_sid = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
    # Your Auth Token from twilio.com/console
    auth_token = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
    client = Client(account_sid, auth_token)
    phone = "+XXXXXXXXXXXX"
    print('crash')
    send_url = 'http://ip-api.com/json'
    r = requests.get(send_url)
    j = json.loads(r.text)
    text="The Driver Crash Here: "
    text+="http://maps.google.com/maps?q=loc:{},{}".format(j['lat'],j['lon'])
    print(text)
    message = client.messages.create(to=phone, from_="++XXXXXXXXXXXX",body=text)
    print(message.sid)
    time.sleep(10)
    stop()


# Accelerometer Declaration
mma = MMA7455()

# Obtaining the X, Y and Z values.

xmem=mma.getValueX()
ymem=mma.getValueY()
zmem=mma.getValueZ()
x = mma.getValueX()
y = mma.getValueY()
z = mma.getValueZ()


# Creating the base accelerometer values.

if(xmem > 127):
    xmem=xmem-255
if(ymem > 127):
    ymem=ymem-255
if(zmem > 127):
    zmem=zmem-255
if(x > 127):
    x=x-255
if(y > 127):
    y=y-255
if(z > 127):
    z=z-255
"""

# Timers: "base" marks when the eyes were last seen open / face last seen;
# "run" is the current observation time. drow()/dis alarms fire on their gap.
timebasedrow = time.time()
timebasedis = time.time()
timerundrow = time.time()
timerundis = time.time()

face_cascade = cv2.CascadeClassifier('haar_models/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haar_models/haarcascade_eye.xml')
MyModel = "BlinkModel.t7"

# Eye crops are resized to this (width, height) before classification.
shape = (24, 24)
classes = [
    'Close',
    'Open',
]

# Per-frame detections: [eye tensor, raw eye crop, 48x48 face crop] entries.
eyess = []
# Flag: 1 once a face has been detected in the current frame.
cface = 0
126+
]
127+
},
128+
{
129+
"cell_type": "code",
130+
"execution_count": null,
131+
"metadata": {},
132+
"outputs": [],
133+
"source": [
134+
def preprocess(image_path):
    """Detect the face and eyes in the frame at ``image_path['path']``.

    Side effects (this function returns nothing):
      * sets the global ``cface`` flag to 1 when a face is found
      * appends one ``[eye tensor (grayscale), raw eye crop, 48x48 face crop]``
        entry per detected eye to the global ``eyess`` list
      * writes the annotated frame to ``temp-images/display.jpg``
    """
    global cface
    transform_test = transforms.Compose([
        transforms.ToTensor()
    ])
    image = cv2.imread(image_path['path'])
    faces = face_cascade.detectMultiScale(
        image,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(1, 1),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    if len(faces) > 0:
        cface = 1
        # Only the first detected face is used.
        (x, y, w, h) = faces[0]
        face = image[y:y + h, x:x + w]
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 255, 0), 2)
        roi_color = image[y:y + h, x:x + w]
        # Eye-detector sensitivity: depending on camera quality this can vary
        # between 10 and 40.
        sensi = 20
        eyes = eye_cascade.detectMultiScale(face, 1.3, sensi)
        # Use the unpacked eye coordinates directly instead of re-indexing
        # eyes[i] (the original clobbered the face's x/y/w/h with them).
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            eye = face[ey:ey + eh, ex:ex + ew]
            eye = cv2.resize(eye, shape)
            eyess.append([
                transform_test(Image.fromarray(eye).convert('L')),
                eye,
                cv2.resize(face, (48, 48)),
            ])
    cv2.imwrite('temp-images/display.jpg', image)
172+
def eye_status(image, name, net):
    """Classify one eye crop as ``'Close'`` or ``'Open'``.

    image -- dict holding the eye tensor under key ``name``
    name  -- key of the tensor in ``image`` (the callers pass ``'eye'``)
    net   -- loaded eye-state classifier

    Returns the label of the network's top prediction.
    """
    img = torch.stack([image[name]])
    bp = BackPropagation(model=net)
    probs, ids = bp.forward(img)
    # ids is sorted by confidence; only the top class label is reported.
    # (The original also computed the probability but never used it.)
    actual_status = ids[:, 0]
    return classes[actual_status.data]
183+
"\n",
184+
def func(imag, modl):
    """Run the drowsiness pipeline on a single frame file with model ``modl``."""
    frame = {'path': imag, 'eye': (0, 0, 0, 0)}
    drow(images=[frame], model_name=modl)
186+
"\n",
187+
# Cache of loaded networks keyed by model file name, so the checkpoint is not
# re-read and deserialized from disk on every captured frame.
_net_cache = {}


def _load_net(model_name):
    """Return the eye-state classifier for ``model_name``, loading it once."""
    net = _net_cache.get(model_name)
    if net is None:
        net = model.Model(num_classes=len(classes))
        checkpoint = torch.load(os.path.join('model', model_name),
                                map_location=torch.device('cpu'))
        net.load_state_dict(checkpoint['net'])
        net.eval()
        _net_cache[model_name] = net
    return net


def drow(images, model_name):
    """Run face/eye detection on ``images`` and raise alarms on drowsiness.

    images     -- list of dicts with a ``'path'`` key (one frame each)
    model_name -- checkpoint file name under the ``model/`` directory

    Side effects: updates the global timers, annotates and rewrites
    ``temp-images/display.jpg``, and starts/stops the vlc alarm player.
    Alarm policy: "Distracted" after >1.5 s without a detected face,
    "Drowsy" after >3 s with eyes classified as closed.
    """
    global eyess
    global cface
    global timebasedrow
    global timebasedis
    global timerundrow
    global timerundis
    net = _load_net(model_name)

    flag = 1
    status = ""
    for i, image in enumerate(images):
        if flag:
            # Detection runs once per call; it fills eyess and sets cface.
            preprocess(image)
            flag = 0
        if cface == 0:
            # No face: annotate the frame, reset all timers (treat the driver
            # as neither drowsy nor distracted yet).
            frame = cv2.imread("temp-images/display.jpg")
            frame = cv2.putText(frame, 'No face Detected', (50, 50),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2,
                                cv2.LINE_AA)
            cv2.imwrite('temp-images/display.jpg', frame)
            timebasedrow = time.time()
            timebasedis = time.time()
            timerundrow = time.time()
            timerundis = time.time()
        elif len(eyess) != 0:
            eye, eye_raw, face = eyess[i]
            image['eye'] = eye
            image['raw'] = eye_raw
            image['face'] = face
            timebasedrow = time.time()
            timerundrow = time.time()
            for index, image in enumerate(images):
                status = eye_status(image, 'eye', net)
                if status == "Close":
                    timerundis = time.time()
                    if (timerundis - timebasedis) > 1.5:
                        frame = cv2.imread("temp-images/display.jpg")
                        frame = cv2.putText(frame, 'Distracted', (50, 50),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1,
                                            (255, 0, 0), 2, cv2.LINE_AA)
                        cv2.imwrite('temp-images/display.jpg', frame)
                        if not p.is_playing():
                            p.play()
                    else:
                        p.stop()
        else:
            # Face found but no eyes: eyes have been closed (or lost) since
            # timebasedrow; alarm once the gap exceeds 3 seconds.
            timerundrow = time.time()
            if (timerundrow - timebasedrow) > 3:
                if not p.is_playing():
                    p.play()
                frame = cv2.imread("temp-images/display.jpg")
                frame = cv2.putText(frame, 'Drowsy', (50, 50),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1,
                                    (255, 0, 0), 2, cv2.LINE_AA)
                cv2.imwrite('temp-images/display.jpg', frame)
240+
]
241+
},
242+
{
243+
"cell_type": "code",
244+
"execution_count": null,
245+
"metadata": {},
246+
"outputs": [],
247+
"source": [
248+
def main():
    """Capture loop: grab webcam frames and run the detector on each.

    Resets the per-frame globals (``eyess``, ``cface``), writes the frame to
    ``img.jpg``, and hands it to ``func``. Runs forever (thread target).
    """
    global eyess
    global cface
    while 1:
        eyess = []
        cface = 0
        ret, img = cap.read()
        if not ret:
            # Camera hiccup: skip this cycle instead of writing a bad frame.
            continue
        cv2.imwrite('img.jpg', img)
        func('img.jpg', MyModel)
257+
" \n",
258+
def disp():
    """Display loop: keep refreshing the annotated frame in the notebook.

    Runs forever (thread target). Read errors are expected while the capture
    thread is mid-write, so they are skipped rather than raised.
    """
    while 1:
        try:
            img = mpimg.imread('temp-images/display.jpg')
            plt.imshow(img)
            display.clear_output(wait=True)
            display.display(plt.gcf())
        except Exception:
            # Narrowed from a bare except: so Ctrl-C / SystemExit still work.
            pass
267+
]
268+
},
269+
{
270+
"cell_type": "code",
271+
"execution_count": null,
272+
"metadata": {},
273+
"outputs": [],
274+
"source": [
275+
# Open the default webcam and reset every drowsiness/distraction timer
# immediately before the worker threads start.
cap = cv2.VideoCapture(0)
timebasedrow = time.time()
timebasedis = time.time()
timerundrow = time.time()
timerundis = time.time()

# Run the display refresher and the capture/inference loop concurrently.
d = threading.Thread(target=disp, name='disp')
m = threading.Thread(target=main, name='main')
d.start()
m.start()
286+
]
287+
}
288+
],
289+
"metadata": {
290+
"kernelspec": {
291+
"display_name": "Python 3",
292+
"language": "python",
293+
"name": "python3"
294+
},
295+
"language_info": {
296+
"codemirror_mode": {
297+
"name": "ipython",
298+
"version": 3
299+
},
300+
"file_extension": ".py",
301+
"mimetype": "text/x-python",
302+
"name": "python",
303+
"nbconvert_exporter": "python",
304+
"pygments_lexer": "ipython3",
305+
"version": "3.8.5"
306+
}
307+
},
308+
"nbformat": 4,
309+
"nbformat_minor": 4
310+
}

0 commit comments

Comments
 (0)