-
Notifications
You must be signed in to change notification settings - Fork 0
/
index.html
2445 lines (2229 loc) · 208 KB
/
index.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
<!DOCTYPE html>
<html lang="en">
<!-- Basic -->
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<!-- Mobile Metas -->
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- Site Metas -->
<title>Home Pi</title>
<meta name="keywords" content="">
<meta name="description" content="">
<meta name="author" content="">
<!-- Site Icons -->
<link rel="shortcut icon" href="images/favicon.ico" type="image/x-icon" />
<link rel="apple-touch-icon" href="images/apple-touch-icon.png">
<!-- Bootstrap CSS -->
<link rel="stylesheet" href="css/bootstrap.min.css">
<!-- Site CSS -->
<link rel="stylesheet" href="style.css">
<!-- ALL VERSION CSS -->
<link rel="stylesheet" href="css/versions.css">
<!-- Responsive CSS -->
<link rel="stylesheet" href="css/responsive.css">
<!-- Custom CSS -->
<link rel="stylesheet" href="css/custom.css">
<link rel="stylesheet" href="/path/to/styles/default.css">
<script src="/path/to/highlight.min.js"></script>
<script>hljs.initHighlightingOnLoad();</script>
<!-- Modernizer for Portfolio -->
<script src="js/modernizer.js"></script>
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js"></script>
<script src="https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
<body class="host_version">
<!-- Modal -->
<div class="modal fade" id="login" tabindex="-1" role="dialog" aria-labelledby="myModalLabel">
<div class="modal-dialog modal-dialog-centered modal-lg" role="document">
<div class="modal-content">
<div class="modal-header tit-up">
<button type="button" class="close" data-dismiss="modal" aria-hidden="true">×</button>
<h4 class="modal-title">Customer Login</h4>
</div>
<div class="modal-body customer-box">
<!-- Nav tabs -->
<ul class="nav nav-tabs">
<li><a class="active" href="#Login" data-toggle="tab">Login</a></li>
<li><a href="#Registration" data-toggle="tab">Registration</a></li>
</ul>
<!-- Tab panes -->
<div class="tab-content">
<div class="tab-pane active" id="Login">
<form role="form" class="form-horizontal">
<div class="form-group">
<div class="col-sm-12">
<input class="form-control" id="email1" placeholder="Name" type="text">
</div>
</div>
<div class="form-group">
<div class="col-sm-12">
<input class="form-control" id="exampleInputPassword1" placeholder="Email" type="email">
</div>
</div>
<div class="row">
<div class="col-sm-10">
<button type="submit" class="btn btn-light btn-radius btn-brd grd1">
Submit
</button>
<a class="for-pwd" href="javascript:;">Forgot your password?</a>
</div>
</div>
</form>
</div>
<div class="tab-pane" id="Registration">
<form role="form" class="form-horizontal">
<div class="form-group">
<div class="col-sm-12">
<input class="form-control" placeholder="Name" type="text">
</div>
</div>
<div class="form-group">
<div class="col-sm-12">
<input class="form-control" id="email" placeholder="Email" type="email">
</div>
</div>
<div class="form-group">
<div class="col-sm-12">
<input class="form-control" id="mobile" placeholder="Mobile" type="email">
</div>
</div>
<div class="form-group">
<div class="col-sm-12">
<input class="form-control" id="password" placeholder="Password" type="password">
</div>
</div>
<div class="row">
<div class="col-sm-10">
<button type="button" class="btn btn-light btn-radius btn-brd grd1">
Save & Continue
</button>
<button type="button" class="btn btn-light btn-radius btn-brd grd1">
Cancel</button>
</div>
</div>
</form>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- <!– LOADER –>-->
<!-- <div id="preloader">-->
<!-- <div class="loader-container">-->
<!-- <div class="progress-br float shadow">-->
<!-- <div class="progress__item"></div>-->
<!-- </div>-->
<!-- </div>-->
<!-- </div>-->
<!-- END LOADER -->
<!-- Start header -->
<header class="top-navbar">
<nav class="navbar navbar-expand-lg navbar-light bg-light">
<div class="container-fluid">
<a class="navbar-brand" href="index.html">
<img id ="logo" src="images/cornell-reduced-white.svg" alt="" />
</a>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbars-host" aria-controls="navbars-rs-food" aria-expanded="false" aria-label="Toggle navigation">
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<div class="collapse navbar-collapse" id="navbars-host">
<ul class="navbar-nav ml-auto">
<li class="nav-item active"><a class="nav-link" href="index.html">Home</a></li>
<li class="nav-item"><a class="nav-link" href="#objective">Objective</a></li>
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#Design" id="dropdown-a" data-toggle="dropdown">Design </a>
<div class="dropdown-menu" aria-labelledby="dropdown-a">
<a class="dropdown-item" href="#CCTV Server">CCTV Server </a>
<a class="dropdown-item" href="#Servo Control">Servo Control</a>
<a class="dropdown-item" href="#TPU">TPU-Based Face Recognition</a>
<a class="dropdown-item" href="#android application">Android Application</a>
<a class="dropdown-item" href="#Mutliprocessing Algorithms">Mutliprocessing Algorithms</a>
<a class="dropdown-item" href="#FSM">FSM Control UI</a>
<a class="dropdown-item" href="#Flask">Flask Web Server</a>
<a class="dropdown-item" href="#Servo Control">Servo Control</a>
</div>
</li>
<li class="nav-item"><a class="nav-link" href="#Testing">Testing & Issues</a></li>
<li class="nav-item"><a class="nav-link" href="#Result">Results</a></li>
<li class="nav-item"><a class="nav-link" href="#conclusion">Future Work</a></li>
<li class="nav-item"><a class="nav-link" href="#ref">Reference</a></li>
</ul>
<!-- <ul class="nav navbar-nav navbar-right">-->
<!-- <li><a class="hover-btn-new log orange" href="#" data-toggle="modal" data-target="#login"><span>Book Now</span></a></li>-->
<!-- </ul>-->
</div>
</div>
</nav>
</header>
<!-- End header -->
<div id="carouselExampleControls" class="carousel slide bs-slider box-slider" data-ride="carousel" data-pause="hover" data-interval="false" >
<!-- Indicators -->
<ol class="carousel-indicators">
<li data-target="#carouselExampleControls" data-slide-to="0" class="active"></li>
<li data-target="#carouselExampleControls" data-slide-to="1"></li>
<li data-target="#carouselExampleControls" data-slide-to="2"></li>
</ol>
<div class="carousel-inner" role="listbox">
<div class="carousel-item active">
<div id="home" class="first-section" style="background-image:url('images/wholeset.png');">
<div class="dtab">
<div class="container">
<div class="row">
<div class="col-md-12 col-sm-12 text-right">
<div class="big-tagline">
<h2><strong>Home Pi</strong> Security System</h2>
<p class="lead"> ECE 5725 Final Project - by Jie He (jh2735) and Yuchong Geng (yg534).</br>
December 19, 2020.
</p>
<!-- <a href="#" class="hover-btn-new"><span>Contact Us</span></a>-->
<a href="#objective" class="hover-btn-new"><span>Read More</span></a>
</div>
</div>
</div><!-- end row -->
</div><!-- end container -->
</div>
</div><!-- end section -->
</div>
<div class="carousel-item">
<div id="home" class="first-section" style="background-image:url('images/projectPhoto.png');">
<div class="dtab">
<div class="container">
<div class="row">
<div class="col-md-12 col-sm-12 text-left">
<div class="big-tagline">
<h2 data-animation="animated zoomInRight">Home Pi <strong>Security System</strong></h2>
<p class="lead" data-animation="animated fadeInLeft">A home security solution powered by deep learning, computer vision, and Android. </p>
<!-- <a href="#" class="hover-btn-new"><span>Contact Us</span></a>-->
<a href="#objective" class="hover-btn-new"><span>Read More</span></a>
</div>
</div>
</div><!-- end row -->
</div><!-- end container -->
</div>
</div><!-- end section -->
</div>
<div class="carousel-item">
<div id="home" class="first-section" style="background-image:url('images/wholeset2.jpeg');">
<div class="dtab">
<div class="container">
<div class="row">
<div class="col-md-12 col-sm-12 text-right">
<div class="big-tagline">
<h2><strong>Home Pi</strong> Security System</h2>
<p class="lead"> ECE 5725 Final Project - by Jie He (jh2735) and Yuchong Geng (yg534).</br>
December 19, 2020.
</p>
<!-- <a href="#" class="hover-btn-new"><span>Contact Us</span></a>-->
<a href="#objective" class="hover-btn-new"><span>Read More</span></a>
</div>
</div>
</div><!-- end row -->
</div><!-- end container -->
</div>
</div><!-- end section -->
</div>
<a class="new-effect carousel-control-prev" href="#carouselExampleControls" role="button" data-slide="prev">
<span class="fa fa-angle-left" aria-hidden="true"></span>
<span class="sr-only">Previous</span>
</a>
<!-- Right Control -->
<a class="new-effect carousel-control-next" href="#carouselExampleControls" role="button" data-slide="next">
<span class="fa fa-angle-right" aria-hidden="true"></span>
<span class="sr-only">Next</span>
</a>
</div>
</div>
<div id="objective" class="section wb">
<div class="container">
<div class="section-title row text-center">
<div class="col-md-8 offset-md-2">
<h3>Project Objective & Introduction</h3>
<iframe width="560" height="315" src="https://www.youtube.com/embed/-fXKqDXpBbk" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
<p class="lead"> One day when Yuchong and Jie were discussing on what they planned to do for their ECE 5725 project, Jie's received a email from Cornell Alert reporting a home burglary where the criminal sneaked into a house. An idea popped in their minds: How about building a home security sytem to protect their homes and loved ones in it?
</p>
</br>
<P class="lead">This system should be powerful enough so that Jie and Yuchong could watch the real time CCTV stream video and audio of their front doors from anywhere. Also, the system should be smart enough to automatically recognize their faces and open the doors while deny the faces of intruders. Even more, this system should enable remote control so that even if Yuchong forgets bringing the key, Jie still could open the door remotely while sitting at Phillips hall.
</P>
<img src="images/wholeset.png" alt="" class="img-fluid img-rounded">
<P class="lead">Home Pi, our project, is a security system desgined for protecting the front doors of our home. This system streams live video and audio CCTV based on Picamera and Microphone to a webserver, which an android phone could access to. This system provides fast face recognition and real-time semantic segmentation based on Tensorflow and OpenCV powered by Coral TPU. An Android application should be able to access the system remotely.
</P>
</div>
</div><!-- end title -->
<div id="Design" class="all-title-box">
<div class="container text-center">
<h1>Design<span class="m_1"></span></h1>
</div>
</div>
<div class="hmv-box">
<div class="container">
<center></center>
<center>
</br>
</br>
<p>
Home Pi is a RPi embedded system apparatus consisting of a user interface, a android phone for remote control, a CCTV and its streaming server, a TPU and a webcam for smooth face recognition and a servo module for door control.
Our system enable users to monitor the CCTV remotely via capturing video and audio through a Pi Cam and a microphone. The image and audio could be transmitted to a anrdoid phone, which could remotely control the system, via WiFi.
Also, an extremely smooth face recogntion module powered by the powerful USE edge TPU enable face recognition and face registration.
</p>
</br>
</br>
</br>
</center>
<div class="row">
<div class="col-lg-4 col-md-6 col-12">
<div class="inner-hmv">
<div class="icon-box-hmv"><a href="#CCTV Server"><i class="flaticon-achievement"></i></a></div>
<h3>CCTV Server</h3>
<div class="tr-pa">M</div>
<p>Our project has a CCTV server streams the live video and audio. The video is captured via a Picamera and the audio is collected via a microphone. The video and audio should be accessed via web browser and android application allowing users to access it conveniently. </p>
</div>
</div>
<div class="col-lg-4 col-md-6 col-12">
<div class="inner-hmv">
<div class="icon-box-hmv"><a href=""><i class="flaticon-eye"></i></a></div>
<h3>TPU & Face Recognition</h3>
<div class="tr-pa">V</div>
<p> To enable smooth user face login and fast face recognition,
in our project, a USB Edge TPU, a tensor processing unit capable of accelerating machine learning tasks powered by Tensorflow, is integrated into our system.
</p>
</div>
</div>
<div class="col-lg-4 col-md-6 col-12">
<div class="inner-hmv">
<div class="icon-box-hmv"><a href="#android application"><i class="flaticon-history"></i></a></div>
<h3>Android App</h3>
<div class="tr-pa">H</div>
<p>This system allows users to control it remotely via Android App. This android application should allow users to login via username and password. The authorized users could access live CCTV, the live semantic segmentation results and also control the door based on socket communication with the system.</p>
</div>
</div>
<div class="col-lg-4 col-md-6 col-12">
<div class="inner-hmv">
<div class="icon-box-hmv"><a href="#Mutliprocessing Algorithms"><i class="flaticon-eye"></i></a></div>
<h3>Multiprocessing Algorithms</h3>
<div class="tr-pa">V</div>
<p>In our project, the RPi has to perform tasks, including face recognition and semantic segementation, video and audio streaming, a multithreading UI, TCP socket communcation and a Flask web application at the same time. To reduce latency, we used Python multi processing to fully take advantage of four cores of RPi. </p>
</div>
</div>
<div class="col-lg-4 col-md-6 col-12">
<div class="inner-hmv">
<div class="icon-box-hmv"><a href=""><i class="flaticon-history"></i></a></div>
<h3>FSM Control Interface</h3>
<div class="tr-pa">V</div>
<p>In this project, a user interface is implemented on PiTFT. To control the work flow of the GUI, a finite state machine is designed. This state machine helps the interface behave correspondingly based on the current state and given user inputs and performs state transitions. The FSM could also help the multithread algorithms perform correctly.</p>
</div>
</div>
<div class="col-lg-4 col-md-6 col-12">
<div class="inner-hmv">
<div class="icon-box-hmv"><a href="#Flask"><i class="flaticon-achievement"></i></a></div>
<h3>Flask-based Stream Server</h3>
<div class="tr-pa">
V
</div>
<p>Thanks to the powerful coral TPU, real-time semantic segmentation could be achieved in our project. To show the results, a web video server based on Flask was developed to stream the semantic segmentation results to a server, which could be accessed by Android App and web browser.</p>
</div>
</div>
</div>
</div>
</div>
<div class="row align-items-center">
<div class="col-xl-6 col-lg-6 col-md-12 col-sm-12">
<div class="message-box">
<h2 id="CCTV Server"> CCTV Server</h2>
<h4>Pi Camera Video Stream</h4>
<!-- <h2>Welcome to SmartEDU education school</h2>-->
<p>Pi camera was installed following this <a href="https://thepihut.com/blogs/raspberry-pi-tutorials/16021420-how-to-install-use-the-raspberry-pi-camera">tutorial</a>. First, install the Raspberry Pi Camera by inserting the cable into the Raspberry Pi camera port.
Then we ran <code>sudo raspi-config</code> in the terminal to enable the camera. If the camera option is not available, then an update needs to be made, we run <code> sudo apt-get update</code> and <code> sudo apt-get upgrade</code> in this case. The Raspberry Pi needs to be rebooted.
</br>
To check if the picamera is successfully installed, we use camera to take photos <code> raspistill -o image.jpg</code> If we could view the image.jpg by running <code>gpicview image.jpg</code> without errors, the picamera is installed perfectly.
</br>
We consulted the official handbook <a href="https://picamera.readthedocs.io/en/release-1.13/recipes2.html#web-streaming">tutorial</a> to make the RPi with Picamera as a video stream server. This is a simple HTTP server that achieves significantly higher frame rates than any solution else we tested (mjpg-streamer, Motion program, RPi-Cam-Web-Interface). The code we used could be found <a href="https://raw.githubusercontent.com/RuiSantosdotme/Random-Nerd-Tutorials/master/Projects/rpi_camera_surveillance_system.py"> here</a>.
We set the live video feed to a website served by the RPi <code>http:// IP_Address_Of_Pi:9000</code>. So, we could access the video streaming through web browser on a machine that is connected to the same LAN. The video stream could come with very little latency.
</p>.
</div><!-- end messagebox -->
</div><!-- end col -->
<div class="col-xl-6 col-lg-6 col-md-12 col-sm-12">
<div class="post-media wow fadeIn">
<img src="images/CCTV.png" alt="" class="img-fluid img-rounded">
</div><!-- end media -->
</div><!-- end col -->
<div class="col-xl-6 col-lg-6 col-md-12 col-sm-12">
<div class="message-box">
<h4>Pi Audio Stream</h4>
<!-- <h2>Welcome to SmartEDU education school</h2>-->
<p>
For audio stream, a USB microphone was used in our project. Once the microphone is plugged in, we load the audio module by typing <code>sudo modprobe snd_bcm2835</code>. And we check if we could use it properly by recording some audio into a file by running: <code>arecord -D plughw:1,0 test.wav</code>. And press <code>CTRL+C</code> when we’ve got enough recording.
We play it to check if it works! <code>aplay test.wav</code>. Using the command <code>alsamixer</code>, we could record louder or adjust some parameter and play with the input/output levels of the microphone.
</br>
Similar to the video stream, it would be convenient if we stream the audio to a webserver to which the user could access via Android App or browsers.
We followed the tutorial <a href="https://maker.pro/raspberry-pi/projects/how-to-build-an-internet-radio-station-with-raspberry-pi-darkice-and-icecast">tutorial</a> here to make our RPi an Internet radio station to record or play your podcasts.
</p>
</div><!-- end messagebox -->
</div><!-- end col -->
<div class="col-xl-6 col-lg-6 col-md-12 col-sm-12">
<div class="message-box">
<!-- <h4>Pi Camera Video Stream</h4>-->
<!-- <h2>Welcome to SmartEDU education school</h2>-->
<p> We set up the streaming station using two packages called <a href="http://www.darkice.org/">DarkIce</a>, a live audio streamer, and <a href="https://icecast.org/"> Icecast</a> an audio/video streaming media server. The detail of how to install Darkice and Icecast could be found <a href="https://maker.pro/raspberry-pi/projects/how-to-build-an-internet-radio-station-with-raspberry-pi-darkice-and-icecast">here</a>.
</br>
After the Icecast2 installation, we made a Config file for DarkIce by creating a file named <code>darkice.cfg</code>. The content configurations file should be like xxx and we also need to make shell script named <code>darkice.sh</code> so we could start the audio stream service by executing it. After entering <code>sudo service icecast2 start</code>, we could launch the audio stream server by executing the shell script <code>sudo /home/pi/darkice.sh</code>. And we set We set the live audio feed to a website served by the RPi, and the audio could be accessed by visiting the url <code>http:// IP_Address_Of_Pi:8000/rapi.mp3</code> on browser.
</p>.
<a href="https://github.com/stephen-hjay/Home_Pi/blob/main/final_project_RPi/CCTVServer.py" class="hover-btn-new orange"><span>Github Repo</span></a>
</div><!-- end messagebox -->
</div><!-- end col -->
</div><!-- end container -->
</div><!-- end section -->
<div id="TPU" class="section wb">
<div class="container">
<div class="section-title row text-center">
<div class="col-md-8 offset-md-2">
<h3 >TPU-Based Face Recognition</h3>
<p class="lead">
Our system will first run semantic segmentation to obtain the area of interest (ROI) that are related to people.
Then, the ROI will be put into a classification model to determine whether the current user is registered. The classification results will be sent to the UI via message queue and to determine whether to open the door.
</p>
</div>
</div><!-- end title -->
<div class="row align-items-center">
<div class="col-xl-6 col-lg-6 col-md-12 col-sm-12">
<div class="message-box">
<!-- <h4>2020 BEST SmartEDU education school</h4>-->
<!-- <h2>Welcome to SmartEDU education school</h2>-->
<p>The main benefit of TPU is that we could achieve real-time recognition speed.
Even though Raspberry Pi 4B comes with a GPU, this GPU is designed for more general tasks of graphic processing but not specifically designed for tensor processing tasks.
Thus, with this Edge USB TPU, the computation performance of a Raspberry Pi on tensor processing tasks is greatly boosted. </p>
<p>
Sampling video as dataset, the inference time of the classification for each frame in the video is recorded.
Then, we plot the recorded inference times on a bar graph, with y-axis being the inference time in milliseconds which is shown on the comparison between inference times on CPU and TPU, there is a huge difference for TPU and CPU.
The inference times for TPU are all below 20 milliseconds with average around 10 milliseconds.
These numbers translate to a frame rate of 100 frames per second, which is a lot bigger than most cameras’ frame rate which is usually 30 or 60 fps.
As for the CPU, the average inference time lies below 80 milliseconds, which corresponds to just 12.5 frames per second.
So using TPU can greatly improve our system’s response time.
</p>
</div><!-- end messagebox -->
</div><!-- end col -->
<div class="col-xl-6 col-lg-6 col-md-12 col-sm-12">
<div class="post-media wow fadeIn">
<img src="images/inference.png" alt="" class="img-fluid img-rounded">
</div><!-- end media -->
</div><!-- end col -->
</div>
<div class="row align-items-center">
<div class="col-xl-6 col-lg-6 col-md-12 col-sm-12">
<div class="post-media wow fadeIn">
<img src="images/lostfunction.png" alt="" class="img-fluid img-rounded">
</div><!-- end media -->
</div><!-- end col -->
<div class="col-xl-6 col-lg-6 col-md-12 col-sm-12">
<div class="message-box">
<!-- <h2>The standard Lorem Ipsum passage, used since the 1500s</h2>-->
<p>
Another benefit of using TPU is it can retrain a model on-device very quickly.
This feature allows new users to register their faces into the system and at the next boot, our program can retrain the classification model very quickly.
For our experiment, re-training on 300 images for around 300 iterations only takes 2 to 3 seconds, and the accuracy of the re-trained model is also very high.
In figure 2, we have shown the accuracy and loss over 300 iterations.
As shown in the figure, the model quickly learns the images and reach a plateau very early.
</p>
<p> Integer rutrum ligula eu dignissim laoreet. Pellentesque venenatis nibh sed tellus faucibus bibendum.</p>
</div><!-- end messagebox -->
</div><!-- end col -->
</div><!-- end row -->
<div class="row align-items-center">
<div class="col-xl-6 col-lg-6 col-md-12 col-sm-12">
<div class="message-box">
<!-- <h4>2020 BEST SmartEDU education school</h4>-->
<!-- <h2>Welcome to SmartEDU education school</h2>-->
<p>And to illustrate our model’s performance, we have also plotted the confusion matrix for test images.
As shown in this confusion matrix, the model can clearly differentiate between Yuchong, Jay and negative.
</p>
<a href="https://github.com/stephen-hjay/Home_Pi/blob/main/final_project_RPi/video_stream.py" class="hover-btn-new orange"><span>Github Repo</span></a>
</div><!-- end messagebox -->
</div><!-- end col -->
<div class="col-xl-6 col-lg-6 col-md-12 col-sm-12">
<div class="post-media wow fadeIn">
<img src="images/confusion.png" alt="" class="img-fluid img-rounded">
</div><!-- end media -->
</div><!-- end col -->
</div>
</div><!-- end container -->
</div><!-- end section -->
<div class="section lb page-section">
<div class="container">
<div class="message-box">
<h2 id="android application">Android Application</h2>
<h4>Video and Audio Stream on APP</h4>
<p>After setting up the live stream video and audio station on RPi, we need to display the video on the Android phone by using WebView in App's MainActivity.</p>
<pre style='box-sizing: border-box; overflow: auto; font-family: Menlo, Monaco, Consolas, "Courier New", monospace; font-size: 13px; display: block; padding: 9.5px; margin: 0px 0px 10px; line-height: 1.42857; color: rgb(51, 51, 51); word-break: break-all; overflow-wrap: break-word; background-color: rgb(245, 245, 245); border: 1px solid rgb(204, 204, 204); border-radius: 4px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: -webkit-left; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial;'><code style='box-sizing: border-box; font-family: Menlo, Monaco, Consolas, "Courier New", monospace; font-size: inherit; padding: 0px; color: inherit; background-color: transparent; border-radius: 0px; white-space: pre-wrap;'>public class MainActivity extends AppCompatActivity implements SensorEventListener {
private WebView webview ;
...
@Override
protected void onCreate (Bundle savedInstanceState) {
...
webview =(WebView)findViewById(R.id.webView);
// make the webview adapt to the screen
WebSettings settings = webView.getSettings();
settings.setUseWideViewPort(true);
settings.setLoadWithOverviewMode(true);
// display the video
webview.setWebViewClient(new WebViewClient());
webview.getSettings().setJavaScriptEnabled(true);
webview.getSettings().setDomStorageEnabled(true);
webview.setOverScrollMode(WebView.OVER_SCROLL_NEVER);
webview.loadUrl("http://ip_address_of_Rpi:9000");
}
}</code></pre>
<p>But WebView only could not play the audio, to get the real-time audio streaming, we still need to include a MediaPlayer in our Android to play the sound from the audio server.</p>
<pre style='box-sizing: border-box; overflow: auto; font-family: Menlo, Monaco, Consolas, "Courier New", monospace; font-size: 13px; display: block; padding: 9.5px; margin: 0px 0px 10px; line-height: 1.42857; color: rgb(51, 51, 51); word-break: break-all; overflow-wrap: break-word; background-color: rgb(245, 245, 245); border: 1px solid rgb(204, 204, 204); border-radius: 4px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: -webkit-left; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial;'><code style='box-sizing: border-box; font-family: Menlo, Monaco, Consolas, "Courier New", monospace; font-size: inherit; padding: 0px; color: inherit; background-color: transparent; border-radius: 0px; white-space: pre-wrap;'>
MediaPlayer mediaPlayer = new MediaPlayer();
mediaPlayer.setAudioAttributes(
new AudioAttributes.Builder()
.setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
.setUsage(AudioAttributes.USAGE_MEDIA)
.build()
);
try {
mediaPlayer.setDataSource("http://ip_address_of_Rpi:8000/rapi.mp3");// the url for the sound data source
mediaPlayer.prepare();
} catch (IOException e) {
e.printStackTrace();
}
// might take long! (for buffering, etc)
mediaPlayer.start();
</code></pre>
</div>
<div class="message-box">
<h4>TCP SOCKET COMMUNICATION (Application End)</h4>
<p>In our project, we have "Admin Login" section in our interface. When entering this module, the UI would ask the user (the admins or the app holders) to login by entering password and username on the APP. After a successgul login, the UI (PiTFT) would display the username of the user who have logged in the system. And the user who has successfully logged in should have the right to open the door and watch the CCTV live.
Thus, a TCP socket communication module should also be deployed on both the Android APP and RPi server end. The following code is </p>
<pre style='box-sizing: border-box; overflow: auto; font-family: Menlo, Monaco, Consolas, "Courier New", monospace; font-size: 13px; display: block; padding: 9.5px; margin: 0px 0px 10px; line-height: 1.42857; color: rgb(51, 51, 51); word-break: break-all; overflow-wrap: break-word; background-color: rgb(245, 245, 245); border: 1px solid rgb(204, 204, 204); border-radius: 4px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: -webkit-left; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial;'><code style='box-sizing: border-box; font-family: Menlo, Monaco, Consolas, "Courier New", monospace; font-size: inherit; padding: 0px; color: inherit; background-color: transparent; border-radius: 0px; white-space: pre-wrap;'>class Sender extends AsyncTask {
Socket s;// socket connected to RPi
PrintWriter pw;
String msg;
String type;
BufferedReader bufferedReader;
@SuppressLint("SetTextI18n")
@Override
protected Void doInBackground(Void...params){
try {
s = new Socket("ip address of RPi", 7000);
// send message to RPi Server
pw = new PrintWriter(s.getOutputStream());
pw.write(msg);
pw.flush();
bufferedReader = new BufferedReader(new InputStreamReader(s.getInputStream()));
// receive message from RPi Server
String msg2 = bufferedReader.readLine();
switch (type) {
case "login":
if (msg2!=null && msg2.equals("Success")) {
success = true;
// login successfully
statusTextView.setText("Login Success");
showToast("Login Success");
}else{
// login fail
statusTextView.setText("Not Login In");
showToast("Login Fail");
}
break;
case "doorOpen":
if (msg2!=null && msg2.equals("Success")) {
doorOpen = true;
doorStatusTextView.setText("Door Open");
}
}
....
}
}
</code></pre>
</div>
<div class="message-box">
<h4>User Login Activity</h4>
<p> Before allowing the user to see the CCTV live stream and control our system, a login activity must be passed to verify the user identity. Thus, a user login activity was first launched on our App. The login activity would allow users to fill in the username, password, IP address of RPi, and the designated port number.
</p>
<pre style='box-sizing: border-box; overflow: auto; font-family: Menlo, Monaco, Consolas, "Courier New", monospace; font-size: 13px; display: block; padding: 9.5px; margin: 0px 0px 10px; line-height: 1.42857; color: rgb(51, 51, 51); word-break: break-all; overflow-wrap: break-word; background-color: rgb(245, 245, 245); border: 1px solid rgb(204, 204, 204); border-radius: 4px; font-style: normal; font-variant-ligatures: normal; font-variant-caps: normal; font-weight: 400; letter-spacing: normal; orphans: 2; text-align: -webkit-left; text-indent: 0px; text-transform: none; widows: 2; word-spacing: 0px; -webkit-text-stroke-width: 0px; text-decoration-thickness: initial; text-decoration-style: initial; text-decoration-color: initial;'><code style='box-sizing: border-box; font-family: Menlo, Monaco, Consolas, "Courier New", monospace; font-size: inherit; padding: 0px; color: inherit; background-color: transparent; border-radius: 0px; white-space: pre-wrap;'>public class StartActivity extends AppCompatActivity {
private EditText textViewUserName;
private EditText textViewPassword;
...
@Override
protected void onCreate(Bundle savedInstanceState) {
....
// login button
Button buttonEnter = (Button) findViewById(R.id.enter);
buttonEnter.setOnClickListener(
(x) -> {
uName = textViewUserName.getText().toString();
pwd = textViewPassword.getText().toString();
host = textViewIP.getText().toString();
TCPport = Integer.valueOf(textViewPort.getText().toString());
if (uName == null || uName.length() == 0 || pwd == null || pwd.length() == 0) {
showToast("please fill all blanks");
return;
}else{
StartActivity.Sender sender = new StartActivity.Sender();
sender.host = host;
sender.port = TCPport;
sender.msg = "Check:"+uName+":"+pwd;
sender.type = "login";
sender.execute();
long timeInit = System.currentTimeMillis();
// wait for the login result from server
while(System.currentTimeMillis() - timeInit<3000){
if (success){
Intent startMainActivityIntent = new Intent(StartActivity.this, MainActivity.class);
startActivity(startMainActivityIntent);
finish();
}
}
}
});
</code></pre>
</div>
</div>
</div>
</section>
<div id="overviews" class="section wb">
<div class="container">
<div class="row align-items-center">
<div class="col-xl-6 col-lg-6 col-md-12 col-sm-12">
<div class="message-box">
<h2 id="Mutliprocessing Algorithms"> Mutliprocessing Algorithms</h2>
<p>To achieve our goal of live video and audio streaming, face recognition, semantic segmentation and Flask streaming, we must decrease the latency and running time of every part as much as possible. At the beginning, we tried to launch everything in one bash file, but the resulting delay and latency were too long. To reduce the latency, one solution is to process different tasks in parallel to take advantage of the four cores in the R-Pi.
In Python, we could use the multiprocessing library to fully take advantage of the four cores of the R-Pi. In total, we have five processes assigned to four cores.
</p>
<a href="#main interface" class="hover-btn-new orange"><span>Learn More</span></a>
</div><!-- end messagebox -->
</div><!-- end col -->
<div class="col-xl-6 col-lg-6 col-md-12 col-sm-12">
<div class="message-box">
<!-- <h2>The standard Lorem Ipsum passage, used since the 1500s</h2>-->
<ol>
<li>The first core is assigned to stream the video and audio for the CCTV. Since we do not need the feedback from the CCTV to control our state machine, we do not need to pass a queue into this process for interprocess communication. Moreover, this core is also assigned to a Flask web application process, which is a consumer process consuming the frames generated by the semantic segmentation process. Thus, a <code>frame_queue</code> is passed into it to receive processed semantic segmentation frames from the face recognition module. </li>
<li> The second core is assigned to launch the TCP socket server, which receives socket connections and communicates with the Android APP. For the user login module, the main interface needs user login information and displays the logged-in user. Thus, this is a producer process producing TCP connection results, and the <code>login_queue</code> is passed in for interprocess communication. </li>
<li>
The third core is assigned to launch the face registration and face recognition module. By assigning a core to this computation-heavy module, the system could achieve a higher frame rate and also receive face recognition results in only 20 ms. This is a producer process producing face recognition results and semantic segmentation frames. Thus, a <code>face_recognition_queue</code> for passing recognition results and a <code>frame_queue</code> for sending frames to the Flask server were passed in.
</li>
<li>
The fourth core is used to launch the main user interface. This UI screen is refreshed every 0.02 seconds, and several GPIO callbacks and several threads run at the same time, so it needs the multiprocessing algorithm to ensure a smooth UI response. This is a consumer process: it receives the face recognition results and the login results from other processes. Thus, the <code>face_recognition_queue</code> and <code>login_queue</code> were passed in to allow the UI to get the results from APP login and face recognition.
</li>
</ol>
</div><!-- end messagebox -->
</div><!-- end col -->
<p>
The above describes the tasks that every core executes. But we still need to figure out how each process could communicate with the others via queues. In our project, the producer processes produce the results and frames. If a producer ceaselessly puts results into the queue, unexpected results would appear.
For example, if the face recognition module continuously feeds results into the queue and the UI only asks for results occasionally, the recognition results would pile up in the queue. The next time the UI requests a result, it could only get the outdated piled-up results from the front of the queue, since a queue is First-In-First-Out.
In order to resolve this problem, we have two solutions: 1. the producers examine the status of the message queue and clear the outdated results; 2. use a LIFO (Last-In-First-Out) queue so that the consumer could always get the newest results.
<br>
After weighing the pros and cons, we decided to take the first option, where the producers clear the outdated results in time, since it could help lower the burden on the system memory and resources. This is a classic producer-consumer design pattern where the queue serves as the message pipe and the cache, and the producer process needs to match the speed of the consumer process.
</p>
</div><!-- end row -->
</div><!-- end container -->
</div><!-- end section -->
<div class="section lb page-section">
<div class="container">
<div class="message-box">
<h2 id="FSM">FSM Control User Interface</h2>
<p>
In our project, apart from multiprocessing, the user interface and the system need to process several complex tasks at the same time.
A finite state machine is designed to control the logic of multi thread activities and all functional modules. The user interface would behave accordingly given user inputs and performs state transitions.
</p>
<ol>
<li>
When the system starts, it would first assign cores to each module and initialize the message queues for process communication. Then the state machine would enter the MAIN state where the main interface is displayed.
When user input is given (for example, the user chooses "Admin Login", "Face Login" or "Face Registration"), the state machine would transition to the corresponding state.
</li>
<li>
When state machine in "Admin Login" function, the state machine would start a new thread trying to get user login results from TCP module modules via message queue.
When the state machine receives positive results, the state machine would transit to "User Welcome" and will then return to "MAIN" state.
</li>
<li>
When state machine in "Face Login" function, the state machine would start a new thread trying to get results from Face Recognition module modules via message queue.
When user press "Confirm", the state machine would shift to "Check" and will decide if open the door based on recognition results. If it is the intended users, a new thread would be started to open the door asynchronously.
</li>
<li>
When state machine in "Face Registration" function, the state machine would start a new thread to call a bash shell script taking photos of the faces of users and save them to a designated location.
When this process finish, the state machine would return to "FINISH" and then "MAIN" automatically.
</li>
</ol>
</div>
<div class="post-media1 wow fadeIn">
<center><img id="FSMPic" src="images/FSM.jpg" alt="" ></center>
</div><!-- end media -->
</div>
</div>
</section>
<div id="Flask" class="section wb">
<div class="container">
<div class="row align-items-center">
<div class="col-xl-6 col-lg-6 col-md-12 col-sm-12">
<div class="message-box">
<!-- <h4>2020 BEST SmartEDU education school</h4>-->
<h2>Flask based Semantic Segmentation Result Streaming Server</h2>
<p>Thanks to the powerful Coral TPU, real-time semantic segmentation could be achieved in our project.
Thus, in order to get more intuitive results from the face recognition module, we decided to stream the semantic segmentation results.
We evaluated several solutions:</p>
<ol>
<li>
TCP socket image transfer, but we would need to view it through VNC, which is not very ideal in our project, since we do not want to SSH into the RPi when the system is running. </li>
<li>RPi-Cam-Web-Interface and mjpg-streamer allow for high-frame-rate realtime image transfer, but do not allow for customization.
</li>
<li>
Django could be ideal but it is too heavy for our project, since we do not need to process complex requests.
</li>
</ol>
<!-- <a href="#" class="hover-btn-new orange"><span>Learn More</span></a>-->
At last, we settled on <a href="https://flask.palletsprojects.com/en/1.1.x/">Flask</a>, which is a lightweight Python web backend framework. Then, a web video server based on Flask was developed to stream the semantic segmentation results to a server, which could be accessed by Android App and web browser.
</div><!-- end messagebox -->
</div><!-- end col -->
<div class="col-xl-6 col-lg-6 col-md-12 col-sm-12">
<div class="post-media wow fadeIn">
<img src="images/1200px-Flask_logo.svg.png" alt="" class="img-fluid img-rounded">
</div><!-- end media -->
</div><!-- end col -->
</div>
<!-- end row -->
</div><!-- end container -->
</div><!-- end section -->
<section class="section lb page-section">
<div class="container">
<div class="section-title row text-center">
<div class="col-md-8 offset-md-2">
<h3 id="Testing"> Testing & Issues</h3>
<p class="lead"> In our project, we have tracked issues we met based on timeline. The below is a timeline recording the issues we discovered in our project and also the way how we resolve those problems.
</p>
</div>
</div><!-- end title -->
<div class="timeline">
<div class="timeline__wrap">
<div class="timeline__items">
<!-- <h3 id="Issues">Issues</h3>-->
<div class="timeline__item">
<div class="timeline__content img-bg-01">
<h2>Broken Pi Cam</h2>
<p>
At first, we tested every piece of hardware we got for our project, and we found out that the PiCam turned out to be a little fragile.
</p>
</div>
</div>
<div class="timeline__item">
<div class="timeline__content img-bg-02">
<h2>Python Module Installation</h2>
<p>
For the installation of Python modules on the Raspbian system, we need to pay special attention to the user and the Python version. In our project, we launch our programs with <code>sudo python3</code>. Thus, we need to use <code>sudo pip3 install</code> to make sure every module is installed under the super user and Python 3.
</p>
</div>
</div>
<div class="timeline__item">
<div class="timeline__content img-bg-03">
<h2>Multiprocessing Communication</h2>
<p>
Since multiple processes need to communicate with each other, we researched several solutions for multiprocessing communication, such as FIFO and Queue. We decided to use Queue to implement the multiprocessing algorithms, since there would be no need to interact with the OS frequently, thus increasing the efficiency.
</p>
</div>
</div>
<div class="timeline__item">
<div class="timeline__content img-bg-04">
<h2>Multiple Camera Conflicts</h2>
<p>
In our project, we used two cameras. However, the camera index is randomly assigned, while OpenCV requires the specific device number that a webcam is assigned to.
In order to avoid this randomness, we have designed a method to confirm the number assigned to a webcam using the command <code>v4l2-ctl -d /dev/videoN -D</code>
</p>
</div>
</div>
<div class="timeline__item">
<div class="timeline__content img-bg-01">
<h2>Clear Text not permitted in Android</h2>
<p>
When we developed the Android application for viewing the live CCTV, the communication protocol was based on HTTP, which is unsecured and not permitted since Android 8.
In addition, we need to configure Internet permission before launching the App. Thus, we need to include <code>android:usesCleartextTraffic="true"</code> and <code> <uses-permission android:name="android.permission.INTERNET"/></code> in the <code>AndroidManifest.xml</code> file.
</p>
</div>
</div>
<div class="timeline__item">
<div class="timeline__content img-bg-02">
<h2>Internet Connection when Initializing</h2>
<p>
Since our project highly relies on Internet connection, we have included a piece of shell script code to ensure Pi has connected to Wifi before launching the main program.
</p>
</div>
</div>
<div class="timeline__item">
<div class="timeline__content img-bg-03">
<h2>Servo Issues</h2>
<p>
For this project, we also use a continuous rotation servo to control a door.
However, the servo is not calibrated and it continues to rotate at a slow speed when given a stop signal.
To solve it, by looking through the servo’s datasheet, we found that we can calibrate the servo by adjusting the potentiometer.
So we calibrated the servo by giving a stop signal and trying to find the position that the servo moves the least.
</p>
</div>
</div>
<div class="timeline__item">
<div class="timeline__content img-bg-04">
<h2>Memory Allocation</h2>
<p> The on-board GPU on the Pi may have run out of memory space since we used multiple cameras. Upon investigation, we found that the Pi actually allows users to determine the memory allocation to the GPU. We have increased the default memory from 128 MB to 256 MB.
</p>
</div>
</div>
</div>
</div>
</div>
</div>
</section>
<div id="testimonials" class="parallax section db parallax-off" style="background-image:url('images/wholeset.png');">
<div class="container">
<div class="section-title text-center">
<h3>Test Videos</h3>
<p>We followed an incremental testing approach in our development.
This enabled our team to parallelize our work separately, while ensuring each of us developed a fully functional component before it was integrated into the system-at-large. The videos below record how we tested each module and integrated them into a system.
</p>
</div><!-- end title -->
<div class="row">
<div class="col-md-12 col-sm-12">
<div class="testi-carousel owl-carousel owl-theme">
<div class="testimonial clearfix">
<div class="testi-meta">
<!-- <img src="images/testi_01.png" alt="" class="img-fluid">-->
<h4>Face Recognition & Segmentation Results </h4>
</div>
<!-- <div class="desc">-->
<!-- <h3><i class="fa fa-quote-left"></i> Wonderful Support!</h3>-->
<iframe width="560" height="315" src="https://www.youtube.com/embed/cJcruITofGU" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
<!-- </div>-->
<!-- end testi-meta -->
</div>
<!-- end testimonial -->
<div class="testimonial clearfix">
<div class="testi-meta">
<!-- <img src="images/testi_02.png" alt="" class="img-fluid">-->
<h4>User Interface Test </h4>
</div>
<iframe width="560" height="315" src="https://www.youtube.com/embed/13xrsHnUF_U" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
<!-- end testi-meta -->
</div>
<!-- end testimonial -->
<div class="testimonial clearfix">
<div class="testi-meta">
<h4> Android Phone Test</h4>
</div>
<iframe width="560" height="315" src="https://www.youtube.com/embed/Tg_YHVRKXuQ" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
<!-- end testi-meta -->
</div>
<!-- end testimonial -->
<div class="testimonial clearfix">
<div class="testi-meta">
<!-- <img src="images/testi_01.png" alt="" class="img-fluid">-->
<h4>CCTV Test on Android and Browser </h4>
</div>
<!-- <div class="desc">-->
<!-- <h3><i class="fa fa-quote-left"></i> Wonderful Support!</h3>-->
<!-- <p class="lead">They have got my project on time with the competition with a sed highly skilled, and experienced & professional team.</p>-->
<!-- </div>-->
<iframe width="560" height="315" src="https://www.youtube.com/embed/LEgcQkHB0gc" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
<!-- end testi-meta -->
</div>
</div><!-- end carousel -->
</div><!-- end col -->
</div><!-- end row -->
</div><!-- end container -->
</div><!-- end section -->
<div id="teachers" class="section wb">
<div class="container">
<h2 >About Us</h2>
<div class="row">
<div class="col-lg-3 col-md-6 col-12">
<div class="our-team">
<div class="team-img">
<img src="images/jay.jpeg">
<div class="social">
<ul>
<li><a href="#" class="fa fa-facebook"></a></li>
<li><a href="#" class="fa fa-twitter"></a></li>
<li><a href="#" class="fa fa-linkedin"></a></li>
<li><a href="#" class="fa fa-skype"></a></li>
</ul>
</div>
</div>
<div class="team-content">
<h3 class="title">Jie He (jh2735)</h3>
<span class="post">Android, CCTV, User Interface, Multiprocessing Algorithm, Flask, Website</span>