Skip to content

Commit 4dad686

Browse files
author
doripjonov
committed
separated webcam demo documentation
1 parent d3066f7 commit 4dad686

File tree

3 files changed

+246
-246
lines changed

3 files changed

+246
-246
lines changed

README.md

+1-245
Original file line numberDiff line numberDiff line change
@@ -113,251 +113,7 @@ NOTE: We provide 3 ways of uploading image to our SDK. They are url, blob and re
113113
| Nodejs ||||
114114

115115
### Webcam demo
116-
117-
In this part of the docs we show how to use our detection service with a webcam. **NOTE:** we have chosen React as it is one of today's most popular UI libraries.
118-
119-
1. Clone our repository
120-
2. Enter the ```webcam_demo``` folder and install packages
121-
122-
``` cd webcam_demo```
123-
124-
```npm install```
125-
126-
3. Change detection API key inside ```src > App.js``` line ```40```
127-
128-
4. Start project
129-
130-
```npm start```
131-
132-
5. Click the ```video start``` button to start your webcam
133-
134-
*OR follow below instructions to create project by yourself*
135-
136-
1. Install reactjs
137-
138-
```npx create-react-app compreface-demo```
139-
140-
2. Enter the project folder
141-
142-
```cd compreface-demo```
143-
144-
3. Install CompreFace SDK
145-
146-
```npm i @exadel/compreface-js-sdk```
147-
148-
4. Create your component and copy/paste the following code. NOTE: We have used a functional component; the video tag is used to connect to your webcam, and the canvas tags are used for drawing the square and some extra data.
149-
150-
```
151-
import { useRef } from 'react'
152-
import { CompreFace } from '@exadel/compreface-js-sdk';
153-
154-
function App() {
155-
const videoTag = useRef(null);
156-
const canvas1 = useRef(null);
157-
const canvas2 = useRef(null);
158-
const canvas3 = useRef(null);
159-
160-
const handleVideoStart = () => {
161-
console.log("Click is working")
162-
}
163-
164-
return (
165-
<div>
166-
<video ref={videoTag} width="640" height="480" autoPlay muted ></video>
167-
<canvas ref={canvas1} width="640" id="canvas" height="480" style={{ display: 'none' }}></canvas>
168-
<canvas ref={canvas2} width="640" id="canvas2" height="480" style={{ position: 'absolute' }} ></canvas>
169-
<canvas ref={canvas3} width="640" height="480" style={{ position: 'absolute' }}></canvas>
170-
171-
<div>
172-
<button onClick={handleVideoStart}>Start video</button>
173-
</div>
174-
</div>
175-
);
176-
}
177-
178-
export default App;
179-
```
180-
181-
5. Add the ability to start the webcam when the user clicks the "Start video" button. Put the following code into the ```handleVideoStart()``` function. ```navigator.mediaDevices``` is a built-in read-only browser property which enables the user to access the webcam.
182-
183-
```
184-
navigator.mediaDevices.getUserMedia({ video: true})
185-
.then(stream => videoTag.current.srcObject = stream)
186-
.catch( error => console.error(error) )
187-
```
188-
189-
6. Initialize the CompreFace instances and catch the video event which is fired when the webcam starts working. Your code should look like the following example. The ```play``` event listener fires when the webcam starts working, and this is the place where we need to use the CompreFace SDK. NOTE: the ```next_frame``` custom event is created in order to produce a recursion-like effect while we are drawing the square on the face.
190-
```
191-
import { useRef } from 'react'
192-
import { CompreFace } from '@exadel/compreface-js-sdk';
193-
194-
function App() {
195-
const videoTag = useRef(null);
196-
const canvas1 = useRef(null);
197-
const canvas2 = useRef(null);
198-
const canvas3 = useRef(null);
199-
200-
const handleVideoStart = () => {
201-
navigator.mediaDevices.getUserMedia({ video: true})
202-
.then(stream => videoTag.current.srcObject = stream)
203-
.catch( error => console.error(error) )
204-
205-
videoTag.current.addEventListener('play', () => {
206-
// CompreFace init
207-
let server = "http://localhost";
208-
let port = 8000;
209-
let detection_key = "your_api_key_for_detection_service";
210-
211-
let core = new CompreFace(server, port);
212-
let detection_service = core.initFaceDetectionService(detection_key);
213-
// end of CompreFace init
214-
215-
let ctx1 = canvas1.current.getContext('2d');
216-
let ctx2 = canvas2.current.getContext('2d');
217-
let ctx3 = canvas3.current.getContext("2d");
218-
219-
document.addEventListener('next_frame', () => {
220-
ctx1.drawImage(videoTag.current, 0, 0, 640, 480)
221-
canvas1.current.toBlob( blob => {
222-
detection_service.detect(blob, { limit: 1, face_plugins: 'age,gender' })
223-
.then(res => {
224-
/**
225-
226-
We need to call the draw function which draws a square on the face of the user in front of the webcam
227-
228-
*/
229-
})
230-
.catch(error => console.log(error))
231-
}, 'image/jpeg', 0.95)
232-
})
233-
234-
const evt = new Event("next_frame", {"bubbles":true, "cancelable":false});
235-
document.dispatchEvent(evt);
236-
})
237-
}
238-
239-
return (
240-
<div>
241-
<video ref={videoTag} width="640" height="480" autoPlay muted ></video>
242-
<canvas ref={canvas1} width="640" id="canvas" height="480" style={{ display: 'none' }}></canvas>
243-
<canvas ref={canvas2} width="640" id="canvas2" height="480" style={{ position: 'absolute' }} ></canvas>
244-
<canvas ref={canvas3} width="640" height="480" style={{ position: 'absolute' }}></canvas>
245-
246-
<div>
247-
<button onClick={handleVideoStart}>Start video</button>
248-
</div>
249-
</div>
250-
);
251-
}
252-
253-
export default App;
254-
```
255-
256-
7. Add the draw function. NOTE: You can add extra canvas elements which show extra info related to the detected face.
257-
258-
```
259-
const drawFace = (canvasElement, faceData, extraCanvas) => {
260-
const evt = new Event("next_frame", {"bubbles":true, "cancelable":false});
261-
document.dispatchEvent(evt);
262-
let box = faceData.result[0].box;
263-
264-
canvasElement.clearRect(0, 0, 640, 480);
265-
extraCanvas.clearRect(0, 0, 640, 480);
266-
267-
canvasElement.strokeStyle = 'green';
268-
extraCanvas.strokeStyle = "blue";
269-
extraCanvas.fillStyle = "white"
270-
271-
extraCanvas.lineWidth = 5;
272-
canvasElement.lineWidth = 5;
273-
274-
canvasElement.strokeRect(box.x_min, box.y_min, box.x_max - box.x_min, box.y_max - box.y_min);
275-
extraCanvas.fillText( Number.parseFloat(box.probability).toPrecision(5) + ' ' + faceData.result[0].gender + ' ' + faceData.result[0].age[0] + '-' + faceData.result[0].age[1], box.x_min, box.y_min - 10)
276-
}
277-
```
278-
279-
8. Final code should look like this.
280-
281-
```
282-
import { useRef } from 'react'
283-
import { CompreFace } from '@exadel/compreface-js-sdk';
284-
285-
function App() {
286-
const videoTag = useRef(null);
287-
const canvas1 = useRef(null);
288-
const canvas2 = useRef(null);
289-
const canvas3 = useRef(null);
290-
291-
const drawFace = (canvasElement, faceData, extraCanvas) => {
292-
const evt = new Event("next_frame", {"bubbles":true, "cancelable":false});
293-
document.dispatchEvent(evt);
294-
let box = faceData.result[0].box;
295-
296-
canvasElement.clearRect(0, 0, 640, 480);
297-
extraCanvas.clearRect(0, 0, 640, 480);
298-
299-
canvasElement.strokeStyle = 'green';
300-
extraCanvas.strokeStyle = "blue";
301-
extraCanvas.fillStyle = "white"
302-
303-
extraCanvas.lineWidth = 5;
304-
canvasElement.lineWidth = 5;
305-
306-
canvasElement.strokeRect(box.x_min, box.y_min, box.x_max - box.x_min, box.y_max - box.y_min);
307-
extraCanvas.fillText( Number.parseFloat(box.probability).toPrecision(5) + ' ' + faceData.result[0].gender + ' ' + faceData.result[0].age[0] + '-' + faceData.result[0].age[1], box.x_min, box.y_min - 10)
308-
}
309-
310-
const handleVideoStart = () => {
311-
navigator.mediaDevices.getUserMedia({ video: true})
312-
.then(stream => videoTag.current.srcObject = stream)
313-
.catch( error => console.error(error) )
314-
315-
videoTag.current.addEventListener('play', () => {
316-
// CompreFace init
317-
let server = "http://localhost";
318-
let port = 8000;
319-
let detection_key = "your_api_key_for_detection_service";
320-
321-
let core = new CompreFace(server, port);
322-
let detection_service = core.initFaceDetectionService(detection_key);
323-
// end of CompreFace init
324-
325-
let ctx1 = canvas1.current.getContext('2d');
326-
let ctx2 = canvas2.current.getContext('2d');
327-
let ctx3 = canvas3.current.getContext("2d");
328-
329-
document.addEventListener('next_frame', () => {
330-
ctx1.drawImage(videoTag.current, 0, 0, 640, 480)
331-
canvas1.current.toBlob( blob => {
332-
detection_service.detect(blob, { limit: 1, face_plugins: 'age,gender' })
333-
.then(res => {
334-
drawFace(ctx2, res, ctx3)
335-
})
336-
.catch(error => console.log(error))
337-
}, 'image/jpeg', 0.95)
338-
})
339-
340-
const evt = new Event("next_frame", {"bubbles":true, "cancelable":false});
341-
document.dispatchEvent(evt);
342-
})
343-
}
344-
345-
return (
346-
<div>
347-
<video ref={videoTag} width="640" height="480" autoPlay muted ></video>
348-
<canvas ref={canvas1} width="640" id="canvas" height="480" style={{ display: 'none' }}></canvas>
349-
<canvas ref={canvas2} width="640" id="canvas2" height="480" style={{ position: 'absolute' }} ></canvas>
350-
<canvas ref={canvas3} width="640" height="480" style={{ position: 'absolute' }}></canvas>
351-
352-
<div>
353-
<button onClick={handleVideoStart}>Start video</button>
354-
</div>
355-
</div>
356-
);
357-
}
358-
359-
export default App;
360-
```
116+
[Documentation is here](/webcam_demo)
361117

362118
## Reference
363119

package.json

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "@exadel/compreface-js-sdk",
3-
"version": "0.5.0",
3+
"version": "0.5.1",
44
"license": "Apache-2.0",
55
"description": "JavaScript SDK for CompreFace - free and open-source face recognition system from Exadel",
66
"main": "index.js",

0 commit comments

Comments
 (0)