-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.js
More file actions
79 lines (67 loc) · 2.67 KB
/
app.js
File metadata and controls
79 lines (67 loc) · 2.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
// Lazily-initialized TensorFlow.js layers model; assigned in loadModels().
let model;
// DOM handles: live camera feed, drawing surface, prediction text output,
// and the "loading" indicator shown while the models download.
const videoElement = document.getElementById('webcam');
const canvasElement = document.getElementById('canvas');
const resultElement = document.getElementById('prediction-label');
const loadingElement = document.getElementById('loading');
// Initialize the webcam
/**
 * Starts the webcam and streams it into the <video> element.
 * Resolves once the first frame of video data is available, so callers can
 * safely begin drawing/detecting frames.
 * @returns {Promise<void>}
 * @throws {Error} if no camera API exists or access is denied.
 */
async function setupWebcam() {
  // Prefer the modern promise-based API; the callback-style
  // navigator.getUserMedia (and its vendor-prefixed variants) is deprecated.
  if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    const stream = await navigator.mediaDevices.getUserMedia({ video: true });
    videoElement.srcObject = stream;
    await new Promise((resolve) =>
      videoElement.addEventListener('loadeddata', resolve, { once: true })
    );
    return;
  }

  // Legacy fallback for very old browsers.
  return new Promise((resolve, reject) => {
    const navigatorAny = navigator;
    const legacyGetUserMedia =
      navigator.getUserMedia ||
      navigatorAny.webkitGetUserMedia ||
      navigatorAny.mozGetUserMedia ||
      navigatorAny.msGetUserMedia;

    if (!legacyGetUserMedia) {
      // Reject with a real Error (previously reject() passed undefined,
      // making failures impossible to diagnose).
      reject(new Error('getUserMedia is not supported in this browser'));
      return;
    }

    legacyGetUserMedia.call(
      navigator,
      { video: true },
      (stream) => {
        videoElement.srcObject = stream;
        videoElement.addEventListener('loadeddata', () => resolve(), false);
      },
      (error) => reject(error || new Error('Webcam access was denied'))
    );
  });
}
// Load your model and the face detection model
/**
 * Loads the deepfake classifier and the face-detector weights, then hides
 * the loading indicator. Paths are parameterized (with the original values
 * as defaults) so deployments don't need to edit this function.
 * @param {string} [modelUrl] - URL of the classifier's model.json.
 * @param {string} [faceModelPath] - Base path of the face-api weights.
 * @returns {Promise<void>}
 */
async function loadModels(
  modelUrl = 'http://127.0.0.1:8081/model13092024-OM/model.json',
  faceModelPath = '/models'
) {
  // The two downloads are independent, so fetch them in parallel.
  const [loadedModel] = await Promise.all([
    tf.loadLayersModel(modelUrl),
    faceapi.nets.tinyFaceDetector.loadFromUri(faceModelPath),
  ]);
  model = loadedModel;
  loadingElement.style.display = 'none'; // Hide loading message after models are loaded
}
// Capture a frame, detect the face, and run the model
/**
 * One inference step: draws the current video frame onto the canvas, detects
 * a single face, classifies the cropped face as 'real' or 'fake', updates
 * the label, and schedules the next frame via requestAnimationFrame.
 */
async function predict() {
  try {
    const ctx = canvasElement.getContext('2d');
    // NOTE(review): this reads the <video> width/height *attributes*; if the
    // HTML does not set them this draws at 0x0 — videoWidth/videoHeight may
    // be intended. Confirm against the markup before changing.
    ctx.drawImage(videoElement, 0, 0, videoElement.width, videoElement.height);

    // Detect a face in the current video frame.
    const detection = await faceapi.detectSingleFace(
      videoElement,
      new faceapi.TinyFaceDetectorOptions()
    );

    if (detection) {
      const { x, y, width, height } = detection.box;

      // Crop the detected face region into an offscreen canvas.
      const faceCanvas = document.createElement('canvas');
      faceCanvas.width = width;
      faceCanvas.height = height;
      const faceContext = faceCanvas.getContext('2d');
      faceContext.drawImage(videoElement, x, y, width, height, 0, 0, width, height);
      const imageData = faceContext.getImageData(0, 0, width, height);

      // tf.tidy disposes every intermediate tensor created in the callback.
      // Without it, each frame leaked the fromPixels chain, the prediction,
      // and the argMax tensor — a steady per-frame memory leak.
      const predictedClass = tf.tidy(() => {
        const imageTensor = tf.browser
          .fromPixels(imageData)
          .resizeBilinear([96, 96]) // Resize to the model's input size
          .expandDims(0)            // Add the batch dimension
          .toFloat()
          .div(tf.scalar(255.0));   // Normalize pixels to [0, 1]
        return model.predict(imageTensor).argMax(-1).dataSync()[0];
      });

      const classes = ['real', 'fake'];
      // Update the prediction label
      resultElement.innerText = classes[predictedClass];
    }
  } catch (err) {
    // Keep the loop alive on a bad frame; previously any exception here
    // silently killed the loop as an unhandled promise rejection.
    console.error('Prediction step failed:', err);
  }
  requestAnimationFrame(predict); // Keep the prediction loop going
}
// Main function to start everything
/**
 * Boot sequence: start the camera, load both models, then begin the
 * prediction loop.
 */
async function main() {
  await setupWebcam(); // camera must be streaming before we can detect faces
  await loadModels();  // classifier + detector must be ready before predict()
  predict();           // starts the requestAnimationFrame loop (not awaited)
}

// Surface startup failures (camera denied, model 404) instead of letting the
// floating promise reject silently with no user feedback.
main().catch((err) => {
  console.error('Failed to start:', err);
  loadingElement.innerText = 'Failed to start — see console for details.';
});