|
@@ -1,80 +1,332 @@
|
|
<template>
|
|
<template>
|
|
- <div class="player">
|
|
|
|
- <div style="width: 640px;margin: 0 auto;">
|
|
|
|
- <video-player class="video-player vjs-custom-skin"
|
|
|
|
- ref="videoPlayer"
|
|
|
|
- :playsinline="true"
|
|
|
|
- style="object-fit:fill"
|
|
|
|
- :options="playerOptions"
|
|
|
|
- :x5-video-player-fullscreen="true"
|
|
|
|
- @pause="onPlayerPause($event)"
|
|
|
|
- @play="onPlayerPlay($event)"
|
|
|
|
- @fullscreenchange="onFullscreenChange($event)"
|
|
|
|
- @click="fullScreen"
|
|
|
|
- >
|
|
|
|
- </video-player>
|
|
|
|
- </div>
|
|
|
|
- </div>
|
|
|
|
|
|
+ <div class="webrtc_face_recognition">
|
|
|
|
+ <div class="option">
|
|
|
|
+ <div>
|
|
|
|
+ <label>面板操作:</label>
|
|
|
|
+ <button @click="fnOpen">启动摄像头视频媒体</button>
|
|
|
|
+ <button @click="fnClose">结束摄像头视频媒体</button>
|
|
|
|
+ </div>
|
|
|
|
+ <div>
|
|
|
|
+ <span style="margin-right: 20px;"
|
|
|
|
+ >前置后置切换(重新启动摄像头):</span
|
|
|
|
+ >
|
|
|
|
+ <label>
|
|
|
|
+ 前置
|
|
|
|
+ <input
|
|
|
|
+ type="radio"
|
|
|
|
+ v-model="constraints.video.facingMode"
|
|
|
|
+ value="user"
|
|
|
|
+ />
|
|
|
|
+ </label>
|
|
|
|
+ <label>
|
|
|
|
+ 后置
|
|
|
|
+ <input
|
|
|
|
+ type="radio"
|
|
|
|
+ v-model="constraints.video.facingMode"
|
|
|
|
+ value="environment"
|
|
|
|
+ />
|
|
|
|
+ </label>
|
|
|
|
+ </div>
|
|
|
|
+ <div>
|
|
|
|
+ <span style="margin-right: 20px;">检测识别类型:</span>
|
|
|
|
+ <label>
|
|
|
|
+ 轮廓检测
|
|
|
|
+ <input type="radio" v-model="detection" value="landmark" />
|
|
|
|
+ </label>
|
|
|
|
+ <label>
|
|
|
|
+ 表情检测
|
|
|
|
+ <input type="radio" v-model="detection" value="expression" />
|
|
|
|
+ </label>
|
|
|
|
+ <label>
|
|
|
|
+ 年龄性别检测
|
|
|
|
+ <input type="radio" v-model="detection" value="age_gender" />
|
|
|
|
+ </label>
|
|
|
|
+ </div>
|
|
|
|
+ <div>
|
|
|
|
+ <label>边框Or面部轮廓:</label>
|
|
|
|
+ <input type="checkbox" v-model="withBoxes" />
|
|
|
|
+ </div>
|
|
|
|
+ <div>
|
|
|
|
+ <label>检测人脸:</label>
|
|
|
|
+ <label>
|
|
|
|
+ 可信单
|
|
|
|
+ <input type="radio" v-model="detectFace" value="detectSingleFace" />
|
|
|
|
+ </label>
|
|
|
|
+ <label>
|
|
|
|
+ 模糊多
|
|
|
|
+ <input type="radio" v-model="detectFace" value="detectAllFaces" />
|
|
|
|
+ </label>
|
|
|
|
+ </div>
|
|
|
|
+ <div>
|
|
|
|
+ <label>选择算法模型</label>
|
|
|
|
+ <label>
|
|
|
|
+ ssdMobilenetv1
|
|
|
|
+ <input type="radio" v-model="nets" value="ssdMobilenetv1" />
|
|
|
|
+ </label>
|
|
|
|
+ <label>
|
|
|
|
+ tinyFaceDetector
|
|
|
|
+ <input type="radio" v-model="nets" value="tinyFaceDetector" />
|
|
|
|
+ </label>
|
|
|
|
+ <label>
|
|
|
|
+ mtcnn
|
|
|
|
+ <input type="radio" v-model="nets" value="mtcnn" />
|
|
|
|
+ </label>
|
|
|
|
+ </div>
|
|
|
|
+ </div>
|
|
|
|
+ <div class="see">
|
|
|
|
+ <video
|
|
|
|
+ id="myVideo"
|
|
|
|
+ poster="https://dummyimage.com/1280x720"
|
|
|
|
+ muted
|
|
|
|
+ loop
|
|
|
|
+ playsinline
|
|
|
|
+ @loadedmetadata="fnRun"
|
|
|
|
+ ></video>
|
|
|
|
+ <canvas id="myCanvas" />
|
|
|
|
+ </div>
|
|
|
|
+ </div>
|
|
</template>
|
|
</template>
|
|
|
|
|
|
<script>
|
|
<script>
|
|
- import test from '@/assets/images/bg.png'
|
|
|
|
- import {videoPlayer} from 'vue-video-player';
|
|
|
|
-
|
|
|
|
- export default {
|
|
|
|
- components: {
|
|
|
|
- videoPlayer
|
|
|
|
- },
|
|
|
|
- data() {
|
|
|
|
- return {
|
|
|
|
- pictureImg: test,
|
|
|
|
- playerOptions: {
|
|
|
|
- // playbackRates: [0.7, 1.0, 1.5, 2.0], //播放速度
|
|
|
|
- autoplay: false, //如果true,浏览器准备好时开始回放。
|
|
|
|
- muted: false, // 默认情况下将会消除任何音频。
|
|
|
|
- loop: false, // 导致视频一结束就重新开始。
|
|
|
|
- preload: 'auto', // 建议浏览器在<video>加载元素后是否应该开始下载视频数据。auto浏览器选择最佳行为,立即开始加载视频(如果浏览器支持)
|
|
|
|
- language: 'zh-CN',
|
|
|
|
- aspectRatio: '16:9', // 将播放器置于流畅模式,并在计算播放器的动态大小时使用该值。值应该代表一个比例 - 用冒号分隔的两个数字(例如"16:9"或"4:3")
|
|
|
|
- fluid: true, // 当true时,Video.js player将拥有流体大小。换句话说,它将按比例缩放以适应其容器。
|
|
|
|
- sources: [{
|
|
|
|
- type: "application/x-mpegURL",
|
|
|
|
- src: "https://media.ndjsxh.cn:18443/ld/2518/2518.m3u8"
|
|
|
|
- }],
|
|
|
|
- poster: "你的封面地址",
|
|
|
|
- width: document.documentElement.clientWidth,
|
|
|
|
- notSupportedMessage: '此视频暂无法播放,请稍后再试', //允许覆盖Video.js无法播放媒体源时显示的默认信息。
|
|
|
|
- controlBar: {
|
|
|
|
- // timeDivider: true,
|
|
|
|
- // durationDisplay: true,
|
|
|
|
- // remainingTimeDisplay: false,
|
|
|
|
- fullscreenToggle: true //全屏按钮
|
|
|
|
- }
|
|
|
|
- }
|
|
|
|
- }
|
|
|
|
|
|
+import * as faceapi from "face-api.js";
|
|
|
|
+export default {
|
|
|
|
+ name: "WebRTCFaceRecognition",
|
|
|
|
+ data() {
|
|
|
|
+ return {
|
|
|
|
+ nets: "tinyFaceDetector", // 模型
|
|
|
|
+ options: null, // 模型参数
|
|
|
|
+ withBoxes: true, // 框or轮廓
|
|
|
|
+ detectFace: "detectSingleFace", // 单or多人脸
|
|
|
|
+ detection: "landmark",
|
|
|
|
+ videoEl: null,
|
|
|
|
+ canvasEl: null,
|
|
|
|
+ timeout: 0,
|
|
|
|
+ // 视频媒体参数配置
|
|
|
|
+ constraints: {
|
|
|
|
+ video: true,
|
|
},
|
|
},
|
|
- methods: {
|
|
|
|
- fullScreen() {
|
|
|
|
- const player = this.$refs.videoPlayer.player
|
|
|
|
- player.requestFullscreen()//调用全屏api方法
|
|
|
|
- player.isFullscreen(true)
|
|
|
|
- player.play()
|
|
|
|
- },
|
|
|
|
- onPlayerPlay(player) {
|
|
|
|
- player.play()
|
|
|
|
- },
|
|
|
|
- onPlayerPause(player) {
|
|
|
|
- // alert("pause");
|
|
|
|
- }
|
|
|
|
- },
|
|
|
|
- computed: {
|
|
|
|
- player() {
|
|
|
|
- return this.$refs.videoPlayer.player
|
|
|
|
|
|
+ };
|
|
|
|
+ },
|
|
|
|
+ watch: {
|
|
|
|
+ nets(val) {
|
|
|
|
+ this.nets = val;
|
|
|
|
+ this.fnInit();
|
|
|
|
+ },
|
|
|
|
+ detection(val) {
|
|
|
|
+ this.detection = val;
|
|
|
|
+ this.videoEl.pause();
|
|
|
|
+ setTimeout(() => {
|
|
|
|
+ this.videoEl.play();
|
|
|
|
+ setTimeout(() => this.fnRun(), 300);
|
|
|
|
+ }, 300);
|
|
|
|
+ },
|
|
|
|
+ },
|
|
|
|
+ mounted() {
|
|
|
|
+ this.$nextTick(() => {
|
|
|
|
+ this.fnInit();
|
|
|
|
+ });
|
|
|
|
+ },
|
|
|
|
+ methods: {
|
|
|
|
+ // 初始化模型加载
|
|
|
|
+ async fnInit() {
|
|
|
|
+ await faceapi.nets[this.nets].loadFromUri("/static/models"); // 算法模型
|
|
|
|
+ await faceapi.loadFaceLandmarkModel("/static/models"); // 轮廓模型
|
|
|
|
+ await faceapi.loadFaceExpressionModel("/static/models"); // 表情模型
|
|
|
|
+ await faceapi.loadAgeGenderModel("/static/models"); // 年龄模型
|
|
|
|
+ // 根据算法模型参数识别调整结果
|
|
|
|
+ switch (this.nets) {
|
|
|
|
+ case "ssdMobilenetv1":
|
|
|
|
+ this.options = new faceapi.SsdMobilenetv1Options({
|
|
|
|
+ minConfidence: 0.5, // 0.1 ~ 0.9
|
|
|
|
+ });
|
|
|
|
+ break;
|
|
|
|
+ case "tinyFaceDetector":
|
|
|
|
+ this.options = new faceapi.TinyFaceDetectorOptions({
|
|
|
|
+ inputSize: 512, // 160 224 320 416 512 608
|
|
|
|
+ scoreThreshold: 0.5, // 0.1 ~ 0.9
|
|
|
|
+ });
|
|
|
|
+ break;
|
|
|
|
+ case "mtcnn":
|
|
|
|
+ this.options = new faceapi.MtcnnOptions({
|
|
|
|
+ minFaceSize: 20, // 0.1 ~ 0.9
|
|
|
|
+ scaleFactor: 0.709, // 0.1 ~ 0.9
|
|
|
|
+ });
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ // 节点属性化
|
|
|
|
+ this.videoEl = document.getElementById("myVideo");
|
|
|
|
+ this.canvasEl = document.getElementById("myCanvas");
|
|
|
|
+ },
|
|
|
|
+ // 人脸面部勘探轮廓识别绘制
|
|
|
|
+ async fnRunFaceLandmark() {
|
|
|
|
+ console.log("RunFaceLandmark");
|
|
|
|
+ if (this.videoEl.paused) return clearTimeout(this.timeout);
|
|
|
|
+ // 识别绘制人脸信息
|
|
|
|
+ const result = await faceapi[this.detectFace](
|
|
|
|
+ this.videoEl,
|
|
|
|
+ this.options
|
|
|
|
+ ).withFaceLandmarks();
|
|
|
|
+ if (result && !this.videoEl.paused) {
|
|
|
|
+ const dims = faceapi.matchDimensions(this.canvasEl, this.videoEl, true);
|
|
|
|
+ const resizeResult = faceapi.resizeResults(result, dims);
|
|
|
|
+ this.withBoxes
|
|
|
|
+ ? faceapi.draw.drawDetections(this.canvasEl, resizeResult)
|
|
|
|
+ : faceapi.draw.drawFaceLandmarks(this.canvasEl, resizeResult);
|
|
|
|
+ } else {
|
|
|
|
+ this.canvasEl
|
|
|
|
+ .getContext("2d")
|
|
|
|
+ .clearRect(0, 0, this.canvasEl.width, this.canvasEl.height);
|
|
|
|
+ }
|
|
|
|
+ this.timeout = setTimeout(() => this.fnRunFaceLandmark());
|
|
|
|
+ },
|
|
|
|
+ // 人脸表情识别绘制
|
|
|
|
+ async fnRunFaceExpression() {
|
|
|
|
+ console.log("RunFaceExpression");
|
|
|
|
+ if (this.videoEl.paused) return clearTimeout(this.timeout);
|
|
|
|
+ // 识别绘制人脸信息
|
|
|
|
+ const result = await faceapi[this.detectFace](this.videoEl, this.options)
|
|
|
|
+ .withFaceLandmarks()
|
|
|
|
+ .withFaceExpressions();
|
|
|
|
+ if (result && !this.videoEl.paused) {
|
|
|
|
+ const dims = faceapi.matchDimensions(this.canvasEl, this.videoEl, true);
|
|
|
|
+ const resizeResult = faceapi.resizeResults(result, dims);
|
|
|
|
+ this.withBoxes
|
|
|
|
+ ? faceapi.draw.drawDetections(this.canvasEl, resizeResult)
|
|
|
|
+ : faceapi.draw.drawFaceLandmarks(this.canvasEl, resizeResult);
|
|
|
|
+ faceapi.draw.drawFaceExpressions(this.canvasEl, resizeResult, 0.05);
|
|
|
|
+ } else {
|
|
|
|
+ this.canvasEl
|
|
|
|
+ .getContext("2d")
|
|
|
|
+ .clearRect(0, 0, this.canvasEl.width, this.canvasEl.height);
|
|
|
|
+ }
|
|
|
|
+ this.timeout = setTimeout(() => this.fnRunFaceExpression());
|
|
|
|
+ },
|
|
|
|
+ // 年龄性别识别绘制
|
|
|
|
+ async fnRunFaceAgeAndGender() {
|
|
|
|
+ console.log("RunFaceAgeAndGender");
|
|
|
|
+ if (this.videoEl.paused) return clearTimeout(this.timeout);
|
|
|
|
+ // 识别绘制人脸信息
|
|
|
|
+ const result = await faceapi[this.detectFace](this.videoEl, this.options)
|
|
|
|
+ .withFaceLandmarks()
|
|
|
|
+ .withAgeAndGender();
|
|
|
|
+ if (result && !this.videoEl.paused) {
|
|
|
|
+ const dims = faceapi.matchDimensions(this.canvasEl, this.videoEl, true);
|
|
|
|
+ const resizeResults = faceapi.resizeResults(result, dims);
|
|
|
|
+ this.withBoxes
|
|
|
|
+ ? faceapi.draw.drawDetections(this.canvasEl, resizeResults)
|
|
|
|
+ : faceapi.draw.drawFaceLandmarks(this.canvasEl, resizeResults);
|
|
|
|
+ if (Array.isArray(resizeResults)) {
|
|
|
|
+ resizeResults.forEach((result) => {
|
|
|
|
+ const { age, gender, genderProbability } = result;
|
|
|
|
+ new faceapi.draw.DrawTextField(
|
|
|
|
+ [
|
|
|
|
+                `${Math.round(age)} years`,
|
|
|
|
+                `${gender} (${Math.round(genderProbability * 100)}%)`,
|
|
|
|
+ ],
|
|
|
|
+ result.detection.box.bottomLeft
|
|
|
|
+ ).draw(this.canvasEl);
|
|
|
|
+ });
|
|
|
|
+ } else {
|
|
|
|
+ const { age, gender, genderProbability } = resizeResults;
|
|
|
|
+ new faceapi.draw.DrawTextField(
|
|
|
|
+ [
|
|
|
|
+              `${Math.round(age)} years`,
|
|
|
|
+              `${gender} (${Math.round(genderProbability * 100)}%)`,
|
|
|
|
+ ],
|
|
|
|
+ resizeResults.detection.box.bottomLeft
|
|
|
|
+ ).draw(this.canvasEl);
|
|
}
|
|
}
|
|
|
|
+ } else {
|
|
|
|
+ this.canvasEl
|
|
|
|
+ .getContext("2d")
|
|
|
|
+ .clearRect(0, 0, this.canvasEl.width, this.canvasEl.height);
|
|
}
|
|
}
|
|
- }
|
|
|
|
-
|
|
|
|
- </script>
|
|
|
|
|
|
+ this.timeout = setTimeout(() => this.fnRunFaceAgeAndGender());
|
|
|
|
+ },
|
|
|
|
+ // 执行检测识别类型
|
|
|
|
+ fnRun() {
|
|
|
|
+ if (this.detection === "landmark") {
|
|
|
|
+ this.fnRunFaceLandmark();
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+ if (this.detection === "expression") {
|
|
|
|
+ this.fnRunFaceExpression();
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+ if (this.detection === "age_gender") {
|
|
|
|
+ this.fnRunFaceAgeAndGender();
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+ },
|
|
|
|
+ // 启动摄像头视频媒体
|
|
|
|
+ fnOpen() {
|
|
|
|
+ if (typeof window.stream === "object") return;
|
|
|
|
+ clearTimeout(this.timeout);
|
|
|
|
+ this.timeout = setTimeout(() => {
|
|
|
|
+ clearTimeout(this.timeout);
|
|
|
|
+ navigator.mediaDevices
|
|
|
|
+ .getUserMedia(this.constraints)
|
|
|
|
+ .then(this.fnSuccess)
|
|
|
|
+ .catch(this.fnError);
|
|
|
|
+ }, 300);
|
|
|
|
+ },
|
|
|
|
+ // 成功启动视频媒体流
|
|
|
|
+ fnSuccess(stream) {
|
|
|
|
+ window.stream = stream; // 使流对浏览器控制台可用
|
|
|
|
+ this.videoEl.srcObject = stream;
|
|
|
|
+ this.videoEl.play();
|
|
|
|
+ },
|
|
|
|
+ // 失败启动视频媒体流
|
|
|
|
+ fnError(error) {
|
|
|
|
+ console.log(error);
|
|
|
|
+ alert("视频媒体流获取错误" + error);
|
|
|
|
+ },
|
|
|
|
+ // 结束摄像头视频媒体
|
|
|
|
+ fnClose() {
|
|
|
|
+ this.canvasEl
|
|
|
|
+ .getContext("2d")
|
|
|
|
+ .clearRect(0, 0, this.canvasEl.width, this.canvasEl.height);
|
|
|
|
+ this.videoEl.pause();
|
|
|
|
+ clearTimeout(this.timeout);
|
|
|
|
+ if (typeof window.stream === "object") {
|
|
|
|
+ window.stream.getTracks().forEach((track) => track.stop());
|
|
|
|
+ window.stream = "";
|
|
|
|
+ this.videoEl.srcObject = null;
|
|
|
|
+ }
|
|
|
|
+ },
|
|
|
|
+ },
|
|
|
|
+ beforeDestroy() {
|
|
|
|
+ this.fnClose();
|
|
|
|
+ },
|
|
|
|
+};
|
|
|
|
+</script>
|
|
|
|
|
|
-<style>
|
|
|
|
|
|
+<style scoped>
|
|
|
|
+button {
|
|
|
|
+ height: 30px;
|
|
|
|
+ border: 2px #42b983 solid;
|
|
|
|
+ border-radius: 4px;
|
|
|
|
+ background: #42b983;
|
|
|
|
+ color: white;
|
|
|
|
+ margin: 10px;
|
|
|
|
+}
|
|
|
|
+.see {
|
|
|
|
+ position: relative;
|
|
|
|
+}
|
|
|
|
+.see canvas {
|
|
|
|
+ position: absolute;
|
|
|
|
+ top: 0;
|
|
|
|
+ left: 0;
|
|
|
|
+}
|
|
|
|
+.option {
|
|
|
|
+ padding-bottom: 20px;
|
|
|
|
+}
|
|
|
|
+.option div {
|
|
|
|
+ padding: 10px;
|
|
|
|
+ border-bottom: 2px #42b983 solid;
|
|
|
|
+}
|
|
|
|
+.option div label {
|
|
|
|
+ margin-right: 20px;
|
|
|
|
+}
|
|
</style>
|
|
</style>
|
|
|
|
+
|