用你的浏览器拍照,录音和录像
Posted on Thu 25 February 2021 in Journal
用你的浏览器拍照
先看一下效果,你可以在这里亲自动手试试 https://www.fanyamin.com/webrtc/examples/media_stream.html
WebRTC 中对媒体流 Media Stream 做了内置的支持,可以从电脑的摄像头,麦克风中捕获音频或视频流,并在 HTML5 所支持的 `<video>` 和 `<audio>` 元素中播放
-
MediaStream 代表音频或视频数据流, 一个 MediaStream包含零个或多个 MediaStreamTrack ,
-
MediaStreamTrack 代表各种 audio 或 video track, 一个 MediaStreamTrack 包含一个或多个 Channels .
-
Channel 代表媒体流的最小单元, 例如音频信号关联到一个给定的 Speaker , 如立体声道的左右声道.
MediaStream对象具有单个输入和单个输出。 由getUserMedia() 方法生成的MediaStream对象称为本地对象,其输入之一是用户的摄像机或麦克风。
非本地MediaStream可能表示为媒体元素(例如 `<video>` 或 `<audio>`)、通过 WebRTC RTCPeerConnection 从网络获得的流,或使用 Web Audio API 创建的流。
MediaStream对象的输出链接到使用者。 它可以是媒体元素(例如 `<video>` 或 `<audio>`),也可以是 WebRTC RTCPeerConnection 或 Web Audio API 的 MediaStreamAudioSourceNode。
API
var promise = navigator.mediaDevices.getUserMedia(constraints);
基本用法
/**
 * Request access to the user's camera/microphone as described by `constraints`.
 * The stream (or the error) is handled inline; nothing is returned.
 */
async function getMedia(constraints) {
  try {
    const stream = await navigator.mediaDevices.getUserMedia(constraints);
    /* use the stream */
  } catch (err) {
    /* handle the error */
  }
}
// Alternative: consume the returned Promise directly instead of async/await.
navigator.mediaDevices
  .getUserMedia(constraints)
  .then((stream) => {
    /* use the stream */
  })
  .catch((err) => {
    /* handle the error */
  });
实例
开始提到的实例怎么实现呢,其实很简单
1)先创建一个 media_stream.html
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0"/>
  <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
  <title> WebRTC Examples</title>
  <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/webrtc-adapter/6.4.0/adapter.min.js" ></script>
  <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/jquery/1.9.1/jquery.min.js" ></script>
  <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/jquery.blockUI/2.70/jquery.blockUI.min.js" ></script>
  <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.4.1/js/bootstrap.min.js"></script>
  <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/bootbox.js/5.4.0/bootbox.min.js"></script>
  <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/spin.js/2.3.2/spin.min.js"></script>
  <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/toastr.js/2.1.4/toastr.min.js"></script>
  <script type="text/javascript" src="js/util.js" ></script>
  <script type="text/javascript" src="js/media_stream_demo.js"></script>
  <script>
  $(function() {
    $(".navbar-static-top").load("navbar.html", function() {
      $(".navbar-static-top li.dropdown").addClass("active");
      // NOTE(review): this highlights the echotest.html navbar entry; confirm it
      // should not be media_stream.html for this page.
      $(".navbar-static-top a[href='echotest.html']").parent().addClass("active");
    });
    $(".footer").load("footer.html");
    //-----------------------------------------------------------//
    weblog("-------- Available User Devices -----------")
    listUserDevices();

    const constraints = window.constraints = {
      audio: false,
      video: true
    };

    // Translate the <select> value into standard getUserMedia constraints.
    // The legacy Chrome-only {mandatory: {maxWidth: ...}} form is ignored by the
    // spec-compliant API; the standard form is width/height with a `max` member.
    function applyResolution(selectedRes) {
      switch (selectedRes) {
        case "qvga":
          constraints.video = { width: { max: 320 }, height: { max: 240 } };
          break;
        case "vga":
          constraints.video = { width: { max: 640 }, height: { max: 480 } };
          break;
        default: // "hd" — matches the "1280*960" option label below
          constraints.video = { width: { max: 1280 }, height: { max: 960 } };
          break;
      }
    }

    const resSelect = document.querySelector('select#resolution');
    applyResolution(resSelect.value);
    // Recompute the constraints whenever the user picks another resolution, so
    // reopening the camera uses the new choice (previously the value was read
    // only once at page load and later changes had no effect).
    resSelect.onchange = function() {
      applyResolution(resSelect.value);
    };

    const filterSelect = document.querySelector('select#filter');
    filterSelect.onchange = function() {
      const video = document.querySelector('video');
      video.className = filterSelect.value;
    };

    document.querySelector('#open').addEventListener('click', e => openCamera(e, constraints));
    document.querySelector('#close').addEventListener('click', e => closeCamera(e, constraints));
    document.querySelector('#snapshot').addEventListener('click', e => takeSnapshot(e));
  });
  </script>
  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/bootswatch/3.4.0/cerulean/bootstrap.min.css" type="text/css"/>
  <link rel="stylesheet" href="css/demo.css" type="text/css"/>
  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css" type="text/css"/>
  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/toastr.js/2.1.4/toastr.min.css"/>
</head>
<body>
  <!--
  <a href="https://github.com/walterfan/webrtc-primer"><img style="position: absolute; top: 0; left: 0; border: 0; z-index: 1001;" src="https://s3.amazonaws.com/github/ribbons/forkme_left_darkblue_121621.png" alt="Fork me on GitHub"></a>
  -->
  <nav class="navbar navbar-default navbar-static-top">
  </nav>
  <div class="container">
    <div class="row">
      <div class="col-md-12">
        <div class="page-header">
          <h1>WebRTC example of User Media </h1>
        </div>
        <div class="container" id="details">
          <div class="row">
            <div class="col-lg-12">
              <p>Click the button to open, close camera or take snapshot</p>
              <div>
                <button class="btn btn-default" autocomplete="off" id="open">Open Camera</button>
                <label for="resolution">resolution </label>
                <select id="resolution">
                  <option value="qvga">320*240</option>
                  <option value="vga">640*480</option>
                  <option value="hd">1280*960</option>
                </select>
                <button class="btn btn-default" autocomplete="off" id="close">Close Camera</button>
                <button class="btn btn-default" autocomplete="off" id="snapshot">Take Snapshot</button>
                <label for="filter">Filter: </label>
                <select id="filter">
                  <option value="none">None</option>
                  <option value="blur">Blur</option>
                  <option value="grayscale">Grayscale</option>
                  <option value="invert">Invert</option>
                  <option value="sepia">Sepia</option>
                </select>
              </div>
              <br/>
            </div>
            <div class="col-lg-12">
              <div class="col-lg-6"><video id="localVideo" autoplay playsinline controls="true"></video></div>
              <div class="col-lg-6"><canvas id="localCanvas"></canvas></div>
            </div>
          </div>
          <div class="note">
            <div id="logDiv">
              <ul id="logContent">
              </ul>
            </div>
          </div>
        </div>
      </div>
    </div>
    <hr>
    <div class="footer">
    </div>
  </div>
</body>
</html>
2) 创建 media_stream_demo.js
'use strict';
// Put variables in global scope to make them available to the browser console.
/**
 * Attach the captured stream to the page's <video> element and expose it on
 * `window` for console debugging.
 * NOTE: logs the global `constraints` object defined by the page (not a param).
 */
function handleSuccess(stream) {
  const videoElement = document.querySelector('video');
  const [firstVideoTrack] = stream.getVideoTracks();
  console.log('Got stream with constraints:', constraints);
  console.log(`Using video device: ${firstVideoTrack.label}`);
  window.stream = stream; // make variable available to browser console
  videoElement.srcObject = stream;
}
/**
 * Report a getUserMedia failure on the page log.
 * Recognizes both the modern error names (OverconstrainedError, NotAllowedError)
 * and the legacy ones the original code checked (ConstraintNotSatisfiedError,
 * PermissionDeniedError), which current browsers no longer throw.
 * @param {Error|OverconstrainedError} error - rejection from getUserMedia.
 */
function handleError(error) {
  if (error.name === 'OverconstrainedError' || error.name === 'ConstraintNotSatisfiedError') {
    // NOTE(review): assumes constraints.video uses {width:{exact},height:{exact}};
    // with max-style constraints these fields may be undefined — confirm.
    const v = constraints.video;
    errorMsg(`The resolution ${v.width.exact}x${v.height.exact} px is not supported by your device.`);
  } else if (error.name === 'NotAllowedError' || error.name === 'PermissionDeniedError') {
    errorMsg('Permissions have not been granted to use your camera and ' +
      'microphone, you need to allow the page access to your devices in ' +
      'order for the demo to work.');
  }
  errorMsg(`getUserMedia error: ${error.name}`, error);
}
/**
 * Append a message paragraph to the on-page log area; if an Error object is
 * supplied, also dump it to the browser console.
 */
function errorMsg(msg, error) {
  const logContainer = document.querySelector('#logDiv');
  logContainer.innerHTML += `<p>${msg}</p>`;
  if (error !== undefined) {
    console.error(error);
  }
}
/**
 * Open the camera with the given constraints, show the stream, and flip the
 * enabled state of the open/close buttons.
 * @param {Event} e - click event from the "Open Camera" button.
 * @param {MediaStreamConstraints} constraints - passed to getUserMedia.
 */
async function openCamera(e, constraints) {
  try {
    handleSuccess(await navigator.mediaDevices.getUserMedia(constraints));
    e.target.disabled = true;
    document.querySelector('#close').disabled = false;
  } catch (err) {
    handleError(err);
  }
}
/**
 * Stop every track of the stream shown in the <video> element and detach it.
 * Guarded against a null srcObject (camera never opened or already closed),
 * which previously caused a TypeError on stream.getTracks().
 * @param {Event} e - click event from the "Close Camera" button.
 */
function closeCamera(e) {
  const videoElem = document.querySelector('video');
  const stream = videoElem.srcObject;
  e.target.disabled = true;
  document.querySelector('#open').disabled = false;
  if (stream === null) {
    return; // nothing to stop
  }
  for (const track of stream.getTracks()) {
    track.stop();
  }
  videoElem.srcObject = null;
}
/**
 * Copy the current video frame onto the page's <canvas>, sized to the video's
 * intrinsic resolution. The canvas is also exposed as window.canvas for
 * console debugging.
 */
async function takeSnapshot(e) {
  console.log("take snapshot");
  try {
    const video = document.querySelector('video');
    const canvas = window.canvas = document.querySelector('canvas');
    const { videoWidth, videoHeight } = video;
    canvas.width = videoWidth;
    canvas.height = videoHeight;
    const context2d = canvas.getContext('2d');
    context2d.drawImage(video, 0, 0, videoWidth, videoHeight);
  } catch (err) {
    handleError(err);
  }
}
3) 点击 "Open Camera" 来打开摄像头,点击 “Take Snapshot” 来拍摄快照
4) 可以自己动手试试,通过选择 "resolution" 来调整 getUserMedia 的 Constraints
5) 可以自己动手试试,通过更改 "Filter" 来为摄像头捕捉的视频添加滤镜。
用你的浏览器录音和录像
既然现在的笔记本电脑,平板,手机都有摄像头和麦克风,那么录音和录像就是一件非常容易的事情了,但是如果不用别人写好的录音录像程序,让你自己来实现一个录音和录像应用,其实也没那么简单。
但是有了 WebRTC 和支持它的浏览器, 事情就变得简单多了
现代浏览器不仅支持 audio 和 video 两个新的元素,还支持了MediaStream 和 MediaRecorder 这样的媒体 API
第一步:创建一个供演示的 HTML 文件
- 源码在此 record_demo.html
这个 html 文件很简单,就是如下四个按钮
再加上一个 HTML5 支持的 video 元素
<video autoplay></video>
第二步:处理这四个按键的 click 事件
源码见 record_demo.js
1. 打开媒体 “open media” 按键的处理 - 获取本地媒体流
var localStream = null; // MediaStream returned by getUserMedia (set in handleSuccess)
var mediaRecorder = null; // MediaRecorder instance created in startRecord
var recordChunks = []; // Blob chunks pushed by MediaRecorder's dataavailable events
var recordElement = document.querySelector('video'); // <video> used for preview and playback
var recordMediaType = 'video/webm'; // container MIME type for recording and playback
// getUserMedia constraints: echo-cancelled audio plus 640x480 video.
var mediaConstraints = {
audio: {
echoCancellation: {exact: true}
},
video: {
width: 640,
height: 480
}
};
/**
 * "Open Media" button handler. Acts as a toggle: if media is already open
 * (button label reads 'Close Media'), close it instead.
 * NOTE(review): `mediaButton` is defined elsewhere (record_demo.html); the
 * `constraints` argument is unused — the module-level mediaConstraints is what
 * getUserMedia receives. Confirm that is intentional.
 */
async function openMedia(e, constraints) {
  if (mediaButton.textContent === 'Close Media') {
    closeMedia(e);
    return;
  }
  try {
    handleSuccess(await navigator.mediaDevices.getUserMedia(mediaConstraints));
    mediaButton.textContent = 'Close Media';
    console.log('openMedia success ');
  } catch (err) {
    handleError(err);
  }
}
/**
 * Keep a module-level reference to the captured stream and show it live in the
 * preview <video> element.
 */
function handleSuccess(stream) {
  localStream = stream;
  recordElement.srcObject = localStream;
}
上述代码获取本地用户的 audio 和 video 媒体流,async 和 await 关键字是ES7 提供的异步支持,await 就是先返回,等异步操作完成再回来执行下一步语句, async 代表函数是异步的。 当媒体流获取后,就赋予本地的
2. 开始录制 “start record” 的处理 - 录制本地媒体流
MediaRecorder API 就是录制媒体流的核心
/**
 * "Start Record" button handler; acts as a toggle (stops when the label reads
 * 'Stop Record'). Records the local stream with MediaRecorder.
 * Fixes: event handlers are now registered BEFORE start() so an early
 * dataavailable event cannot be missed, and chunks from a previous recording
 * are dropped so play/download only contain the latest take.
 */
function startRecord() {
  if(!localStream) {
    console.error("stream is not created.");
    return;
  }
  if (recordButton.textContent === 'Stop Record') {
    stopRecord();
    return;
  }
  var options = {mimeType: recordMediaType};
  mediaRecorder = new MediaRecorder(localStream, options);
  recordChunks = []; // discard data from any earlier recording
  mediaRecorder.ondataavailable = function(e) {
    console.log("data available", e.data);
    recordChunks.push(e.data);
  }
  mediaRecorder.onstop = function(e) {
    console.log('onstop fired');
    var blob = new Blob(recordChunks, { 'type' : recordMediaType });
    var blobURL = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.style.display = 'none';
    a.href = blobURL;
    a.download = 'test.webm';
    document.body.appendChild(a);
    // NOTE(review): this anchor is appended but never clicked or removed; the
    // actual download is driven by downRecord(). Confirm it is needed at all.
  };
  mediaRecorder.start();
  recordButton.textContent = 'Stop Record';
  playButton.disabled = true;
  downButton.disabled = true;
  console.log("recorder started");
}
上述代码很简单,关键的地方就是创建 MediaRecorder 对象,传入媒体流,然后开始录制
var options = {mimeType: recordMediaType};
mediaRecorder = new MediaRecorder(localStream, options);
mediaRecorder.start();
为了能播放和下载所录制的媒体文件,需要将录制的内容存贮下来
var recordChunks = [];
是一个字节数组, 在录制停止时一起存入本地的 blob 对象中
mediaRecorder.ondataavailable = function(e) {
console.log("data available", e.data);
recordChunks.push(e.data);
}
mediaRecorder.onstop = function(e) {
console.log('onstop fired');
var blob = new Blob(recordChunks, { 'type' : recordMediaType });
var blobURL = URL.createObjectURL(blob);
const a = document.createElement('a');
a.style.display = 'none';
a.href = blobURL;
a.download = 'test.webm';
document.body.appendChild(a);
};
3. 播放 “Play Record” 的处理 - 播放本地存储的媒体文件
它由录制时保存下来的 blob 数组创建出来
/**
 * Build a Blob from the recorded chunks and play it back in the preview
 * <video> element (detaching any live stream first).
 */
function playRecord() {
  const recording = new Blob(recordChunks, {type: recordMediaType});
  recordElement.src = null;
  recordElement.srcObject = null;
  recordElement.src = window.URL.createObjectURL(recording);
  recordElement.controls = true;
  recordElement.play();
}
4. 下载 “Download Record ” 的处理 - 下载本地存储的媒体文件
/**
 * Offer the recording as a file download ('test.webm') via a temporary hidden
 * anchor; the anchor and its object URL are cleaned up shortly after the click.
 */
function downRecord() {
  const recording = new Blob(recordChunks, {type: recordMediaType});
  const objectUrl = window.URL.createObjectURL(recording);
  const link = document.createElement('a');
  link.style.display = 'none';
  link.href = objectUrl;
  link.download = 'test.webm';
  document.body.appendChild(link);
  link.click();
  setTimeout(() => {
    document.body.removeChild(link);
    window.URL.revokeObjectURL(objectUrl);
  }, 100);
}
可点击https://www.fanyamin.com/webrtc/examples/record_demo.html 看最终的效果:
参考资料
- https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia
- MediaRecorderAPI 参见 https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder
- MediaRecorder 示例参考 https://webrtc.github.io/samples/src/content/getusermedia/record/