WebRtc06: 音视频数据采集
音视频采集API
通过getUserMedia
这个API去获取视频音频,
通过constraints
这个对象去配置偏好,比如视频宽高、音频降噪等
测试代码
index.html
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<!-- Declare the charset so non-ASCII text renders correctly -->
<meta charset="utf-8">
<title>WebRtc capture video and audio</title>
</head>
<body>
<!-- playsinline keeps mobile Safari from forcing fullscreen playback -->
<video autoplay playsinline id="player"></video>
<script src="./js/client.js"></script>
</body>
</html>
client.js
'use strict';

// Video element that renders the captured camera stream.
const videoplay = document.querySelector('video#player');

// Attach the captured MediaStream to the <video> element.
const getMediaStream = (stream) => {
  videoplay.srcObject = stream;
};

// Log any capture failure (permission denied, device busy, ...).
const handleError = (err) => {
  console.log('getUserMedia error: ', err);
};

if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
  console.log('getUserMedia is not supported');
} else {
  // Ask for both camera and microphone with default settings.
  const constraints = { video: true, audio: true };
  navigator.mediaDevices
    .getUserMedia(constraints)
    .then(getMediaStream)
    .catch(handleError);
}
会打开前置摄像头并且输出视频音频
浏览器适配
getUserMedia适配
- getUserMedia (www)
- webkitGetUserMedia(google)
- mozGetUserMedia(firefox)
每个厂商的API名称都不一样,可以引入Google维护的adapter.js适配库来统一屏蔽这些浏览器差异
使用方法是在前面的index.html中加上下述代码
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
获取音视频设备的访问权限
在前面代码的基础上,返回navigator.mediaDevices.enumerateDevices()
,从而得到每一个媒体信息
然后通过添加到HTML的选择器中实现打印不同设备信息
index.html
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<!-- Declare the charset so non-ASCII text renders correctly -->
<meta charset="utf-8">
<title>WebRtc capture video and audio</title>
</head>
<body>
<div>
<label>audioSource:</label>
<select id="audioSource"></select>
</div>
<div>
<label>audioOutput:</label>
<select id="audioOutput"></select>
</div>
<div>
<label>videoSource:</label>
<select id="videoSource"></select>
</div>
<video autoplay playsinline id="player"></video>
<!-- adapter.js smooths over vendor-prefixed getUserMedia differences -->
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<script src="./js/client.js"></script>
</body>
</html>
client.js
'use strict';

// <select> elements that will list the detected media devices.
const audioSource = document.querySelector('select#audioSource');
const audioOutput = document.querySelector('select#audioOutput');
const videoSource = document.querySelector('select#videoSource');
// Video element that renders the captured camera stream.
const videoplay = document.querySelector('video#player');

// Populate the three <select> lists from enumerateDevices() results.
function getDevice(deviceInfos) {
  for (const deviceInfo of deviceInfos) {
    const option = document.createElement('option');
    option.text = deviceInfo.label;
    option.value = deviceInfo.deviceId;
    if (deviceInfo.kind === 'audioinput') {
      audioSource.appendChild(option);
    } else if (deviceInfo.kind === 'audiooutput') {
      audioOutput.appendChild(option);
    } else if (deviceInfo.kind === 'videoinput') {
      videoSource.appendChild(option);
    }
  }
}

// Show the stream, then enumerate devices. Device labels are only
// populated after the user has granted capture permission, which is
// why enumeration happens after getUserMedia succeeds.
function getMediaStream(stream) {
  videoplay.srcObject = stream;
  return navigator.mediaDevices.enumerateDevices();
}

function handleError(err) {
  console.log('getUserMedia error: ', err);
}

if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
  console.log('getUserMedia is not supported');
} else {
  const constraints = { video: true, audio: true };
  navigator.mediaDevices
    .getUserMedia(constraints)
    .then(getMediaStream)
    .then(getDevice)
    .catch(handleError);
}
音视频采集约束
视频约束
- width
- height
- aspectRatio 宽高比
- frameRate 帧率
- facingMode
- user 前置摄像头
- environment 后置摄像头
- left 前置左侧摄像头
- right 前置右侧摄像头
- resizeMode
设置constraints中视频的参数如下
// Request 1080p / 30 fps video from the rear ("environment") camera;
// audio is captured with browser-default settings.
var constraints = {
video : {
width: 1920, // desired frame width in pixels
height: 1080, // desired frame height in pixels
frameRate: 30, // desired frames per second
facingMode: 'environment' // rear camera ('user' selects the front camera)
},
audio : true
}
音频约束
- volume 音量 [0-1.0]
- sampleRate 采样率
- sampleSize 采样大小 位深 一般16位
- echoCancellation 回音消除 true/false
- autoGainControl 自动增益 true/false 在原有声音的基础上,增加音量
- noiseSuppression 降噪 true/false
- latency 延迟大小
- channelCount 声道数量
- deviceId 设备ID(注意约束中的正确属性名是 deviceId,区分大小写)
- groupId 组ID(同一物理设备的各个轨道共享同一个 groupId)
// Request 1080p / 30 fps video from the rear camera, with audio
// processing (noise suppression + echo cancellation) enabled.
var constraints = {
video : {
width: 1920, // desired frame width in pixels
height: 1080, // desired frame height in pixels
frameRate: 30, // desired frames per second
facingMode: 'environment' // rear camera ('user' selects the front camera)
},
audio : {
noiseSuppression: true, // suppress background noise
echoCancellation: true // remove acoustic echo from the capture
}
}
整体例子:切换不同摄像头
'use strict'
// Device selectors and the playback element.
var audioSource = document.querySelector('select#audioSource');
var audioOutput = document.querySelector('select#audioOutput');
var videoSource = document.querySelector('select#videoSource');
var videoplay = document.querySelector('video#player');
// Stream currently playing; tracked so it can be stopped before a restart.
var currentStream = null;

// Rebuild the device <select> lists from enumerateDevices() results.
// The lists are emptied first so that re-running start() (after a device
// change) does not pile up duplicate <option> entries; the previous
// selection is restored when that device is still present.
function getDevice(deviceInfos) {
  var selectors = [audioSource, audioOutput, videoSource];
  var previousValues = selectors.map(function(select) {
    return select.value;
  });
  selectors.forEach(function(select) {
    while (select.firstChild) {
      select.removeChild(select.firstChild);
    }
  });
  deviceInfos.forEach(function(deviceInfo) {
    var option = document.createElement('option');
    option.text = deviceInfo.label;
    option.value = deviceInfo.deviceId;
    console.log('lai: deviceInfo: ', deviceInfo.label);
    if (deviceInfo.kind == 'audioinput') {
      audioSource.appendChild(option);
    } else if (deviceInfo.kind == 'audiooutput') {
      audioOutput.appendChild(option);
    } else if (deviceInfo.kind == 'videoinput') {
      videoSource.appendChild(option);
    }
  });
  selectors.forEach(function(select, i) {
    var stillPresent = Array.prototype.some.call(select.options, function(option) {
      return option.value === previousValues[i];
    });
    if (stillPresent) {
      select.value = previousValues[i];
    }
  });
}

// Show the stream, then enumerate devices (labels are only available
// after capture permission has been granted).
function getMediaStream(stream) {
  currentStream = stream;
  videoplay.srcObject = stream;
  return navigator.mediaDevices.enumerateDevices();
}

function handleError(err) {
  console.log('getUserMedia error: ', err);
}

// (Re)start capture; runs at load time and whenever the selected
// video device changes.
function start() {
  if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
    console.log('getUserMedia is not supported');
  } else {
    // Release the previous camera/microphone before re-acquiring;
    // otherwise switching devices can fail (NotReadableError) because
    // the old tracks still hold the hardware.
    if (currentStream) {
      currentStream.getTracks().forEach(function(track) {
        track.stop();
      });
      currentStream = null;
    }
    var deviceId = videoSource.value;
    var constraints = {
      video : {
        width: 1920,
        height: 1080,
        frameRate: 30,
        deviceId : deviceId ? deviceId : undefined
      },
      audio : {
        noiseSuppression: true,
        echoCancellation: true
      }
    };
    navigator.mediaDevices.getUserMedia(constraints).then(getMediaStream).then(getDevice).catch(handleError);
  }
}
start();
videoSource.onchange = start;
实战
视频渲染特效
浏览器视频特效
CSS filter: -webkit-filter/filter
标准属性名是filter,早期的Chrome/Safari需要带-webkit-前缀的-webkit-filter,实际使用时通常两个属性都写以保证各浏览器兼容
需要知道如何将video和filter关联
特效的底层调用都是OpenGL/Metal等等
常用特效
代码测试
在index.html中添加css和filter的selector,然后在js中指定播放器用的类
index.html
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<!-- Declare the charset so non-ASCII text renders correctly -->
<meta charset="utf-8">
<title>WebRtc capture video and audio</title>
<style>
/* Each class applies one CSS filter; both the standard `filter`
   property and the -webkit- prefixed form are declared so the
   effects also work outside WebKit-based browsers. */
.none {
filter: none;
-webkit-filter: none;
}
.blur {
filter: blur(3px);
-webkit-filter: blur(3px);
}
.grayscale {
filter: grayscale(1);
-webkit-filter: grayscale(1);
}
.invert {
filter: invert(1);
-webkit-filter: invert(1);
}
.sepia {
filter: sepia(1);
-webkit-filter: sepia(1);
}
</style>
</head>
<body>
<div>
<label>audioSource:</label>
<select id="audioSource"></select>
</div>
<div>
<label>audioOutput:</label>
<select id="audioOutput"></select>
</div>
<div>
<label>videoSource:</label>
<select id="videoSource"></select>
</div>
<div>
<label>filter:</label>
<select id="filter">
<option value="none"> None</option>
<option value="blur"> blur</option>
<option value="grayscale"> grayscale</option>
<option value="invert"> invert</option>
<option value="sepia"> sepia</option>
</select>
</div>
<video autoplay playsinline id="player"></video>
<!-- adapter.js smooths over vendor-prefixed getUserMedia differences -->
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<script src="./js/client.js"></script>
</body>
</html>
client.js
'use strict'
// Device selectors, the filter selector and the playback element.
var audioSource = document.querySelector('select#audioSource');
var audioOutput = document.querySelector('select#audioOutput');
var videoSource = document.querySelector('select#videoSource');
var filterSelect = document.querySelector('select#filter');
var videoplay = document.querySelector('video#player');
// Stream currently playing; tracked so it can be stopped before a restart.
var currentStream = null;

// Rebuild the device <select> lists from enumerateDevices() results.
// The lists are emptied first so that re-running start() (after a device
// change) does not pile up duplicate <option> entries; the previous
// selection is restored when that device is still present.
function getDevice(deviceInfos) {
  var selectors = [audioSource, audioOutput, videoSource];
  var previousValues = selectors.map(function(select) {
    return select.value;
  });
  selectors.forEach(function(select) {
    while (select.firstChild) {
      select.removeChild(select.firstChild);
    }
  });
  deviceInfos.forEach(function(deviceInfo) {
    var option = document.createElement('option');
    option.text = deviceInfo.label;
    option.value = deviceInfo.deviceId;
    console.log('lai: deviceInfo: ', deviceInfo.label);
    if (deviceInfo.kind == 'audioinput') {
      audioSource.appendChild(option);
    } else if (deviceInfo.kind == 'audiooutput') {
      audioOutput.appendChild(option);
    } else if (deviceInfo.kind == 'videoinput') {
      videoSource.appendChild(option);
    }
  });
  selectors.forEach(function(select, i) {
    var stillPresent = Array.prototype.some.call(select.options, function(option) {
      return option.value === previousValues[i];
    });
    if (stillPresent) {
      select.value = previousValues[i];
    }
  });
}

// Show the stream, then enumerate devices (labels are only available
// after capture permission has been granted).
function getMediaStream(stream) {
  currentStream = stream;
  videoplay.srcObject = stream;
  return navigator.mediaDevices.enumerateDevices();
}

function handleError(err) {
  console.log('getUserMedia error: ', err);
}

// (Re)start capture; runs at load time and whenever the selected
// video device changes.
function start() {
  if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
    console.log('getUserMedia is not supported');
  } else {
    // Release the previous camera/microphone before re-acquiring;
    // otherwise switching devices can fail (NotReadableError) because
    // the old tracks still hold the hardware.
    if (currentStream) {
      currentStream.getTracks().forEach(function(track) {
        track.stop();
      });
      currentStream = null;
    }
    var deviceId = videoSource.value;
    var constraints = {
      video : {
        width: 1920,
        height: 1080,
        frameRate: 30,
        deviceId : deviceId ? deviceId : undefined
      },
      audio : {
        noiseSuppression: true,
        echoCancellation: true
      }
    };
    navigator.mediaDevices.getUserMedia(constraints).then(getMediaStream).then(getDevice).catch(handleError);
  }
}
start();
videoSource.onchange = start;

// Apply the chosen CSS filter class to the live video element.
filterSelect.onchange = function() {
  videoplay.className = filterSelect.value;
};
从视频中获取图片
index.html
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<!-- Declare the charset so non-ASCII text renders correctly -->
<meta charset="utf-8">
<title>WebRtc capture video and audio</title>
<style>
/* Each class applies one CSS filter; both the standard `filter`
   property and the -webkit- prefixed form are declared so the
   effects also work outside WebKit-based browsers. */
.none {
filter: none;
-webkit-filter: none;
}
.blur {
filter: blur(3px);
-webkit-filter: blur(3px);
}
.grayscale {
filter: grayscale(1);
-webkit-filter: grayscale(1);
}
.invert {
filter: invert(1);
-webkit-filter: invert(1);
}
.sepia {
filter: sepia(1);
-webkit-filter: sepia(1);
}
</style>
</head>
<body>
<div>
<label>audioSource:</label>
<select id="audioSource"></select>
</div>
<div>
<label>audioOutput:</label>
<select id="audioOutput"></select>
</div>
<div>
<label>videoSource:</label>
<select id="videoSource"></select>
</div>
<div>
<label>filter:</label>
<select id="filter">
<option value="none"> None</option>
<option value="blur"> blur</option>
<option value="grayscale"> grayscale</option>
<option value="invert"> invert</option>
<option value="sepia"> sepia</option>
</select>
</div>
<video autoplay playsinline id="player"></video>
<div>
<button id="snapshot">Take snapshot</button>
</div>
<div>
<!-- Snapshot frames are drawn into this canvas -->
<canvas id="picture"></canvas>
</div>
<!-- adapter.js smooths over vendor-prefixed getUserMedia differences -->
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<script src="./js/client.js"></script>
</body>
</html>
client.js
'use strict'
// devices
var audioSource = document.querySelector('select#audioSource');
var audioOutput = document.querySelector('select#audioOutput');
var videoSource = document.querySelector('select#videoSource');
// filter
var filterSelect = document.querySelector('select#filter');
// picture
var snapshot = document.querySelector('button#snapshot');
var picture = document.querySelector('canvas#picture');
picture.width = 320;
picture.height = 240;
var videoplay = document.querySelector('video#player');
// Stream currently playing; tracked so it can be stopped before a restart.
var currentStream = null;

// Rebuild the device <select> lists from enumerateDevices() results.
// The lists are emptied first so that re-running start() (after a device
// change) does not pile up duplicate <option> entries; the previous
// selection is restored when that device is still present.
function getDevice(deviceInfos) {
  var selectors = [audioSource, audioOutput, videoSource];
  var previousValues = selectors.map(function(select) {
    return select.value;
  });
  selectors.forEach(function(select) {
    while (select.firstChild) {
      select.removeChild(select.firstChild);
    }
  });
  deviceInfos.forEach(function(deviceInfo) {
    var option = document.createElement('option');
    option.text = deviceInfo.label;
    option.value = deviceInfo.deviceId;
    if (deviceInfo.kind == 'audioinput') {
      audioSource.appendChild(option);
    } else if (deviceInfo.kind == 'audiooutput') {
      audioOutput.appendChild(option);
    } else if (deviceInfo.kind == 'videoinput') {
      videoSource.appendChild(option);
    }
  });
  selectors.forEach(function(select, i) {
    var stillPresent = Array.prototype.some.call(select.options, function(option) {
      return option.value === previousValues[i];
    });
    if (stillPresent) {
      select.value = previousValues[i];
    }
  });
}

// Show the stream, then enumerate devices (labels are only available
// after capture permission has been granted).
function getMediaStream(stream) {
  currentStream = stream;
  videoplay.srcObject = stream;
  return navigator.mediaDevices.enumerateDevices();
}

function handleError(err) {
  console.log('getUserMedia error: ', err);
}

// (Re)start capture; runs at load time and whenever the selected
// video device changes.
function start() {
  if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
    console.log('getUserMedia is not supported');
  } else {
    // Release the previous camera/microphone before re-acquiring;
    // otherwise switching devices can fail (NotReadableError) because
    // the old tracks still hold the hardware.
    if (currentStream) {
      currentStream.getTracks().forEach(function(track) {
        track.stop();
      });
      currentStream = null;
    }
    var deviceId = videoSource.value;
    var constraints = {
      video : {
        width: 1920,
        height: 1080,
        frameRate: 30,
        deviceId : deviceId ? deviceId : undefined
      },
      audio : {
        noiseSuppression: true,
        echoCancellation: true
      }
    };
    navigator.mediaDevices.getUserMedia(constraints).then(getMediaStream).then(getDevice).catch(handleError);
  }
}
start();
videoSource.onchange = start;

// Apply the chosen CSS filter class to the live video element.
filterSelect.onchange = function() {
  videoplay.className = filterSelect.value;
};

// Draw the current video frame into the canvas. Note: the CSS class only
// styles the canvas on screen; the drawn pixels (and any saved image)
// do NOT include the filter effect.
snapshot.onclick = function() {
  picture.className = filterSelect.value;
  picture.getContext('2d').drawImage(videoplay, 0, 0, picture.width, picture.height);
};
只采集音频数据
临时屏蔽videoplayer,设置constraints中的video为false,获取到的媒体流赋值给audioplayer
index.html
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<!-- Declare the charset so non-ASCII text renders correctly -->
<meta charset="utf-8">
<title>WebRtc capture video and audio</title>
<style>
/* Each class applies one CSS filter; both the standard `filter`
   property and the -webkit- prefixed form are declared so the
   effects also work outside WebKit-based browsers. */
.none {
filter: none;
-webkit-filter: none;
}
.blur {
filter: blur(3px);
-webkit-filter: blur(3px);
}
.grayscale {
filter: grayscale(1);
-webkit-filter: grayscale(1);
}
.invert {
filter: invert(1);
-webkit-filter: invert(1);
}
.sepia {
filter: sepia(1);
-webkit-filter: sepia(1);
}
</style>
</head>
<body>
<div>
<label>audioSource:</label>
<select id="audioSource"></select>
</div>
<div>
<label>audioOutput:</label>
<select id="audioOutput"></select>
</div>
<div>
<label>videoSource:</label>
<select id="videoSource"></select>
</div>
<div>
<label>filter:</label>
<select id="filter">
<option value="none"> None</option>
<option value="blur"> blur</option>
<option value="grayscale"> grayscale</option>
<option value="invert"> invert</option>
<option value="sepia"> sepia</option>
</select>
</div>
<!-- `controls` shows the built-in playback controls -->
<audio autoplay controls id="audioplayer" ></audio>
<!-- Video playback disabled for the audio-only demo: -->
<!-- <video autoplay playsinline id="player"></video> -->
<div>
<button id="snapshot">Take snapshot</button>
</div>
<div>
<canvas id="picture"></canvas>
</div>
<!-- adapter.js smooths over vendor-prefixed getUserMedia differences -->
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<script src="./js/client.js"></script>
</body>
</html>
client.js
'use strict'
// devices
var audioSource = document.querySelector('select#audioSource');
var audioOutput = document.querySelector('select#audioOutput');
var videoSource = document.querySelector('select#videoSource');
// filter (unused in this audio-only demo, kept to match index.html)
var filterSelect = document.querySelector('select#filter');
// picture (unused in this audio-only demo, kept to match index.html)
var snapshot = document.querySelector('button#snapshot');
var picture = document.querySelector('canvas#picture');
picture.width = 320;
picture.height = 240;
// Audio-only demo: index.html comments out the <video> element, so the
// captured stream is attached to the <audio> player instead.
var audioplay = document.querySelector('audio#audioplayer');
// Stream currently playing; tracked so it can be stopped before a restart.
var currentStream = null;

// Rebuild the device <select> lists from enumerateDevices() results.
// The lists are emptied first so that re-running start() (after a device
// change) does not pile up duplicate <option> entries; the previous
// selection is restored when that device is still present.
function getDevice(deviceInfos) {
  var selectors = [audioSource, audioOutput, videoSource];
  var previousValues = selectors.map(function(select) {
    return select.value;
  });
  selectors.forEach(function(select) {
    while (select.firstChild) {
      select.removeChild(select.firstChild);
    }
  });
  deviceInfos.forEach(function(deviceInfo) {
    var option = document.createElement('option');
    option.text = deviceInfo.label;
    option.value = deviceInfo.deviceId;
    if (deviceInfo.kind == 'audioinput') {
      audioSource.appendChild(option);
    } else if (deviceInfo.kind == 'audiooutput') {
      audioOutput.appendChild(option);
    } else if (deviceInfo.kind == 'videoinput') {
      videoSource.appendChild(option);
    }
  });
  selectors.forEach(function(select, i) {
    var stillPresent = Array.prototype.some.call(select.options, function(option) {
      return option.value === previousValues[i];
    });
    if (stillPresent) {
      select.value = previousValues[i];
    }
  });
}

// Attach the (audio-only) stream, then enumerate devices.
function getMediaStream(stream) {
  currentStream = stream;
  audioplay.srcObject = stream;
  return navigator.mediaDevices.enumerateDevices();
}

function handleError(err) {
  console.log('getUserMedia error: ', err);
}

// (Re)start capture; runs at load time and whenever the selected
// video device changes.
function start() {
  if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
    console.log('getUserMedia is not supported');
  } else {
    // Release the previous microphone before re-acquiring it.
    if (currentStream) {
      currentStream.getTracks().forEach(function(track) {
        track.stop();
      });
      currentStream = null;
    }
    var constraints = {
      video : false, // video capture disabled for the audio-only demo
      audio : true
    };
    navigator.mediaDevices.getUserMedia(constraints).then(getMediaStream).then(getDevice).catch(handleError);
  }
}
start();
videoSource.onchange = start;
// NOTE: the filter and snapshot handlers from the video demo are removed
// here on purpose: they referenced the commented-out `videoplay` element,
// which raised a ReferenceError (strict mode) as soon as they fired.
MediaStreamAPI及获取视频约束
MediaStream API
MediaStream.addTrack()
MediaStream.removeTrack()
MediaStream.getVideoTracks()
MediaStream.getAudioTracks()
MediaStream事件
MediaStream.onaddtrack()
MediaStream.onremovetrack()
MediaStream.onended()
获取视频约束
index.html
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<!-- Declare the charset so non-ASCII text renders correctly -->
<meta charset="utf-8">
<title>WebRtc capture video and audio</title>
<style>
/* Each class applies one CSS filter; both the standard `filter`
   property and the -webkit- prefixed form are declared so the
   effects also work outside WebKit-based browsers. */
.none {
filter: none;
-webkit-filter: none;
}
.blur {
filter: blur(3px);
-webkit-filter: blur(3px);
}
.grayscale {
filter: grayscale(1);
-webkit-filter: grayscale(1);
}
.invert {
filter: invert(1);
-webkit-filter: invert(1);
}
.sepia {
filter: sepia(1);
-webkit-filter: sepia(1);
}
</style>
</head>
<body>
<div>
<label>audioSource:</label>
<select id="audioSource"></select>
</div>
<div>
<label>audioOutput:</label>
<select id="audioOutput"></select>
</div>
<div>
<label>videoSource:</label>
<select id="videoSource"></select>
</div>
<div>
<label>filter:</label>
<select id="filter">
<option value="none"> None</option>
<option value="blur"> blur</option>
<option value="grayscale"> grayscale</option>
<option value="invert"> invert</option>
<option value="sepia"> sepia</option>
</select>
</div>
<!-- Audio-only player from the previous demo, disabled here: -->
<!-- <audio autoplay controls id="audioplayer" ></audio> -->
<table>
<tr>
<td><video autoplay playsinline id="player"></video></td>
<!-- Shows the video track settings actually granted by the browser -->
<td><div id = 'constraints' class='output'></div></td>
</tr>
</table>
<div>
<button id="snapshot">Take snapshot</button>
</div>
<div>
<canvas id="picture"></canvas>
</div>
<!-- adapter.js smooths over vendor-prefixed getUserMedia differences -->
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<script src="./js/client.js"></script>
</body>
</html>
client.js
'use strict'
// devices
var audioSource = document.querySelector('select#audioSource');
var audioOutput = document.querySelector('select#audioOutput');
var videoSource = document.querySelector('select#videoSource');
// filter
var filterSelect = document.querySelector('select#filter');
// picture
var snapshot = document.querySelector('button#snapshot');
var picture = document.querySelector('canvas#picture');
picture.width = 320;
picture.height = 240;
var videoplay = document.querySelector('video#player');
// Panel that displays the granted video track settings as JSON.
var divConstraints = document.querySelector('div#constraints');
// Stream currently playing; tracked so it can be stopped before a restart.
var currentStream = null;

// Rebuild the device <select> lists from enumerateDevices() results.
// The lists are emptied first so that re-running start() (after a device
// change) does not pile up duplicate <option> entries; the previous
// selection is restored when that device is still present.
function getDevice(deviceInfos) {
  var selectors = [audioSource, audioOutput, videoSource];
  var previousValues = selectors.map(function(select) {
    return select.value;
  });
  selectors.forEach(function(select) {
    while (select.firstChild) {
      select.removeChild(select.firstChild);
    }
  });
  deviceInfos.forEach(function(deviceInfo) {
    var option = document.createElement('option');
    option.text = deviceInfo.label;
    option.value = deviceInfo.deviceId;
    if (deviceInfo.kind == 'audioinput') {
      audioSource.appendChild(option);
    } else if (deviceInfo.kind == 'audiooutput') {
      audioOutput.appendChild(option);
    } else if (deviceInfo.kind == 'videoinput') {
      videoSource.appendChild(option);
    }
  });
  selectors.forEach(function(select, i) {
    var stillPresent = Array.prototype.some.call(select.options, function(option) {
      return option.value === previousValues[i];
    });
    if (stillPresent) {
      select.value = previousValues[i];
    }
  });
}

// Attach the stream, display the settings the browser actually granted
// (which may differ from the requested constraints), then enumerate
// the devices.
function getMediaStream(stream) {
  currentStream = stream;
  videoplay.srcObject = stream;
  var videoTrack = stream.getVideoTracks()[0];
  if (videoTrack) {
    divConstraints.textContent = JSON.stringify(videoTrack.getSettings(), null, 2);
  }
  return navigator.mediaDevices.enumerateDevices();
}

function handleError(err) {
  console.log('getUserMedia error: ', err);
}

// (Re)start capture; runs at load time and whenever the selected
// video device changes.
function start() {
  if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
    console.log('getUserMedia is not supported');
  } else {
    // Release the previous camera/microphone before re-acquiring;
    // otherwise switching devices can fail (NotReadableError) because
    // the old tracks still hold the hardware.
    if (currentStream) {
      currentStream.getTracks().forEach(function(track) {
        track.stop();
      });
      currentStream = null;
    }
    var deviceId = videoSource.value;
    var constraints = {
      video : {
        width: 640,
        height: 480,
        frameRate: 30,
        deviceId : deviceId ? deviceId : undefined
      },
      audio : true
    };
    navigator.mediaDevices.getUserMedia(constraints).then(getMediaStream).then(getDevice).catch(handleError);
  }
}
start();
videoSource.onchange = start;

// Apply the chosen CSS filter class to the live video element.
filterSelect.onchange = function() {
  videoplay.className = filterSelect.value;
};

// Draw the current video frame into the canvas. The CSS class only
// styles the canvas on screen; the drawn pixels do not include the filter.
snapshot.onclick = function() {
  picture.className = filterSelect.value;
  picture.getContext('2d').drawImage(videoplay, 0, 0, picture.width, picture.height);
};