
Audio recording: implementation and spectrum drawing

Approach

Get access to the recording device (microphone)

Get the recording's waveform/spectrum data

Draw the spectrum (a minimal sketch of the end-to-end flow follows this list)
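The pieces fit together as sketched below: one microphone stream feeds both a MediaRecorder, which assembles the audio file, and an AnalyserNode, which exposes the per-frame waveform bytes that get drawn. This is a minimal sketch using the standard Web Audio / MediaRecorder APIs; the variable names are illustrative.

// Minimal flow sketch: one stream feeds both the recorder and the analyser
navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => {
  const recorder = new MediaRecorder(stream);       // collects chunks -> audio Blob
  const audioContext = new AudioContext();
  const analyser = audioContext.createAnalyser();   // exposes time-domain samples
  audioContext.createMediaStreamSource(stream).connect(analyser);

  const samples = new Uint8Array(analyser.frequencyBinCount);
  analyser.getByteTimeDomainData(samples);          // bytes in [0, 255], drawn on a <canvas>
});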

Implementation

Encapsulating loadDevices.js


/**
 * Check whether recording is supported; returns the media-devices scope, or false if not.
 */
const recordingSupport = () => {
    let scope = navigator.mediaDevices || {};
    if (!scope.getUserMedia) {
        // Fall back to the prefixed APIs on older browsers
        scope = navigator
        scope.getUserMedia || (scope.getUserMedia = scope.webkitGetUserMedia || scope.mozGetUserMedia || scope.msGetUserMedia);
    }

    if (!scope.getUserMedia) {
        return false
    }
    return scope
}


// Request microphone permission
export const getUserMediaPermission = () => {
    return new Promise((resolve, reject) => {
        const mediaDevices = recordingSupport()
        if (mediaDevices && mediaDevices.getUserMedia) {
            let constraints = { audio: true }
            mediaDevices.getUserMedia(constraints).then(resolve, reject);
        } else { reject(new Error("This browser does not support recording")) }
    })
}

function checkMime() {
    // Candidate MIME types, in order of preference
    const types = [
        "audio/mpeg",
        "audio/webm",
        "audio/mp4",
        "audio/wav",
        "audio/ogg",
        "audio/flac",
        "audio/m4a",
        "audio/mp3",
        "audio/mpga",
        "audio/oga",
    ];

    // Return the first type the current browser's MediaRecorder supports
    for (const type of types) {
        if (MediaRecorder.isTypeSupported(type)) {
            console.log("Is " + type + " supported? Yes!");
            return type;
        }
    }
    return undefined;
}
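Because the list is scanned in order, the first supported entry wins. If you prefer a specific container, you can check it directly and fall back to the scan (a sketch, assuming Chromium-style support for audio/webm;codecs=opus):

const mimeType = MediaRecorder.isTypeSupported("audio/webm;codecs=opus")
    ? "audio/webm;codecs=opus"
    : checkMime();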

let streams = []
let stopDraw = false

/**
 * Release resources: stop drawing and stop every captured track
 */
export const devicesDispose = () => {
    console.log('devicesDispose - releasing resources');
    stopDraw = true
    streams.forEach(stream => {
        stream.getTracks().forEach(track => track.stop());
    })
}

// Resolve the AudioContext constructor across vendor prefixes
export const getAudioContext = () => window.AudioContext ||
    window.webkitAudioContext ||
    window.mozAudioContext ||
    window.msAudioContext;


export default function loadDevices(options = {}) {
    const { readover = () => { }, change = () => { }, stop = () => { } } = options
    let analyser;
    let mediaRecorder;
    let dataArray;
    let audioChunks = [];

    try {
        // Allow drawing again after a previous devicesDispose() call
        stopDraw = false;

        const draw = () => {
            if (stopDraw) return
            requestAnimationFrame(draw);
            analyser.getByteTimeDomainData(dataArray);
            change(dataArray);
        };

        let mimeType = checkMime();

        getUserMediaPermission().then((stream) => {
            streams.push(stream)

            // Create the recorder
            mediaRecorder = new MediaRecorder(stream, { mimeType });

            // Collect audio chunks as data becomes available; they are merged into the final file
            mediaRecorder.addEventListener("dataavailable", (event) => {
                console.log("mediaRecorder-dataavailable:", event);
                audioChunks.push(event.data);
            });

            // // Listen for the start of recording
            // mediaRecorder.addEventListener('start', () => {
            //     console.log("mediaRecorder-start:");
            //     audioChunks = []
            // })

            // Called when recording stops
            mediaRecorder.addEventListener("stop", () => {
                console.log("mediaRecorder-end:", audioChunks);
                const audioBlob = new Blob(audioChunks, { type: "audio/mp4" }); // wav webm mp4

                stop(audioBlob);

                // Clear the chunks for the next recording
                audioChunks = []
            });

            // Set up the analyser to read audio data from the stream
            const audioContext = new (getAudioContext())();
            const source = audioContext.createMediaStreamSource(stream);
            // AnalyserNode.getByteTimeDomainData returns the waveform (time-domain data)
            // of the audio; the analyser also exposes frequency data
            analyser = audioContext.createAnalyser();

            // Set the FFT size
            analyser.fftSize = 2048; // adjust this value to change the level of detail
            const bufferLength = analyser.frequencyBinCount;
            dataArray = new Uint8Array(bufferLength);

            // Feed the stream into the analyser
            source.connect(analyser);

            draw()
            readover(mediaRecorder)
        }).catch((err) => {
            console.log("stream-error", err);
        });
    } catch (err) {
        console.log("mediaDevices-error", err);
    }
}
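The callback contract is: readover receives the MediaRecorder once everything is wired up, change is called every animation frame with the time-domain byte array, and stop receives the final Blob. A minimal, framework-free usage sketch (the 3-second timeout and the playback check are illustrative):

loadDevices({
    readover: (recorder) => {
        recorder.start();                            // start recording immediately
        setTimeout(() => recorder.stop(), 3000);     // stop after ~3 seconds
    },
    change: (dataArray) => {
        // dataArray: Uint8Array of time-domain samples, 0..255 with 128 = silence
    },
    stop: (blob) => {
        new Audio(URL.createObjectURL(blob)).play(); // quick playback check
    },
});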

Example

import { ref, onMounted, onUnmounted } from "vue";
import loadDevices, {
  devicesDispose,
} from "../compositions/VerbalChat/loadDevices";

let mediaRecorder;
const speak = ref(false);

// Called when recording stops
const uploadAudio = (blob) => {
  // Post-process / upload the recording here, e.g.:
  // const formData = new FormData();
  // formData.append("file", blob);
  // then POST the formData to your API
};

// Drawing callback
const draw = ({ data }) => {
  // Forward the data to the child canvas component's draw method
  // verCanvas.value && verCanvas.value.draw({ data });
};

const btnClick = () => {
  if (!speak.value) {
    console.log("start recording");
    speak.value = true;
    mediaRecorder && mediaRecorder.start();
  } else {
    console.log("stop recording");
    speak.value = false;
    mediaRecorder && mediaRecorder.stop();
  }
};

onMounted(() => {
  loadDevices({
    readover: (r) => (mediaRecorder = r),
    change: (dataArray) => {
      if (speak.value) {
        // only draw while recording
        draw({ data: dataArray });
      }
    },
    stop: (blob) => uploadAudio(blob),
  });
});

onUnmounted(() => devicesDispose());

Drawing the spectrum

<template>
  <canvas class="VerbalCanvas" ref="canvasRef"></canvas>
</template>

<script setup>
import { onMounted, ref, watch } from "vue";

let ctx, canvas;
const canvasRef = ref();

const draw = ({ data, type = "center" }) => {
  // type selects where the waveform band sits ("top" | "center" | "bottom", see drawWave below);
  // the "center" default here is an assumption, since the original call site left it undefined
  if (!canvasRef.value) return;
  canvas = canvasRef.value;
  canvas.height = parseFloat(getComputedStyle(canvas)["height"]);
  canvas.width = parseFloat(getComputedStyle(canvas)["width"]);
  ctx = canvas.getContext("2d");

  // Pick one of the renderers below
  // drawWave(ctx, canvas, type, data);
  // drawLoop(ctx, canvas, type, data);
  drawCircle(ctx, canvas, type, data);
}

const clear = () => {
  try {
    ctx.clearRect(0, 0, canvas.width, canvas.height);
  } catch (er) {
    console.log("er", er);
  }
}

defineExpose({ draw, clear });
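On the parent side the component is mounted with a template ref and driven through the exposed draw method, as hinted at by the commented-out call in the example above. A sketch, assuming the component file is named VerbalCanvas.vue and the ref is called verCanvas (both names are illustrative):

<template>
  <VerbalCanvas ref="verCanvas" />
</template>

<script setup>
import { ref } from "vue";
import VerbalCanvas from "./VerbalCanvas.vue"; // path is illustrative

const verCanvas = ref();

// Used as loadDevices' change callback
const draw = ({ data }) => {
  verCanvas.value && verCanvas.value.draw({ data });
};
</script>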

Drawing the waveform curve

/** Draw the time-domain waveform as a curve. type: "top" | "center" | "bottom" */
const drawWave = (ctx, canvas, type, data) => {
  const waveH = 150; // height of the wave band
  const obj = {
    top: 0,
    center: canvas.height / 2,
    bottom: canvas.height - waveH,
  };
  const initY = obj[type]; // vertical offset of the band
  const dataArray = data || []; // or mock data: an array of random values

  ctx.fillStyle = "rgba(200, 200, 200, 0)";
  ctx.fillRect(0, 0, canvas.width, canvas.height);

  ctx.lineWidth = 1;
  ctx.strokeStyle = "#0077FF"; // "rgb(0, 0, 0)";

  ctx.clearRect(0, 0, canvas.width, canvas.height);
  ctx.beginPath();

  const sliceWidth = (canvas.width * 1.0) / dataArray.length;
  let x = 0;

  for (let i = 0; i < dataArray.length; i++) {
    const v = dataArray[i] / 128.0; // samples are bytes centered at 128
    // const y = (v * canvas.height) / 2;
    const y = (v * waveH) / 2 + initY;

    if (i === 0) {
      ctx.moveTo(x, y);
    } else {
      ctx.lineTo(x, y);
    }

    x += sliceWidth;
  }

  // ctx.lineTo(canvas.width, canvas.height / 2);
  ctx.lineTo(canvas.width, waveH / 2 + initY);
  ctx.stroke();
};
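getByteTimeDomainData fills the array with unsigned bytes where 128 represents silence, so dividing by 128 normalizes a silent sample to 1.0 and the curve's baseline lands at waveH / 2 + initY. A quick check of the arithmetic:

const waveH = 150;
const initY = 0;                      // type === "top"
const silent = 128;                   // a silent sample
const v = silent / 128.0;             // 1.0
const y = (v * waveH) / 2 + initY;    // 75: the vertical middle of the band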

Drawing the audio ring

/** Draw the audio ring: one radial line per degree, with length driven by the sample value */
const drawLoop = (ctx, canvas, type, data) => {
  ctx.clearRect(0, 0, canvas.width, canvas.height);
  const cX = canvas.width / 2;
  const cY = canvas.height / 2;
  const r = 100;
  const basel = Math.floor(data.length / 360); // samples per degree
  for (let i = 0; i < 360; i++) {
    const value = (data[i * basel] / 60) * 8; // 8;
    // mock data: value = Math.random() * 100
    ctx.beginPath();
    ctx.lineWidth = 2;
    ctx.strokeStyle = "#08a3ef";
    ctx.moveTo(cX, cY);
    // x = R * cos(i * PI / 180), y = -R * sin(i * PI / 180) (the canvas y-axis points down)
    ctx.lineTo(
      Math.cos(((i * 1) / 180) * Math.PI) * (r + value) + cX,
      -Math.sin(((i * 1) / 180) * Math.PI) * (r + value) + cY
    );
    ctx.stroke();
  }
  // Draw an inner disc to cover the line segments near the centre
  ctx.beginPath();
  ctx.lineWidth = 1;
  ctx.arc(cX, cY, r, 0, 2 * Math.PI, false);
  ctx.fillStyle = "#000";
  ctx.stroke();
  ctx.fill();
};

Drawing circles

/** Draw concentric circles whose radii follow the averaged sample values */
const drawCircle = (ctx, canvas, type, data) => {
  ctx.clearRect(0, 0, canvas.width, canvas.height);
  const cX = canvas.width / 2;
  const cY = canvas.height / 2;
  for (let i = 0; i < data.length; i += 4) {
    const v = (data[i] + data[i + 1] + data[i + 2] + data[i + 3]) / 4; // average of 4 samples
    const r = v * 0.5;
    // mock data: for (let i = 0; i < 254; i += 4) {
    //   const r = Math.random() * 100;
    ctx.beginPath();
    ctx.lineWidth = 1;
    ctx.arc(cX, cY, r, 0, 2 * Math.PI, false);
    ctx.strokeStyle = "#c46868";
    ctx.stroke();
  }
};
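Each iteration averages four consecutive samples and uses half of that average as a radius, so a silent signal collapses into a single ring while louder input fans out into many concentric circles. A quick sanity check of the radius range:

const silentR = ((128 + 128 + 128 + 128) / 4) * 0.5; // 64: all samples at the silence level
const maxR = ((255 + 255 + 255 + 255) / 4) * 0.5;    // 127.5: loudest possible samples
const minR = 0;                                      // all-zero samples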
