当前位置: 首页 > article >正文

web端手机录音

可以将每个片段的音频,变成完整的mp3(或其他格式文件)

采样率使用16000(本代码中,其他采样率可能会导致噪音或无法正常播放,具体采样率请自行研究)

引入第三方依赖

<script src="https://cdnjs.cloudflare.com/ajax/libs/lamejs/1.2.0/lame.min.js"></script> 

webRecorder的js 代码

/**
 * Converts float samples in [-1, 1] to little-endian 16-bit signed PCM.
 * @param {Float32Array|number[]} input - float audio samples
 * @returns {DataView} view over a buffer of input.length * 2 bytes
 */
export function to16BitPCM(input) {
  // Two bytes per sample.
  const view = new DataView(new ArrayBuffer(input.length * 2));
  for (let i = 0; i < input.length; i += 1) {
    // Clamp to [-1, 1], then scale: negatives map onto [-0x8000, 0),
    // non-negatives onto [0, 0x7fff].
    const clamped = Math.min(1, Math.max(-1, input[i]));
    const sample = clamped < 0 ? clamped * 0x8000 : clamped * 0x7fff;
    view.setInt16(i * 2, sample, true);
  }
  return view;
}
/**
 * Resamples audio to 16 kHz by linear interpolation.
 * @param {Float32Array|number[]} audioData - source samples
 * @param {number} [sampleRate=44100] - source sample rate in Hz
 * @returns {Float32Array} resampled audio (length scaled by 16000/sampleRate)
 */
export function to16kHz(audioData, sampleRate = 44100) {
  const data = new Float32Array(audioData);
  const fitCount = Math.round(data.length * (16000 / sampleRate));
  const newData = new Float32Array(fitCount);
  if (fitCount === 0) {
    return newData; // empty input → empty output
  }
  if (fitCount === 1) {
    // Matches the original's net effect (the trailing overwrite won) and
    // avoids a division by zero in springFactor below.
    newData[0] = data[data.length - 1];
    return newData;
  }
  const springFactor = (data.length - 1) / (fitCount - 1);
  newData[0] = data[0];
  for (let i = 1; i < fitCount - 1; i++) {
    const tmp = i * springFactor;
    // Fix: the original used .toFixed(), which returns *strings* that were
    // then used as array indices and in arithmetic via implicit coercion.
    const before = Math.floor(tmp);
    const after = Math.ceil(tmp);
    const atPoint = tmp - before;
    newData[i] = data[before] + (data[after] - data[before]) * atPoint;
  }
  newData[fitCount - 1] = data[data.length - 1];
  return newData;
}

// Source text of an AudioWorkletProcessor, assembled as a string so it can be
// loaded from a Blob URL. `${to16kHz}` / `${to16BitPCM}` splice the *source
// code* of those functions into the worklet scope (they are not closures).
// `sampleRate` is the AudioWorkletGlobalScope global.
const audioWorkletCode = `
  class MyProcessor extends AudioWorkletProcessor {
    constructor(options) {
      super(options);
      this.audioData = [];
      this.audioDataFloat32 = [];
      this.sampleCount = 0;
      this.bitCount = 0;
      this.preTime = 0;
    }

    process(inputs) {
      // Handle mono channel 0 of input 0, when connected.
      if (inputs[0][0]) {
        const output = ${to16kHz}(inputs[0][0], sampleRate);
        this.sampleCount += 1;
        const audioData = ${to16BitPCM}(output);
        this.bitCount += 1;
        const data = [...new Int16Array(audioData.buffer)];
        this.audioData = this.audioData.concat(data);

        const dataFloat32 = [...output];
        this.audioDataFloat32 = this.audioDataFloat32.concat(dataFloat32);

        // Post accumulated audio to the main thread roughly every 100 ms.
        if (new Date().getTime() - this.preTime > 100) {
          this.port.postMessage({
            audioData: new Int16Array(this.audioData),
            audioDataFloat32: new Float32Array(this.audioDataFloat32),
            sampleCount: this.sampleCount,
            bitCount: this.bitCount
          });
          this.preTime = new Date().getTime();
          this.audioData = [];
          this.audioDataFloat32 = [];
        }
      }
      // Fix: always return true to keep the processor alive. The original
      // returned true only inside the if-block, so process() yielded
      // undefined whenever no input was connected, allowing the browser to
      // terminate the node.
      return true;
    }
  }

  registerProcessor('my-processor', MyProcessor);
  `;
// Tag appended to every console line emitted by this module.
const TAG = 'WebRecorder';
// Legacy polyfill: normalize vendor-prefixed getUserMedia implementations
// onto navigator.getUserMedia for older browsers.
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia
  || navigator.mozGetUserMedia || navigator.msGetUserMedia;


/**
 * WebRecorder — captures microphone audio through WebAudio, resamples it to
 * 16 kHz / 16-bit PCM and delivers it in ~100 ms chunks via the
 * OnReceivedData / OnStop / OnError callback hooks.
 *
 * Prefers an AudioWorklet pipeline and falls back to the deprecated
 * ScriptProcessorNode when AudioWorklet is unavailable.
 */
export default class WebRecorder {
  /**
   * @param {string} requestId - id used to correlate log lines
   * @param {Object} params - options; set `echoCancellation` to 'false'/false to disable echo cancellation
   * @param {boolean} isLog - when truthy, emit console diagnostics
   */
  constructor(requestId, params, isLog) {
    this.audioData = [];        // Int16 samples buffered since the last emit (ScriptProcessor path)
    this.audioDataFloat32 = []; // Float32 counterpart of audioData
    this.allAudioData = [];     // every Int16 sample captured this session (passed to OnStop)
    this.stream = null;         // MediaStream obtained from getUserMedia
    this.audioContext = null;
    this.requestId = requestId;
    this.frameTime = [];        // "<ms timestamp>-<frame index>" markers, for debugging
    this.frameCount = 0;
    this.sampleCount = 0;       // resample passes performed
    this.bitCount = 0;          // PCM conversions performed
    this.getDataCount = 0;      // onaudioprocess invocations (was only initialized in start())
    this.preTime = 0;           // timestamp of the last chunk emission (was only initialized in start())
    this.mediaStreamSource = null;
    this.isLog = isLog;
    this.params = params;
  }
  /** True when any flavour of getUserMedia is available. */
  static isSupportMediaDevicesMedia() {
    return !!(navigator.getUserMedia || (navigator.mediaDevices && navigator.mediaDevices.getUserMedia));
  }
  /** True when the legacy navigator.getUserMedia API is available. */
  static isSupportUserMediaMedia() {
    return !!navigator.getUserMedia;
  }
  /** True when (webkit)AudioContext exists in this environment. */
  static isSupportAudioContext() {
    return typeof AudioContext !== 'undefined' || typeof webkitAudioContext !== 'undefined';
  }
  /** True when the given AudioContext can create a MediaStream source node. */
  static isSupportMediaStreamSource(requestId, audioContext) {
    return typeof audioContext.createMediaStreamSource === 'function';
  }
  /** True when the AudioWorklet API is fully available on the given context. */
  static isSupportAudioWorklet(audioContext) {
    return audioContext.audioWorklet && typeof audioContext.audioWorklet.addModule === 'function'
      && typeof AudioWorkletNode !== 'undefined';
  }
  /** True when the (deprecated) ScriptProcessorNode fallback is available. */
  static isSupportCreateScriptProcessor(requestId, audioContext) {
    return typeof audioContext.createScriptProcessor === 'function';
  }
  /** Resets per-session state, creates the AudioContext and requests the mic. */
  start() {
    this.frameTime = [];
    this.frameCount = 0;
    this.allAudioData = [];
    this.audioData = [];
    this.sampleCount = 0;
    this.bitCount = 0;
    this.getDataCount = 0;
    this.audioContext = null;
    this.mediaStreamSource = null;
    this.stream = null;
    this.preTime = 0;
    try {
      if (WebRecorder.isSupportAudioContext()) {
        this.audioContext = new (window.AudioContext || window.webkitAudioContext)();
      } else {
        this.isLog && console.log(this.requestId, '浏览器不支持AudioContext', TAG);
        this.OnError('浏览器不支持AudioContext');
      }
    } catch (e) {
      this.isLog && console.log(this.requestId, '浏览器不支持webAudioApi相关接口', e, TAG);
      this.OnError('浏览器不支持webAudioApi相关接口');
    }
    this.getUserMedia(this.requestId, this.getAudioSuccess, this.getAudioFail);
  }
  /** Suspends the AudioContext and hands all captured audio to OnStop. */
  stop() {
    // Fix: the original called suspend() once behind a "not Safari" check and
    // then again unconditionally, making the browser check dead code; a
    // single suspend() preserves the effective behavior.
    this.audioContext && this.audioContext.suspend();
    this.isLog && console.log(this.requestId, `webRecorder stop ${this.sampleCount}/${this.bitCount}/${this.getDataCount}`, JSON.stringify(this.frameTime), TAG);
    this.OnStop(this.allAudioData);
  }
  /** Stops every track of the captured MediaStream and releases it. */
  destroyStream() {
    if (this.stream) {
      // forEach (not map): only the side effect of stopping tracks is wanted.
      this.stream.getTracks().forEach((track) => {
        track.stop();
      });
      this.stream = null;
    }
  }
  /**
   * Requests microphone access, preferring mediaDevices.getUserMedia and
   * falling back to the legacy callback API. On success the stream is stored
   * and getStreamAudioSuccess is invoked with (requestId, stream).
   */
  async getUserMedia(requestId, getStreamAudioSuccess, getStreamAudioFail) {
    let audioOption = {
      echoCancellation: true,
    };
    // Disable echo cancellation when explicitly requested via params.
    if (this.params && String(this.params.echoCancellation) === 'false') {
      audioOption = {
        echoCancellation: false,
      };
    }
    const mediaOption = {
      audio: audioOption,
      video: false,
    };
    if (WebRecorder.isSupportMediaDevicesMedia()) {
      navigator.mediaDevices
        .getUserMedia(mediaOption)
        .then(stream => {
          this.stream = stream;
          getStreamAudioSuccess.call(this, requestId, stream);
        })
        .catch(e => {
          getStreamAudioFail.call(this, requestId, e);
        });
    } else if (WebRecorder.isSupportUserMediaMedia()) {
      navigator.getUserMedia(mediaOption,
        stream => {
          this.stream = stream;
          getStreamAudioSuccess.call(this, requestId, stream);
        },
        function (err) {
          // NOTE(review): plain function, so `this` here is not the recorder
          // instance; kept exactly as the original behaved.
          getStreamAudioFail.call(this, requestId, err);
        }
      );
    } else {
      // Chrome only exposes getUserMedia on secure origins (https/localhost).
      if (navigator.userAgent.toLowerCase().match(/chrome/) && location.origin.indexOf('https://') < 0) {
        this.isLog && console.log(this.requestId, 'chrome下获取浏览器录音功能,因为安全性问题,需要在localhost或127.0.0.1或https下才能获取权限', TAG);
        this.OnError('chrome下获取浏览器录音功能,因为安全性问题,需要在localhost或127.0.0.1或https下才能获取权限');
      } else {
        this.isLog && console.log(this.requestId, '无法获取浏览器录音功能,请升级浏览器或使用chrome', TAG);
        this.OnError('无法获取浏览器录音功能,请升级浏览器或使用chrome');
      }
      this.audioContext && this.audioContext.close();
    }
  }
  /**
   * Wires the captured stream into the audio graph, choosing AudioWorklet
   * when supported and ScriptProcessorNode otherwise.
   */
  async getAudioSuccess(requestId, stream) {
    if (!this.audioContext) {
      return false;
    }
    if (this.mediaStreamSource) {
      this.mediaStreamSource.disconnect();
      this.mediaStreamSource = null;
    }
    this.audioTrack = stream.getAudioTracks()[0];
    const mediaStream = new MediaStream();
    mediaStream.addTrack(this.audioTrack);
    this.mediaStreamSource = this.audioContext.createMediaStreamSource(mediaStream);
    if (WebRecorder.isSupportMediaStreamSource(requestId, this.audioContext)) {
      if (WebRecorder.isSupportAudioWorklet(this.audioContext)) {
        this.audioWorkletNodeDealAudioData(this.mediaStreamSource, requestId);
      } else {
        // AudioWorklet unavailable — degrade to ScriptProcessorNode.
        this.scriptNodeDealAudioData(this.mediaStreamSource, requestId);
      }
    } else {
      this.isLog && console.log(this.requestId, '不支持MediaStreamSource', TAG);
      this.OnError('不支持MediaStreamSource');
    }
  }
  /** Reports a getUserMedia failure to OnError and stops the session. */
  getAudioFail(requestId, err) {
    // NOTE(review): this reads err.err.name, implying the error is wrapped as
    // { err }; getUserMedia itself rejects with the DOMException directly —
    // verify against the actual caller before changing.
    if (err && err.err && err.err.name === 'NotAllowedError') {
      this.isLog && console.log(requestId, '授权失败', JSON.stringify(err.err), TAG);
    }
    this.isLog && console.log(this.requestId, 'getAudioFail', JSON.stringify(err), TAG);
    this.OnError(err);
    this.stop();
  }
  /**
   * ScriptProcessorNode fallback: converts each 1024-frame mono block to
   * 16 kHz / Int16 and emits buffered chunks via OnReceivedData every ~100 ms.
   */
  scriptNodeDealAudioData(mediaStreamSource, requestId) {
    if (WebRecorder.isSupportCreateScriptProcessor(requestId, this.audioContext)) {
      // 1024-frame buffer, mono in / mono out.
      const scriptProcessor = this.audioContext.createScriptProcessor(1024, 1, 1);
      this.mediaStreamSource && this.mediaStreamSource.connect(scriptProcessor);
      scriptProcessor && scriptProcessor.connect(this.audioContext.destination);
      scriptProcessor.onaudioprocess = (e) => {
        this.getDataCount += 1;
        const inputData = e.inputBuffer.getChannelData(0);
        const output = to16kHz(inputData, this.audioContext.sampleRate);
        const audioData = to16BitPCM(output);
        this.audioDataFloat32.push(...output);
        this.audioData.push(...new Int16Array(audioData.buffer));
        this.allAudioData.push(...new Int16Array(audioData.buffer));
        // Emit at most once per ~100 ms.
        if (new Date().getTime() - this.preTime > 100) {
          this.frameTime.push(`${Date.now()}-${this.frameCount}`);
          this.frameCount += 1;
          this.preTime = new Date().getTime();
          const audioDataArray = new Int16Array(this.audioData);
          this.OnReceivedData(audioDataArray);
          this.audioData = [];
          this.sampleCount += 1;
          this.bitCount += 1;
        }
      };
    } else {
      this.isLog && console.log(this.requestId, '不支持createScriptProcessor', TAG);
    }
  }
  /**
   * AudioWorklet path: loads the inline worklet (audioWorkletCode) from a
   * Blob URL and forwards each posted chunk to OnReceivedData. Falls back to
   * the ScriptProcessor path on processor/message errors.
   */
  async audioWorkletNodeDealAudioData(mediaStreamSource, requestId) {
    try {
      const audioWorkletBlobURL = window.URL.createObjectURL(new Blob([audioWorkletCode], { type: 'text/javascript' }));
      await this.audioContext.audioWorklet.addModule(audioWorkletBlobURL);
      const myNode = new AudioWorkletNode(this.audioContext, 'my-processor', { numberOfInputs: 1, numberOfOutputs: 1, channelCount: 1 });
      myNode.onprocessorerror = (event) => {
        // Degrade to the ScriptProcessor pipeline.
        this.scriptNodeDealAudioData(mediaStreamSource, this.requestId);
        return false;
      };
      myNode.port.onmessage = (event) => {
        // (removed a stray debug console.log(event) from the original)
        this.frameTime.push(`${Date.now()}-${this.frameCount}`);
        this.OnReceivedData(event.data.audioData);
        this.frameCount += 1;
        this.allAudioData.push(...event.data.audioData);
        this.sampleCount = event.data.sampleCount;
        this.bitCount = event.data.bitCount;
      };
      myNode.port.onmessageerror = (event) => {
        // Degrade to the ScriptProcessor pipeline.
        this.scriptNodeDealAudioData(mediaStreamSource, requestId);
        return false;
      };
      mediaStreamSource && mediaStreamSource.connect(myNode).connect(this.audioContext.destination);
    } catch (e) {
      this.isLog && console.log(this.requestId, 'audioWorkletNodeDealAudioData catch error', JSON.stringify(e), TAG);
      this.OnError(e);
    }
  }
  /** Hook: called with each Int16Array chunk of captured audio. */
  OnReceivedData(data) { }
  /** Hook: called with an error message or error object. */
  OnError(res) { }
  /** Hook: called from stop() with every sample captured in the session. */
  OnStop(res) { }
}
// Expose the recorder as a browser global when a window object exists
// (e.g. when the bundle is consumed via a plain <script> tag).
if (typeof window !== 'undefined') {
  window.WebRecorder = WebRecorder;
}

下面是调用上述录音模块的 Vue 组件代码,里面有一些测试 demo(不一定能用),看主要代码即可。

<template>
  <!-- Demo UI: start recording / stop ("pause") / export accumulated audio as MP3 / generate a 440 Hz test-tone MP3 -->
  <div style="padding: 20px">
    <h3>录音上传</h3>

    <div style="font-size: 14px">
      <el-button type="primary" @click="handleStart">开始录音</el-button>
      <el-button type="info" @click="handlePause">暂停录音</el-button>
      <el-button type="info" @click="handlePlay">播放录音</el-button>
      <el-button type="info" @click="makemp3">生成MP3</el-button>
    </div>
  </div>
</template>

<script setup>
import lamejs from "lamejs";
import webRecorder from "./assets/js/index";

import MPEGMode from "lamejs/src/js/MPEGMode";
import BitStream from "lamejs/src/js/BitStream";

// window.MPEGMode = MPEGMode;
// window.Lame = Lame;
// window.BitStream = BitStream;

// Fix: the module imports the constructor as `webRecorder` (lower-case); the
// original `new WebRecorder()` only worked by accident through the
// window.WebRecorder global the module installs. Use the import directly.
const recorder = new webRecorder();

// Accumulated Int16 PCM samples received from the recorder (used by handlePlay).
const audioData = [];

/**
 * Encodes 16-bit PCM samples to an MP3 Blob (mono, 128 kbps) using lamejs,
 * feeding the encoder in fixed-size chunks.
 * @param {Int16Array} int8ArrayData - PCM samples (despite the name, 16-bit)
 * @param {number} sampleRate - PCM sample rate in Hz
 * @returns {Blob} the encoded MP3 ("audio/mp3")
 */
function int8ArrayToMp3(int8ArrayData, sampleRate) {
  const CHANNELS = 1;
  const CHUNK = 4096;
  const encoder = new lamejs.Mp3Encoder(CHANNELS, sampleRate, 128);
  const mp3Data = [];
  for (let start = 0; start < int8ArrayData.length; start += CHUNK) {
    // subarray clamps past the end, so the last chunk may be shorter.
    const chunk = int8ArrayData.subarray(start, start + CHUNK);
    const samples = new Int16Array(chunk.length);
    chunk.forEach((value, idx) => {
      samples[idx] = value;
    });
    const encoded = encoder.encodeBuffer(samples);
    if (encoded.length > 0) {
      mp3Data.push(new Uint8Array(encoded));
    }
  }
  // Flush the encoder's trailing frames.
  const tail = encoder.flush();
  if (tail.length > 0) {
    mp3Data.push(new Uint8Array(tail));
  }
  return new Blob(mp3Data, { type: "audio/mp3" });
}

/**
 * Wraps 16-bit PCM samples in a WAV container and returns an object URL.
 * Generalized: the sample rate is now a parameter (default 44100 preserves
 * the original behavior; pass 16000 to match this recorder's output).
 * @param {Int16Array} int8ArrayData - PCM samples (despite the name, 16-bit)
 * @param {number} [sampleRate=44100] - PCM sample rate in Hz
 * @returns {string} blob object URL for the "audio/wav" data
 */
function int8ArrayToWavURL(int8ArrayData, sampleRate = 44100) {
  const numChannels = 1; // mono
  const bytesPerSample = 2; // 16-bit audio
  const byteRate = sampleRate * numChannels * bytesPerSample;
  const dataLength = int8ArrayData.length * bytesPerSample;

  const header = createWavHeader(numChannels, sampleRate, byteRate, dataLength);
  const wavBuffer = new Uint8Array(
    header.buffer.byteLength + int8ArrayData.length * bytesPerSample
  );
  wavBuffer.set(new Uint8Array(header.buffer), 0);

  // Write the samples little-endian directly after the 44-byte header.
  for (let i = 0; i < int8ArrayData.length; i++) {
    const value = int8ArrayData[i];
    wavBuffer[i * 2 + 44] = value & 0xff;
    wavBuffer[i * 2 + 45] = (value >> 8) & 0xff;
  }

  const blob = new Blob([wavBuffer], { type: "audio/wav" });
  return URL.createObjectURL(blob);
}

/**
 * Builds the canonical 44-byte RIFF/WAVE PCM header.
 * @param {number} numChannels - channel count (1 = mono)
 * @param {number} sampleRate - samples per second
 * @param {number} byteRate - sampleRate * numChannels * bytesPerSample
 * @param {number} dataLength - size in bytes of the PCM payload
 * @returns {DataView} view over the 44-byte header buffer
 */
function createWavHeader(numChannels, sampleRate, byteRate, dataLength) {
  const buffer = new ArrayBuffer(44);
  const view = new DataView(buffer);
  const bytesPerSample = 2; // 16-bit PCM

  // RIFF chunk descriptor
  writeString(view, 0, "RIFF");
  view.setUint32(4, 36 + dataLength, true);
  writeString(view, 8, "WAVE");

  // fmt sub-chunk
  writeString(view, 12, "fmt ");
  view.setUint32(16, 16, true); // fmt chunk size for PCM
  view.setUint16(20, 0x0001, true); // WAVE_FORMAT_PCM
  view.setUint16(22, numChannels, true);
  view.setUint32(24, sampleRate, true);
  view.setUint32(28, byteRate, true);
  // Fix: block align is numChannels * bytesPerSample; the original
  // hard-coded 2, which yields an invalid header for anything but
  // mono 16-bit audio. (Unchanged result for mono.)
  view.setUint16(32, numChannels * bytesPerSample, true);
  view.setUint16(34, 16, true); // bits per sample

  // data sub-chunk
  writeString(view, 36, "data");
  view.setUint32(40, dataLength, true);

  return view;
}

/**
 * Writes an ASCII string into a DataView, one byte per UTF-16 code unit.
 * @param {DataView} view - destination
 * @param {number} offset - starting byte offset
 * @param {string} string - text to write (expected ASCII)
 */
function writeString(view, offset, string) {
  for (let i = 0; i < string.length; i++) {
    view.setUint8(offset + i, string.charCodeAt(i));
  }
}

// Alternative wiring kept for reference: accumulate every chunk into
// `audioData` so handlePlay can export the whole recording.
// recorder.OnReceivedData = (data) => {
//   // console.log(data);
//   audioData.push(...data);
// };

// Receive captured audio chunks from the recorder and forward them to the
// 5-second batching/export pipeline (handlePlay2).
recorder.OnReceivedData = (data) => {
  // console.log(data);
  handlePlay2(data);
};



// Start a new microphone recording session.
const handleStart = () => recorder.start();

// Labelled "暂停录音" in the UI, but it actually stops the recorder and
// triggers OnStop with all captured audio.
const handlePause = () => recorder.stop();

/**
 * Triggers a browser download of the given URL via a temporary anchor.
 * @param {string} url - (object) URL to download
 * @param {string} [filename] - suggested file name; defaults to "audio.mp3"
 */
function downloadMP3(url, filename) {
  const anchor = document.createElement("a");
  anchor.href = url;
  anchor.download = filename || "audio.mp3";
  document.body.appendChild(anchor);
  anchor.click();
  anchor.remove();
}


// Module-level state for batching incoming audio chunks.
// (Removed the original's dead `i` counter — it was incremented but never read.)
let tempAudioBuffer = []; // Int16 samples accumulated since the last flush
let startTime = null;     // ms timestamp when the current batch started

/**
 * Receives one chunk of recorded audio, accumulates it, and flushes the
 * batch to processAudioBuffer2 once at least 5 seconds have elapsed.
 * (The original comment said "3 seconds" but the code checks 5000 ms.)
 * @param {Int16Array} audioData - one chunk of 16 kHz Int16 PCM samples
 */
const handlePlay2 = (audioData) => {
  // First chunk of a batch: start the clock.
  if (startTime === null) {
    startTime = Date.now();
  }
  tempAudioBuffer.push(...audioData);
  // Flush roughly every 5 seconds (5000 ms).
  if (Date.now() - startTime >= 5000) {
    startTime = Date.now();
    const batch = tempAudioBuffer;
    tempAudioBuffer = [];
    processAudioBuffer2(batch);
  }
};
/**
 * Encodes one batch of 16 kHz Int16 PCM samples to MP3 (mono, 128 kbps)
 * with lamejs and triggers a browser download of the result.
 * @param {number[]} audioBuffer - accumulated PCM samples
 */
const processAudioBuffer2 = (audioBuffer) => {
  const int16ArrayAudioData = new Int16Array(audioBuffer);
  const sampleRate = 16000; // must match the recorder's output rate
  const encoder = new lamejs.Mp3Encoder(1, sampleRate, 128);
  const mp3Data = [];
  mp3Data.push(encoder.encodeBuffer(int16ArrayAudioData));
  mp3Data.push(encoder.flush()); // write the trailing MP3 frames too
  const blob = new Blob(mp3Data, { type: "audio/mp3" });
  const url = URL.createObjectURL(blob);
  const a = document.createElement("a");
  a.href = url;
  a.download = "recording.mp3";
  document.body.appendChild(a);
  a.click();
  // Fix: the original leaked both the anchor element and the object URL on
  // every 5-second batch; clean up after the download has been initiated.
  document.body.removeChild(a);
  setTimeout(() => URL.revokeObjectURL(url), 1000);
};

/**
 * Encodes everything accumulated in `audioData` to MP3 (mono, 16 kHz,
 * 128 kbps) and downloads it. (The button says "播放录音" but it exports
 * a file rather than playing it back.)
 */
const handlePlay = () => {
  const int16ArrayAudioData = new Int16Array(audioData);
  // Fix: the data is 16-bit PCM; the original log label said "8位" (8-bit).
  console.log("16位录音数据:", int16ArrayAudioData);
  const sampleRate = 16000; // matches the recorder's output rate
  const encoder = new lamejs.Mp3Encoder(1, sampleRate, 128);
  const mp3Data = [];
  mp3Data.push(encoder.encodeBuffer(int16ArrayAudioData));
  mp3Data.push(encoder.flush()); // write the trailing MP3 frames too
  const blob = new Blob(mp3Data, { type: "audio/mp3" });
  const url = URL.createObjectURL(blob);
  const a = document.createElement("a");
  a.href = url;
  a.download = "recording.mp3";
  document.body.appendChild(a);
  a.click();
  // Fix: the original leaked the anchor element and the object URL.
  document.body.removeChild(a);
  setTimeout(() => URL.revokeObjectURL(url), 1000);
};

/**
 * Demo: encodes one second of a 440 Hz (A4) sine wave to MP3 (mono,
 * 44.1 kHz, 128 kbps) and downloads it — useful for verifying that the
 * lamejs setup works independently of the recorder.
 */
const makemp3 = () => {
  const sampleRate = 44100;
  const frequency = 440; // A4
  const samples = new Int16Array(sampleRate); // one second of audio
  for (let i = 0; i < sampleRate; i++) {
    // Full-scale sine; Int16Array truncates the fractional part on store.
    samples[i] = 32767 * Math.sin(2 * Math.PI * frequency * (i / sampleRate));
  }
  console.log("16位正弦数据:", samples);

  const mp3encoder = new lamejs.Mp3Encoder(1, 44100, 128);
  const mp3Data = [];
  mp3Data.push(mp3encoder.encodeBuffer(samples));
  mp3Data.push(mp3encoder.flush()); // write the trailing MP3 frames too

  const blob = new Blob(mp3Data, { type: "audio/mp3" });
  const url = URL.createObjectURL(blob);
  const a = document.createElement("a");
  a.href = url;
  a.download = "recording.mp3";
  document.body.appendChild(a);
  a.click();
  // Fix: the original leaked the anchor element and the object URL.
  document.body.removeChild(a);
  setTimeout(() => URL.revokeObjectURL(url), 1000);
};
</script>


http://www.kler.cn/a/391962.html

相关文章:

  • ❤React-React 组件通讯
  • unity基础,点乘叉乘。
  • 【C++】类与对象的基础概念
  • [CKS] K8S ServiceAccount Set Up
  • 在Flutter中,禁止侧滑的方法
  • Spark 的容错机制:保障数据处理的稳定性与高效性
  • 信息化运维方案,实施方案,开发方案,信息中心安全运维资料(软件资料word)
  • [2024最新] macOS 发起 Bilibili 直播(不使用 OBS)
  • 进程信息和定时任务
  • 数学建模学习(136):使用Python基于Fuzzy WSM、Fuzzy WPM、Fuzzy WASPAS的多准则决策分析
  • Elasticsearch 和 Kibana 8.16:Kibana 获得上下文和 BBQ 速度并节省开支!
  • 使用Spring AI中的RAG技术,实现私有业务领域的大模型系统
  • SpringBoot自定义Starter指南
  • MyBatisPlus(Spring Boot版)的基本使用
  • gpu-V100显卡相关知识
  • 使用多种机器学习调参模型进行二分类建模的全流程,代做分析辅导
  • OceanStor Pacific系列 8.1.0 功能架构
  • 设计模式-七个基本原则之一-里氏替换原则
  • 初始JavaEE篇 —— 网络编程(2):了解套接字,从0到1实现回显服务器
  • 机器人操作臂逆运动学
  • kafka消费数据太慢了,给优化下
  • labview连接sql server数据库
  • MySQL远程连接错误解决:Host is not allowed to connect to this MySQL server
  • 【Rust中的链表实现】
  • 【大数据测试HBase数据库 — 详细教程(含实例与监控调优)】
  • AI编程工具市场是一个庞大且不断增长的市场