Is it possible to merge two audio base64 data strings to create a single audio file?

Is it possible to merge two audio 'base64 data' strings to create a single audio file?


Problem Description

Is it possible to merge two audio 'base64 data' strings to create a single audio file?

I have two looping audio WAVs as base64 strings, like this:

data:audio/x-wav;base64,UklGRuIfQVZFZm1R7SH$WP90AhICLwKT...

I guess I'm doing something really silly, but I'd like to know whether this is possible at all.

I'm trying to merge the two WAVs into a single WAV that can be played in an HTML audio element. My idea was to extract the base64 data of the second one, append it to the first to get one string, and then play it, but the browser returns an error.

This is my script code:

var audio_Data1 = base64_audio1;                // full data URL of the first WAV
var audio_Data2 = base64_audio2.split(',')[1];  // base64 payload of the second WAV
var audio_final = audio_Data1 + audio_Data2;    // naive string concatenation

audioControl.src = audio_final;
audioControl.play();

Thanks in advance for any suggestions!


Edit:

I'm now trying to decode the base64 chunks into buffers and merge the buffers, so that I can encode the result back to base64.

Problem: the base64 that comes back is "AA==".

I think "AA==" means zero bytes of data. I must be doing something wrong when I concatenate the buffers:

var myB64Data  = myB64WavString.split(',');
var myB64Chunk = myB64Data[1];
var myBuffer1 =  base64ToArrayBuffer(myB64Chunk);
var myBuffer2 = ""; //Same process

var myFinalBuffer = [];
myFinalBuffer.push.apply(myFinalBuffer, myBuffer1);
myFinalBuffer.push.apply(myFinalBuffer, myBuffer2); 
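// Note: push.apply treats an ArrayBuffer as an array-like with no length,
// so no audio bytes are actually copied into myFinalBuffer here.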

var myFinalB64 = 'data:audio/x-wav;base64,' + arrayBufferToBase64(myFinalBuffer); 

console.log( myFinalB64 );

The console prints the following: "data:audio/x-wav;base64,AA=="

My working JavaScript functions for encoding/decoding:

function base64ToArrayBuffer(base64) {
   var binary_string =  window.atob(base64);
   var len = binary_string.length;
   var bytes = new Uint8Array( len );
   for (var i = 0; i < len; i++) {
       bytes[i] = binary_string.charCodeAt(i);
   }
   return bytes.buffer;
}

function arrayBufferToBase64( buffer ) {
   var binary = '';
   var bytes = new Uint8Array( buffer );
   var len = bytes.byteLength;
   for (var i = 0; i < len; i++) {
       binary += String.fromCharCode( bytes[ i ] );
   }
   return window.btoa( binary );
}
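
A quick round trip with these helpers (the input value here is illustrative, not taken from the question):

var buf = base64ToArrayBuffer('AAEC');   // decodes to the three bytes 0x00 0x01 0x02
console.log(new Uint8Array(buf));        // Uint8Array [0, 1, 2]
console.log(arrayBufferToBase64(buf));   // "AAEC", the same base64 comes back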

Recommended Answer

I solved it! (After two days of hard work, and borrowing from the answer to Convert AudioBuffer to ArrayBuffer / Blob for WAV Download.) I hope it saves you all the work this took me ;)

var myB64Data  = myB64WavString.split(',');
var myB64Chunk = myB64Data[1];
var myBuffer1 =  base64ToArrayBuffer(myB64Chunk);
var myBuffer2 = ""; // Same process for the second base64 string (see the sketch after this block)

var myFinalBuffer = appendBuffer(myBuffer1, myBuffer2);

myFinalBuffer = getWavBytes(myFinalBuffer, {
  isFloat: false,      // floating point or 16-bit integer samples
  numChannels: 2,      // 1 for mono recordings
  sampleRate: 48000,   // must match the sample rate of your source audio (e.g. 32000)
});


var myFinalB64 = 'data:audio/x-wav;base64,' + arrayBufferToBase64(myFinalBuffer); 

console.log( myFinalB64 );

// Then you can assign the result to an HTML audio element

myAudioControl.src = myFinalB64;
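
The answer leaves myBuffer2 as "same process"; as a minimal sketch, assuming the second WAV is held in a hypothetical myB64WavString2 variable, that step would look like this:

var myB64Data2 = myB64WavString2.split(',');   // hypothetical name for the second data URL string
var myBuffer2  = base64ToArrayBuffer(myB64Data2[1]);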

Helper functions:

function appendBuffer(buffer1, buffer2) {
  var tmp = new Uint8Array(buffer1.byteLength + buffer2.byteLength);
  tmp.set(new Uint8Array(buffer1), 0);
  tmp.set(new Uint8Array(buffer2), buffer1.byteLength);
  return tmp;
};
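
For reference, a quick check of what appendBuffer produces (the values are illustrative):

var a = new Uint8Array([1, 2]).buffer;
var b = new Uint8Array([3, 4]).buffer;
console.log(appendBuffer(a, b));   // Uint8Array [1, 2, 3, 4]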
 

function getWavBytes(buffer, options) {
  const type = options.isFloat ? Float32Array : Uint16Array
  const numFrames = buffer.byteLength / type.BYTES_PER_ELEMENT

  const headerBytes = getWavHeader(Object.assign({}, options, { numFrames }))
  const wavBytes = new Uint8Array(headerBytes.length + buffer.byteLength);

  // prepend header, then add pcmBytes
  wavBytes.set(headerBytes, 0)
  wavBytes.set(new Uint8Array(buffer), headerBytes.length)

  return wavBytes
}

// adapted from https://gist.github.com/also/900023
// returns Uint8Array of WAV header bytes
function getWavHeader(options) {
  const numFrames =      options.numFrames
  const numChannels =    options.numChannels || 2
  const sampleRate =     options.sampleRate || 44100
  const bytesPerSample = options.isFloat? 4 : 2
  const format =         options.isFloat? 3 : 1

  const blockAlign = numChannels * bytesPerSample
  const byteRate = sampleRate * blockAlign
  const dataSize = numFrames * blockAlign

  const buffer = new ArrayBuffer(44)
  const dv = new DataView(buffer)

  let p = 0

  function writeString(s) {
    for (let i = 0; i < s.length; i++) {
      dv.setUint8(p + i, s.charCodeAt(i))
    }
    p += s.length
  }

  function writeUint32(d) {
    dv.setUint32(p, d, true)
    p += 4
  }

  function writeUint16(d) {
    dv.setUint16(p, d, true)
    p += 2
  }

  writeString('RIFF')              // ChunkID
  writeUint32(dataSize + 36)       // ChunkSize
  writeString('WAVE')              // Format
  writeString('fmt ')              // Subchunk1ID
  writeUint32(16)                  // Subchunk1Size
  writeUint16(format)              // AudioFormat
  writeUint16(numChannels)         // NumChannels
  writeUint32(sampleRate)          // SampleRate
  writeUint32(byteRate)            // ByteRate
  writeUint16(blockAlign)          // BlockAlign
  writeUint16(bytesPerSample * 8)  // BitsPerSample
  writeString('data')              // Subchunk2ID
  writeUint32(dataSize)            // Subchunk2Size

  return new Uint8Array(buffer)
}
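
To make the header arithmetic concrete: with the answer's settings (16-bit integer PCM, 2 channels, 48 kHz), each frame takes numChannels * bytesPerSample = 4 bytes, byteRate = 48000 * 4 = 192000 bytes per second, and the header itself is always 44 bytes. A small check (the numbers are illustrative):

var header = getWavHeader({ numFrames: 48000, numChannels: 2, sampleRate: 48000 });
console.log(header.length);   // 44, the fixed RIFF/fmt/data header size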

