improve: Support Stereo audio in export method (#78)
Resolves: #21, #73

Changes:

- format `beatBuilder.html` with Prettier
- rewrite the `_interleave` method to support multiple channels (see the sketch below)
- add a test for the `_interleave` method
- take the channel count and the buffer's actual sample rate (not the Crunker default) into account when exporting to WAV
- organize the values and offsets inside the `_writeHeaders` function so the header is calculated correctly for n channels and it is easier to see where each value comes from
- it turns out that merge and concatenate already work well for stereo files; it was only the export that collapsed the result back to mono
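A minimal sketch of the frame-by-frame interleaving that the rewritten `_interleave` performs (a standalone illustration, not the exact implementation from the diff below):

```ts
// Interleave an AudioBuffer frame by frame; for 2 channels the output is
// [L[0], R[0], L[1], R[1], ..., L[n], R[n]].
function interleaveSketch(input: AudioBuffer): Float32Array {
  const channels = Array.from({ length: input.numberOfChannels }, (_, i) => input.getChannelData(i));
  const result = new Float32Array(input.length * input.numberOfChannels);

  let index = 0;
  for (let frame = 0; frame < input.length; frame++) {
    for (const channel of channels) {
      result[index++] = channel[frame];
    }
  }

  return result;
}
```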
MikeyZat authored Sep 11, 2022
1 parent e713c71 commit bb377d0
Showing 4 changed files with 108 additions and 75 deletions.
41 changes: 21 additions & 20 deletions README.md
@@ -100,54 +100,55 @@ You may optionally provide an object with a `sampleRate` key, but it will default

## crunker.fetchAudio(songURL, anotherSongURL)

Fetch one or more audio files.
Returns: an array of audio buffers in the order they were fetched.
Fetch one or more audio files.\
**Returns:** an array of audio buffers in the order they were fetched.

## crunker.mergeAudio(arrayOfBuffers);

Merge two or more audio buffers.
Returns: a single AudioBuffer object.
Merge two or more audio buffers.\
**Returns:** a single `AudioBuffer` object.

## crunker.concatAudio(arrayOfBuffers);

Concatenate two or more audio buffers in the order specified.
Returns: a single AudioBuffer object.
Concatenate two or more audio buffers in the order specified.\
**Returns:** a single `AudioBuffer` object.

## crunker.padAudio(buffer, padStart, seconds);

Pad the audio with silence, at the beginning, the end, or any specified points through the audio.
Returns: a single AudioBuffer object.
Pad the audio with silence, at the beginning, the end, or any specified points through the audio.\
**Returns:** a single `AudioBuffer` object.

## crunker.export(buffer, type);

Export an audio buffers with MIME type option.
Type: `'audio/mp3', 'audio/wav', 'audio/ogg'`.
Returns: an object containing the blob object, url, and an audio element object.
Export an audio buffer with a MIME type option.\
**Type:** e.g. `'audio/mp3', 'audio/wav', 'audio/ogg'`.\
**IMPORTANT**: the MIME type does **not** change the actual file format. It will always be a `WAVE` file under the hood.\
**Returns:** an object containing the blob object, url, and an audio element object.
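A minimal usage sketch of the export flow (not part of the README diff; a `crunker` instance and an `AudioBuffer` named `buffer` are assumed to exist):

```ts
// The exported data is always WAV-encoded; the type argument only sets the blob's MIME type.
const output = await crunker.export(buffer, 'audio/wav');

crunker.download(output.blob, 'my-track'); // triggers a download of the exported audio
```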

## crunker.download(blob, filename);

Automatically download an exported audio blob with optional filename.
Filename: String not containing the .mp3, .wav, or .ogg file extension.
Returns: the HTMLAnchorElement element used to simulate the automatic download.
Automatically download an exported audio blob with optional filename.\
**Filename:** a string **not** containing the `.mp3`, `.wav`, or `.ogg` file extension.\
**Returns:** the `HTMLAnchorElement` element used to simulate the automatic download.

## crunker.play(buffer);

Starts playing the exported audio buffer in the background.
Returns: the HTMLAudioElement.
Start playing the exported audio buffer in the background.\
**Returns:** the `HTMLAudioElement`.

## crunker.notSupported(callback);

Execute custom code if Web Audio API is not supported by the users browser.
Returns: The callback function.
Execute custom code if the Web Audio API is not supported by the user's browser.\
**Returns:** The callback function.

# Properties

For more detailed API documentation, view the Typescript typings.

## crunker.context

Access the [AudioContext](https://developer.mozilla.org/en-US/docs/Web/API/AudioContext) used internally by a given Crunker.
Returns: [AudioContext](https://developer.mozilla.org/en-US/docs/Web/API/AudioContext).
Access the [AudioContext](https://developer.mozilla.org/en-US/docs/Web/API/AudioContext) used internally by a given Crunker.\
**Returns:** [AudioContext](https://developer.mozilla.org/en-US/docs/Web/API/AudioContext).

# License

83 changes: 46 additions & 37 deletions examples/client/beatBuilder.html
@@ -1,45 +1,54 @@
<!DOCTYPE html>
<html lang="en">
<head>
<head>
<title>Crunker - Beat machine example</title>
<script src="https://unpkg.com/crunker@latest/dist/crunker.js"></script>
<script type="text/javascript">
const mergeOneLine = async (crunker, buffer, times) => {
const audios = times.map(t => crunker.padAudio(buffer, 0, t))
return crunker.mergeAudio(audios)
}

const beatBuilder = async (crunker, beatConf, buffers, beatTiming) =>{
const beatLines = await Promise.all(buffers.map((_,i) => {
return mergeOneLine(crunker, buffers[i], beatTiming[i].map(bt => bt*beatConf.timing))
}))
const beat = await crunker.mergeAudio(beatLines)
const beatTimingBaseArray = Array.from(Array(beatConf.repeats).keys()) //will generate an Array [0,1,2,3,4,5,...,beatConf.repeats]
const beatTiming = beatTimingBaseArray.map(e => beatConf.delayStart + e * beatConf.beats * beatConf.timing) //will convert the array to the exact timestamps where the beat should restart
return await mergeOneLine(crunker, beat, beatTiming)
}

window.onload = async function(){
const crunker = new Crunker.default({sampleRate:96000});
const [hihats] = await crunker.fetchAudio('./drumms_Hi-Hats_Open_Hat.mp3'); //use your own sound here
const [dirtBase] = await crunker.fetchAudio('./drumms_dirt_base.mp3'); //use your own sound here

//add pause to audio
const beat = await beatBuilder(crunker, {delayStart: 1, timing:0.5, repeats: 4, beats: 4},
[dirtBase, hihats], [
[0, 1.5, 2],
[1, 3]
])


const merged = await crunker.mergeAudio([beat])
crunker.play(merged);
const output = await crunker.export(merged, 'audio/ogg');
await crunker.download(output.blob, 'merged');
}
const mergeOneLine = async (crunker, buffer, times) => {
const audios = times.map((t) => crunker.padAudio(buffer, 0, t));
return crunker.mergeAudio(audios);
};

const beatBuilder = async (crunker, beatConf, buffers, beatTiming) => {
const beatLines = await Promise.all(
buffers.map((_, i) => {
return mergeOneLine(
crunker,
buffers[i],
beatTiming[i].map((bt) => bt * beatConf.timing)
);
})
);
const beat = await crunker.mergeAudio(beatLines);
const beatTimingBaseArray = Array.from(Array(beatConf.repeats).keys()); //will generate an Array [0, 1, 2, ..., beatConf.repeats - 1]
const repeatStartTimes = beatTimingBaseArray.map((e) => beatConf.delayStart + e * beatConf.beats * beatConf.timing); //the exact timestamps where the beat should restart; a distinct name avoids redeclaring the beatTiming parameter
return await mergeOneLine(crunker, beat, repeatStartTimes);
};

window.onload = async function () {
const crunker = new Crunker.default({ sampleRate: 96000 });
const [hihats] = await crunker.fetchAudio('./drumms_Hi-Hats_Open_Hat.mp3'); //use your own sound here
const [dirtBase] = await crunker.fetchAudio('./drumms_dirt_base.mp3'); //use your own sound here

//add pause to audio
const beat = await beatBuilder(
crunker,
{ delayStart: 1, timing: 0.5, repeats: 4, beats: 4 },
[dirtBase, hihats],
[
[0, 1.5, 2],
[1, 3],
]
);

const merged = await crunker.mergeAudio([beat]);
crunker.play(merged);
const output = await crunker.export(merged, 'audio/ogg');
await crunker.download(output.blob, 'merged');
};
</script>
</head>
<body>
</head>
<body>
<h1>Crunker - Beat machine example</h1>
</body>
</body>
</html>
49 changes: 31 additions & 18 deletions src/crunker.ts
@@ -206,7 +206,7 @@ export default class Crunker {
*/
export(buffer: AudioBuffer, type: string = 'audio/wav'): ExportedCrunkerAudio {
const recorded = this._interleave(buffer);
const dataview = this._writeHeaders(recorded);
const dataview = this._writeHeaders(recorded, buffer.numberOfChannels, buffer.sampleRate);
const audioBlob = new Blob([dataview], { type });

return {
@@ -295,25 +295,34 @@
*
* @internal
*/
private _writeHeaders(buffer: Float32Array): DataView {
const arrayBuffer = new ArrayBuffer(44 + buffer.length * 2);
private _writeHeaders(buffer: Float32Array, numOfChannels: number, sampleRate: number): DataView {
const bitDepth = 16;
const bytesPerSample = bitDepth / 8;
const sampleSize = numOfChannels * bytesPerSample;

const fileHeaderSize = 8;
const chunkHeaderSize = 36;
const chunkDataSize = buffer.length * bytesPerSample;
const chunkTotalSize = chunkHeaderSize + chunkDataSize;

const arrayBuffer = new ArrayBuffer(fileHeaderSize + chunkTotalSize);
const view = new DataView(arrayBuffer);

this._writeString(view, 0, 'RIFF');
view.setUint32(4, 32 + buffer.length * 2, true);
view.setUint32(4, chunkTotalSize, true);
this._writeString(view, 8, 'WAVE');
this._writeString(view, 12, 'fmt ');
view.setUint32(16, 16, true);
view.setUint16(20, 1, true);
view.setUint16(22, 2, true);
view.setUint32(24, this._sampleRate, true);
view.setUint32(28, this._sampleRate * 4, true);
view.setUint16(32, 4, true);
view.setUint16(34, 16, true);
view.setUint16(22, numOfChannels, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * sampleSize, true);
view.setUint16(32, sampleSize, true);
view.setUint16(34, bitDepth, true);
this._writeString(view, 36, 'data');
view.setUint32(40, buffer.length * 2, true);
view.setUint32(40, chunkDataSize, true);

return this._floatTo16BitPCM(view, buffer, 44);
return this._floatTo16BitPCM(view, buffer, fileHeaderSize + chunkHeaderSize);
}

/**
@@ -347,18 +356,22 @@
* @internal
*/
private _interleave(input: AudioBuffer): Float32Array {
const buffer = input.getChannelData(0),
length = buffer.length * 2,
result = new Float32Array(length);
const channels = Array.from({ length: input.numberOfChannels }, (_, i) => i);
const length = channels.reduce((prev, channelIdx) => prev + input.getChannelData(channelIdx).length, 0);
const result = new Float32Array(length);

let index = 0,
inputIndex = 0;
let index = 0;
let inputIndex = 0;

// for 2 channels it's like: [L[0], R[0], L[1], R[1], ..., L[n], R[n]]
while (index < length) {
result[index++] = buffer[inputIndex];
result[index++] = buffer[inputIndex];
channels.forEach((channelIdx) => {
result[index++] = input.getChannelData(channelIdx)[inputIndex];
});

inputIndex++;
}

return result;
}
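As a sanity check on the new header math, the derived values for a stereo, 16-bit, 44.1 kHz export work out as follows (a standalone sketch with illustrative numbers, not code from the diff):

```ts
// Illustrative numbers: 2 channels, 16-bit samples, 44.1 kHz,
// and an interleaved Float32Array holding 1_000_000 samples (500_000 frames).
const numOfChannels = 2;
const bitDepth = 16;
const sampleRate = 44100;
const interleavedLength = 1_000_000;

const bytesPerSample = bitDepth / 8;                      // 2
const sampleSize = numOfChannels * bytesPerSample;        // 4 (block align: bytes per frame)
const byteRate = sampleRate * sampleSize;                 // 176_400 bytes per second
const chunkDataSize = interleavedLength * bytesPerSample; // 2_000_000 bytes of PCM data
const chunkTotalSize = 36 + chunkDataSize;                // RIFF chunk size written at byte offset 4
```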

10 changes: 10 additions & 0 deletions test/test.js
@@ -65,4 +65,14 @@ describe('Crunker', () => {
it('exports an object with url', () => {
expect(audio.export(buffers[0])).to.have.property('url');
});

it('interleaves two channels', () => {
const audioInput = buffers[0];
const interleaved = audio._interleave(audioInput);
const left = audioInput.getChannelData(0);
const right = audioInput.getChannelData(1);

expect(interleaved.length).to.equal(left.length + right.length);
expect([interleaved[0], interleaved[1]]).to.have.same.members([left[0], right[0]]);
});
});
