diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt
index 9769f48e..7e01c5fe 100644
--- a/.github/actions/spelling/allow.txt
+++ b/.github/actions/spelling/allow.txt
@@ -28,4 +28,5 @@ rnaa
tada
vec
+rnaa
diff --git a/apps/common-app/src/examples/SharedUtils/soundEngines/Clap.ts b/apps/common-app/src/examples/SharedUtils/soundEngines/Clap.ts
index 77cc6940..6a7aa5e8 100644
--- a/apps/common-app/src/examples/SharedUtils/soundEngines/Clap.ts
+++ b/apps/common-app/src/examples/SharedUtils/soundEngines/Clap.ts
@@ -20,7 +20,7 @@ class Clap implements SoundEngine {
}
createNoiseBuffer() {
- const bufferSize = this.audioContext.sampleRate / 10;
+ const bufferSize = this.audioContext.sampleRate / 5;
const buffer = this.audioContext.createBuffer(
1,
bufferSize,
diff --git a/apps/common-app/src/examples/SharedUtils/soundEngines/HiHat.ts b/apps/common-app/src/examples/SharedUtils/soundEngines/HiHat.ts
index 34e79efe..72f8401a 100644
--- a/apps/common-app/src/examples/SharedUtils/soundEngines/HiHat.ts
+++ b/apps/common-app/src/examples/SharedUtils/soundEngines/HiHat.ts
@@ -26,6 +26,7 @@ class HiHat implements SoundEngine {
const oscillator = this.audioContext.createOscillator();
oscillator.type = 'square';
oscillator.frequency.value = this.tone * ratio;
+
const bandpassFilter = this.audioContext.createBiquadFilter();
const highpassFilter = this.audioContext.createBiquadFilter();
const gain = this.audioContext.createGain();
@@ -46,6 +47,7 @@ class HiHat implements SoundEngine {
bandpassFilter.connect(highpassFilter);
highpassFilter.connect(gain);
gain.connect(this.audioContext.destination!);
+
oscillator.start(time);
oscillator.stop(time + this.decay);
});
diff --git a/apps/fabric-example/ios/FabricExample.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist b/apps/fabric-example/ios/FabricExample.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist
new file mode 100644
index 00000000..18d98100
--- /dev/null
+++ b/apps/fabric-example/ios/FabricExample.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>IDEDidComputeMac32BitWarning</key>
+	<true/>
+</dict>
+</plist>
diff --git a/apps/fabric-example/ios/Podfile.lock b/apps/fabric-example/ios/Podfile.lock
index a9c40bae..96960d34 100644
--- a/apps/fabric-example/ios/Podfile.lock
+++ b/apps/fabric-example/ios/Podfile.lock
@@ -2063,8 +2063,8 @@ SPEC CHECKSUMS:
RNReanimated: 77242c6d67416988a2fd9f5cf574bb3e60016362
RNScreens: e389d6a6a66a4f0d3662924ecae803073ccce8ec
SocketRocket: d4aabe649be1e368d1318fdf28a022d714d65748
- Yoga: f8ec45ce98bba1bc93dd28f2ee37215180e6d2b6
+ Yoga: 1d66db49f38fd9e576a1d7c3b081e46ab4c28b9e
PODFILE CHECKSUM: 75ad38075e71875257a2590065853ea6a608b897
-COCOAPODS: 1.15.2
+COCOAPODS: 1.16.2
diff --git a/packages/react-native-audio-api/android/CMakeLists.txt b/packages/react-native-audio-api/android/CMakeLists.txt
index 6f626724..affd853c 100644
--- a/packages/react-native-audio-api/android/CMakeLists.txt
+++ b/packages/react-native-audio-api/android/CMakeLists.txt
@@ -4,11 +4,6 @@ project(react-native-audio-api)
set(CMAKE_VERBOSE_MAKEFILE ON)
set(CMAKE_CXX_STANDARD 20)
-# Detect the operating system
-if(APPLE)
- set(HAVE_ACCELERATE TRUE)
-endif()
-
# Detect the processor and SIMD support
if(CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
set(HAVE_ARM_NEON_INTRINSICS TRUE)
diff --git a/packages/react-native-audio-api/android/src/main/cpp/AudioPlayer/AudioPlayer.cpp b/packages/react-native-audio-api/android/src/main/cpp/AudioPlayer/AudioPlayer.cpp
index 029ebb30..f0954670 100644
--- a/packages/react-native-audio-api/android/src/main/cpp/AudioPlayer/AudioPlayer.cpp
+++ b/packages/react-native-audio-api/android/src/main/cpp/AudioPlayer/AudioPlayer.cpp
@@ -1,10 +1,16 @@
+
+#include "AudioBus.h"
+#include "Constants.h"
+#include "AudioArray.h"
#include "AudioPlayer.h"
#include "AudioContext.h"
namespace audioapi {
-AudioPlayer::AudioPlayer(const std::function<void(float *, int)> &renderAudio)
+
+AudioPlayer::AudioPlayer(const std::function<void(AudioBus *, int)> &renderAudio)
: renderAudio_(renderAudio) {
AudioStreamBuilder builder;
+
builder.setSharingMode(SharingMode::Exclusive)
->setFormat(AudioFormat::Float)
->setFormatConversionAllowed(true)
@@ -13,12 +19,18 @@ AudioPlayer::AudioPlayer(const std::function &renderAudio)
->setSampleRateConversionQuality(SampleRateConversionQuality::Medium)
->setDataCallback(this)
->openStream(mStream_);
+
+ mBus_ = std::make_shared<AudioBus>(getSampleRate(), getBufferSizeInFrames(), CHANNEL_COUNT);
}
int AudioPlayer::getSampleRate() const {
return mStream_->getSampleRate();
}
+int AudioPlayer::getBufferSizeInFrames() const {
+ return mStream_->getBufferSizeInFrames();
+}
+
void AudioPlayer::start() {
if (mStream_) {
mStream_->requestStart();
@@ -38,8 +50,17 @@ DataCallbackResult AudioPlayer::onAudioReady(
void *audioData,
int32_t numFrames) {
auto buffer = static_cast<float *>(audioData);
- renderAudio_(buffer, numFrames);
+
+ renderAudio_(mBus_.get(), numFrames);
+
+ // TODO: optimize this with SIMD?
+ for (int32_t i = 0; i < numFrames; i += 1) {
+ for (int channel = 0; channel < CHANNEL_COUNT; channel += 1) {
+ buffer[i * CHANNEL_COUNT + channel] = mBus_->getChannel(channel)->getData()[i];
+ }
+ }
return DataCallbackResult::Continue;
}
+
} // namespace audioapi
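
For readers following the new callback flow: onAudioReady now renders into a planar AudioBus and then interleaves each channel into Oboe's interleaved output buffer. A minimal standalone sketch of that interleaving step, using plain vectors as stand-ins for the real AudioBus/AudioArray types (illustration only, not part of the patch):

#include <vector>

// planar[channel][frame] -> interleaved[frame * channels + channel],
// the same index mapping used in AudioPlayer::onAudioReady above.
void interleave(const std::vector<std::vector<float>> &planar,
                float *interleaved,
                int frames,
                int channels) {
  for (int i = 0; i < frames; i += 1) {
    for (int channel = 0; channel < channels; channel += 1) {
      interleaved[i * channels + channel] = planar[channel][i];
    }
  }
}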
diff --git a/packages/react-native-audio-api/android/src/main/cpp/AudioPlayer/AudioPlayer.h b/packages/react-native-audio-api/android/src/main/cpp/AudioPlayer/AudioPlayer.h
index 02994793..26383f07 100644
--- a/packages/react-native-audio-api/android/src/main/cpp/AudioPlayer/AudioPlayer.h
+++ b/packages/react-native-audio-api/android/src/main/cpp/AudioPlayer/AudioPlayer.h
@@ -8,12 +8,14 @@ namespace audioapi {
using namespace oboe;
class AudioContext;
+class AudioBus;
class AudioPlayer : public AudioStreamDataCallback {
public:
- explicit AudioPlayer(const std::function<void(float *, int)> &renderAudio);
+ explicit AudioPlayer(const std::function<void(AudioBus *, int)> &renderAudio);
int getSampleRate() const;
+ int getBufferSizeInFrames() const;
void start();
void stop();
@@ -23,8 +25,9 @@ class AudioPlayer : public AudioStreamDataCallback {
int32_t numFrames) override;
private:
- std::function<void(float *, int)> renderAudio_;
+ std::function<void(AudioBus *, int)> renderAudio_;
std::shared_ptr<AudioStream> mStream_;
+ std::shared_ptr<AudioBus> mBus_;
};
} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioArray.cpp b/packages/react-native-audio-api/common/cpp/core/AudioArray.cpp
new file mode 100644
index 00000000..439b9f04
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/core/AudioArray.cpp
@@ -0,0 +1,103 @@
+#include <cstring>
+
+#include "AudioArray.h"
+#include "VectorMath.h"
+
+namespace audioapi {
+
+AudioArray::AudioArray(int size) : size_(size), data_(0) {
+ resize(size);
+}
+
+AudioArray::~AudioArray() {
+ if (data_) {
+ delete[] data_;
+ data_ = 0;
+ }
+}
+
+int AudioArray::getSize() const {
+ return size_;
+}
+
+float* AudioArray::getData() const {
+ return data_;
+}
+
+float& AudioArray::operator[](int index) {
+ return data_[index];
+}
+
+const float& AudioArray::operator[](int index) const {
+ return data_[index];
+}
+
+void AudioArray::normalize() {
+ float maxAbsValue = getMaxAbsValue();
+
+ if (maxAbsValue == 0.0f || maxAbsValue == 1.0f) {
+ return;
+ }
+
+ VectorMath::multiplyByScalar(data_, 1.0f / maxAbsValue, data_, size_);
+}
+
+void AudioArray::resize(int size) {
+ if (size == size_) {
+ if (!data_) {
+ data_ = new float[size];
+ }
+
+ zero(0, size);
+ return;
+ }
+
+ delete[] data_;
+ size_ = size;
+ data_ = new float[size_];
+
+ zero(0, size_);
+}
+
+void AudioArray::scale(float value) {
+ VectorMath::multiplyByScalar(data_, value, data_, size_);
+}
+
+float AudioArray::getMaxAbsValue() const {
+ return VectorMath::maximumMagnitude(data_, size_);
+}
+
+void AudioArray::zero() {
+ zero(0, size_);
+}
+
+void AudioArray::zero(int start, int length) {
+ memset(data_ + start, 0, length * sizeof(float));
+}
+
+void AudioArray::sum(const AudioArray* source) {
+ sum(source, 0, 0, size_);
+}
+
+void AudioArray::sum(const AudioArray* source, int start, int length) {
+ sum(source, start, start, length);
+}
+
+void AudioArray::sum(const AudioArray* source, int sourceStart, int destinationStart, int length) {
+ VectorMath::add(data_ + destinationStart, source->getData() + sourceStart, data_ + destinationStart, length);
+}
+
+void AudioArray::copy(const AudioArray* source) {
+ copy(source, 0, size_);
+}
+
+void AudioArray::copy(const AudioArray* source, int start, int length) {
+ copy(source, start, start, length);
+}
+
+void AudioArray::copy(const AudioArray* source, int sourceStart, int destinationStart, int length) {
+ memcpy(data_ + destinationStart, source->getData() + sourceStart, length * sizeof(float));
+}
+
+} // namespace audioapi
+
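
For reference, AudioArray::normalize rescales samples so the peak magnitude becomes 1.0 and is a no-op for silent or already-normalized data. The same math over a raw float buffer, written out without VectorMath (a sketch, assuming multiplyByScalar is a plain element-wise multiply):

#include <cmath>
#include <cstddef>

void normalize(float *data, std::size_t size) {
  // Find the peak magnitude.
  float maxAbs = 0.0f;
  for (std::size_t i = 0; i < size; i += 1) {
    maxAbs = std::fmax(maxAbs, std::fabs(data[i]));
  }

  // Nothing to do for silence or an already-normalized signal.
  if (maxAbs == 0.0f || maxAbs == 1.0f) {
    return;
  }

  float scale = 1.0f / maxAbs;
  for (std::size_t i = 0; i < size; i += 1) {
    data[i] *= scale;
  }
}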
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioArray.h b/packages/react-native-audio-api/common/cpp/core/AudioArray.h
new file mode 100644
index 00000000..3c59eb90
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/core/AudioArray.h
@@ -0,0 +1,42 @@
+#pragma once
+
+#include
+#include
+
+namespace audioapi {
+
+class AudioArray {
+ public:
+ explicit AudioArray(int size);
+ ~AudioArray();
+
+ [[nodiscard]] int getSize() const;
+ float* getData() const;
+
+
+ float& operator[](int index);
+ const float& operator[](int index) const;
+
+ void normalize();
+ void resize(int size);
+ void scale(float value);
+ float getMaxAbsValue() const;
+
+ void zero();
+ void zero(int start, int length);
+
+ void sum(const AudioArray* source);
+ void sum(const AudioArray* source, int start, int length);
+ void sum(const AudioArray* source, int sourceStart, int destinationStart, int length);
+
+ void copy(const AudioArray* source);
+ void copy(const AudioArray* source, int start, int length);
+ void copy(const AudioArray* source, int sourceStart, int destinationStart, int length);
+
+
+ private:
+ float *data_;
+ int size_;
+};
+
+} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioBuffer.cpp b/packages/react-native-audio-api/common/cpp/core/AudioBuffer.cpp
index 28a28baa..4027abed 100644
--- a/packages/react-native-audio-api/common/cpp/core/AudioBuffer.cpp
+++ b/packages/react-native-audio-api/common/cpp/core/AudioBuffer.cpp
@@ -1,65 +1,31 @@
+#include "AudioBus.h"
+#include "AudioArray.h"
#include "AudioBuffer.h"
namespace audioapi {
-AudioBuffer::AudioBuffer(int numberOfChannels, int length, int sampleRate)
- : numberOfChannels_(numberOfChannels),
- length_(length),
- sampleRate_(sampleRate),
- duration_(static_cast<double>(length) / sampleRate) {
- channels_ = new float *[numberOfChannels];
-
- for (int i = 0; i < numberOfChannels; i++) {
- channels_[i] = new float[length];
-
- for (int j = 0; j < length; j++) {
- channels_[i][j] = 0.0f;
- }
- }
+AudioBuffer::AudioBuffer(int numberOfChannels, int length, int sampleRate) {
+ bus_ = std::make_shared<AudioBus>(sampleRate, length, numberOfChannels);
}
-int AudioBuffer::getNumberOfChannels() const {
- return numberOfChannels_;
+int AudioBuffer::getLength() const {
+ return bus_->getSize();
}
-int AudioBuffer::getLength() const {
- return length_;
+int AudioBuffer::getNumberOfChannels() const {
+ return bus_->getNumberOfChannels();
}
int AudioBuffer::getSampleRate() const {
- return sampleRate_;
+ return bus_->getSampleRate();
}
double AudioBuffer::getDuration() const {
- return duration_;
+ return static_cast<double>(getLength()) / getSampleRate();
}
-float *AudioBuffer::getChannelData(int channel) const {
- return channels_[channel];
-}
-
-std::shared_ptr<AudioBuffer> AudioBuffer::mix(int outputNumberOfChannels) {
- if (outputNumberOfChannels == numberOfChannels_) {
- return shared_from_this();
- }
-
- auto mixedBuffer = std::make_shared<AudioBuffer>(
- outputNumberOfChannels, length_, sampleRate_);
-
- switch (this->numberOfChannels_) {
- case 1:
- mixedBuffer->copyToChannel(this->channels_[0], length_, 0, 0);
- mixedBuffer->copyToChannel(this->channels_[0], length_, 1, 0);
- break;
- case 2:
- for (int i = 0; i < length_; i++) {
- mixedBuffer->channels_[0][i] =
- (this->channels_[0][i] + this->channels_[1][i]) / 2;
- }
- break;
- }
-
- return mixedBuffer;
+float* AudioBuffer::getChannelData(int channel) const {
+ return bus_->getChannel(channel)->getData();
}
void AudioBuffer::copyFromChannel(
@@ -67,11 +33,11 @@ void AudioBuffer::copyFromChannel(
int destinationLength,
int channelNumber,
int startInChannel) const {
- std::copy(
- channels_[channelNumber] + startInChannel,
- channels_[channelNumber] + startInChannel +
- std::min(destinationLength, length_ - startInChannel),
- destination);
+ memcpy(
+ destination,
+ bus_->getChannel(channelNumber)->getData() + startInChannel,
+ std::min(destinationLength, getLength() - startInChannel) * sizeof(float)
+ );
}
void AudioBuffer::copyToChannel(
@@ -79,9 +45,11 @@ void AudioBuffer::copyToChannel(
int sourceLength,
int channelNumber,
int startInChannel) {
- std::copy(
- source,
- source + std::min(sourceLength, length_ - startInChannel),
- channels_[channelNumber] + startInChannel);
+ memcpy(
+ bus_->getChannel(channelNumber)->getData() + startInChannel,
+ source,
+ std::min(sourceLength, getLength() - startInChannel) * sizeof(float)
+ );
}
+
} // namespace audioapi
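
Both copy helpers clamp the transfer length with std::min so a startInChannel near the end of the buffer cannot read or write out of bounds; with a 512-frame buffer, startInChannel = 500 and destinationLength = 64, only 12 samples are copied. A standalone sketch of the clamped copy (plain pointers, not the actual class):

#include <algorithm>
#include <cstring>

void copyFromChannel(float *destination, int destinationLength,
                     const float *channelData, int channelLength,
                     int startInChannel) {
  // Never read past the end of the source channel.
  int framesToCopy = std::min(destinationLength, channelLength - startInChannel);

  if (framesToCopy <= 0) {
    return;
  }

  memcpy(destination, channelData + startInChannel, framesToCopy * sizeof(float));
}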
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioBuffer.h b/packages/react-native-audio-api/common/cpp/core/AudioBuffer.h
index 073b6e87..bca9e193 100644
--- a/packages/react-native-audio-api/common/cpp/core/AudioBuffer.h
+++ b/packages/react-native-audio-api/common/cpp/core/AudioBuffer.h
@@ -7,15 +7,19 @@
namespace audioapi {
+class AudioBus;
+
class AudioBuffer : public std::enable_shared_from_this<AudioBuffer> {
public:
explicit AudioBuffer(int numberOfChannels, int length, int sampleRate);
- [[nodiscard]] int getNumberOfChannels() const;
[[nodiscard]] int getLength() const;
[[nodiscard]] int getSampleRate() const;
[[nodiscard]] double getDuration() const;
+
+ [[nodiscard]] int getNumberOfChannels() const;
[[nodiscard]] float *getChannelData(int channel) const;
+
void copyFromChannel(
float *destination,
int destinationLength,
@@ -28,15 +32,9 @@ class AudioBuffer : public std::enable_shared_from_this {
int startInChannel);
private:
- friend class AudioBufferSourceNode;
+ std::shared_ptr<AudioBus> bus_;
- int numberOfChannels_;
- int length_;
- int sampleRate_;
- double duration_;
- float **channels_;
-
- std::shared_ptr<AudioBuffer> mix(int outputNumberOfChannels);
+ friend class AudioBufferSourceNode;
};
} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioBufferSourceNode.cpp b/packages/react-native-audio-api/common/cpp/core/AudioBufferSourceNode.cpp
index 05ffa043..9ba0ba57 100644
--- a/packages/react-native-audio-api/common/cpp/core/AudioBufferSourceNode.cpp
+++ b/packages/react-native-audio-api/common/cpp/core/AudioBufferSourceNode.cpp
@@ -1,5 +1,9 @@
-#include "AudioBufferSourceNode.h"
+#include <algorithm>
+
+#include "AudioBus.h"
+#include "AudioArray.h"
#include "BaseAudioContext.h"
+#include "AudioBufferSourceNode.h"
namespace audioapi {
@@ -7,6 +11,7 @@ AudioBufferSourceNode::AudioBufferSourceNode(BaseAudioContext *context)
: AudioScheduledSourceNode(context), loop_(false), bufferIndex_(0) {
numberOfInputs_ = 0;
buffer_ = std::shared_ptr<AudioBuffer>(nullptr);
+ isInitialized_ = true;
}
bool AudioBufferSourceNode::getLoop() const {
@@ -23,37 +28,108 @@ void AudioBufferSourceNode::setLoop(bool loop) {
void AudioBufferSourceNode::setBuffer(
const std::shared_ptr &buffer) {
+
if (!buffer) {
buffer_ = std::shared_ptr<AudioBuffer>(nullptr);
return;
}
- buffer_ = buffer->mix(channelCount_);
+ buffer_ = buffer;
}
-bool AudioBufferSourceNode::processAudio(float *audioData, int32_t numFrames) {
- if (!isPlaying_ || !buffer_) {
- return false;
- } else {
- for (int i = 0; i < numFrames; ++i) {
- for (int j = 0; j < channelCount_; j++) {
- audioData[i * channelCount_ + j] =
- buffer_->getChannelData(j)[bufferIndex_];
+// Note: AudioBus copy method will use memcpy if the source buffer and system processing bus have same channel count,
+// otherwise it will use the summing function taking care of up/down mixing.
+void AudioBufferSourceNode::processNode(AudioBus* processingBus, int framesToProcess) {
+ // No audio data to fill, zero the output and return.
+ if (!isPlaying() || !buffer_ || buffer_->getLength() == 0) {
+ processingBus->zero();
+ return;
+ }
+
+ // Easiest case, the buffer is the same length as the number of frames to process, just copy the data.
+ if (framesToProcess == buffer_->getLength()) {
+ processingBus->copy(buffer_->bus_.get());
+
+ if (!loop_) {
+ playbackState_ = PlaybackState::FINISHED;
+ disable();
+ }
+
+ return;
+ }
+
+ // The buffer is longer than the number of frames to process.
+ // We have to keep track of where we are in the buffer.
+ if (framesToProcess < buffer_->getLength()) {
+ int outputBusIndex = 0;
+ int framesToCopy = 0;
+
+ while (framesToProcess - outputBusIndex > 0) {
+ framesToCopy = std::min(framesToProcess - outputBusIndex, buffer_->getLength() - bufferIndex_);
+
+ processingBus->copy(buffer_->bus_.get(), bufferIndex_, outputBusIndex, framesToCopy);
+
+ bufferIndex_ += framesToCopy;
+ outputBusIndex += framesToCopy;
+
+ if (bufferIndex_ < buffer_->getLength()) {
+ continue;
}
- bufferIndex_++;
- if (bufferIndex_ >= buffer_->getLength()) {
- if (loop_) {
- bufferIndex_ = 0;
- } else {
- isPlaying_ = false;
- break;
+ bufferIndex_ %= buffer_->getLength();
+
+ if (!loop_) {
+ playbackState_ = PlaybackState::FINISHED;
+ disable();
+
+ if (framesToProcess - outputBusIndex > 0) {
+ processingBus->zero(outputBusIndex, framesToProcess - outputBusIndex);
}
}
}
- return true;
+ return;
+ }
+
+ // processing bus is longer than the source buffer
+ if (!loop_) {
+ // If we don't loop the buffer, copy it once and zero the remaining processing bus frames.
+ processingBus->copy(buffer_->bus_.get());
+ processingBus->zero(buffer_->getLength(), framesToProcess - buffer_->getLength());
+
+ playbackState_ = PlaybackState::FINISHED;
+ disable();
+
+ return;
+ }
+
+ // If we loop the buffer, we need to loop the buffer framesToProcess / bufferSize times
+ // There might also be a remainder of frames to copy after the loop,
+ // which will also carry over some buffer frames to the next render quantum.
+ int processingBusPosition = 0;
+ int bufferSize = buffer_->getLength();
+ int remainingFrames = framesToProcess - framesToProcess / bufferSize;
+
+ // Do we have some frames left in the buffer from the previous render quantum,
+ // if yes copy them over and reset the buffer position.
+ if (bufferIndex_ > 0) {
+ processingBus->copy(buffer_->bus_.get(), 0, bufferIndex_);
+ processingBusPosition += bufferIndex_;
+ bufferIndex_ = 0;
+ }
+
+ // Copy the entire buffer n times to the processing bus.
+ while (processingBusPosition + bufferSize <= framesToProcess) {
+ processingBus->copy(buffer_->bus_.get());
+ processingBusPosition += bufferSize;
+ }
+
+ // Fill in the remaining frames from the processing buffer and update buffer index for next render quantum.
+ if (remainingFrames > 0) {
+ processingBus->copy(buffer_->bus_.get(), 0, processingBusPosition, remainingFrames);
+ bufferIndex_ = remainingFrames;
}
}
+
} // namespace audioapi
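
The "buffer longer than one render quantum" branch above is easier to follow as a standalone loop. The sketch below (looping case only, not part of the patch) prints the (sourceStart, destinationStart, length) copies that would be issued while bufferIndex carries over between render quanta:

#include <algorithm>
#include <cstdio>

void renderQuantum(int framesToProcess, int bufferLength, int &bufferIndex) {
  int outputBusIndex = 0;

  while (framesToProcess - outputBusIndex > 0) {
    // Copy as much as fits in the quantum without running past the buffer end.
    int framesToCopy = std::min(framesToProcess - outputBusIndex, bufferLength - bufferIndex);

    std::printf("copy(source=%d, destination=%d, length=%d)\n",
                bufferIndex, outputBusIndex, framesToCopy);

    bufferIndex += framesToCopy;
    outputBusIndex += framesToCopy;

    if (bufferIndex >= bufferLength) {
      bufferIndex %= bufferLength; // wrap around when looping
    }
  }
}

int main() {
  int bufferIndex = 0;
  renderQuantum(128, 300, bufferIndex); // copy(0, 0, 128)
  renderQuantum(128, 300, bufferIndex); // copy(128, 0, 128)
  renderQuantum(128, 300, bufferIndex); // copy(256, 0, 44) then copy(0, 44, 84)
  return 0;
}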
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioBufferSourceNode.h b/packages/react-native-audio-api/common/cpp/core/AudioBufferSourceNode.h
index 4abce23e..66feac08 100644
--- a/packages/react-native-audio-api/common/cpp/core/AudioBufferSourceNode.h
+++ b/packages/react-native-audio-api/common/cpp/core/AudioBufferSourceNode.h
@@ -7,6 +7,8 @@
namespace audioapi {
+class AudioBus;
+
class AudioBufferSourceNode : public AudioScheduledSourceNode {
public:
explicit AudioBufferSourceNode(BaseAudioContext *context);
@@ -15,11 +17,14 @@ class AudioBufferSourceNode : public AudioScheduledSourceNode {
[[nodiscard]] std::shared_ptr<AudioBuffer> getBuffer() const;
void setLoop(bool loop);
void setBuffer(const std::shared_ptr<AudioBuffer> &buffer);
- [[nodiscard]] bool processAudio(float *audioData, int32_t numFrames) override;
+
+ protected:
+ void processNode(AudioBus* processingBus, int framesToProcess) override;
private:
bool loop_;
std::shared_ptr<AudioBuffer> buffer_;
int bufferIndex_;
};
+
} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioBus.cpp b/packages/react-native-audio-api/common/cpp/core/AudioBus.cpp
new file mode 100644
index 00000000..4385a4b2
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/core/AudioBus.cpp
@@ -0,0 +1,357 @@
+#include <cmath>
+
+#include "AudioBus.h"
+#include "Constants.h"
+#include "AudioArray.h"
+#include "VectorMath.h"
+#include "BaseAudioContext.h"
+
+// Implementation of channel summing/mixing is based on the WebKit approach, source:
+// https://github.com/WebKit/WebKit/blob/main/Source/WebCore/platform/audio/AudioBus.cpp
+
+const float SQRT_HALF = sqrtf(0.5f);
+
+namespace audioapi {
+
+/**
+ * Public interfaces - memory management
+ */
+AudioBus::AudioBus(int sampleRate, int size)
+ : sampleRate_(sampleRate), size_(size), numberOfChannels_(CHANNEL_COUNT) {
+ createChannels();
+}
+
+AudioBus::AudioBus(int sampleRate, int size, int numberOfChannels)
+ : sampleRate_(sampleRate), size_(size), numberOfChannels_(numberOfChannels) {
+ createChannels();
+}
+
+AudioBus::~AudioBus() {
+ channels_.clear();
+}
+
+/**
+ * Public interfaces - getters
+ */
+
+int AudioBus::getNumberOfChannels() const {
+ return numberOfChannels_;
+}
+
+int AudioBus::getSampleRate() const {
+ return sampleRate_;
+}
+
+int AudioBus::getSize() const {
+ return size_;
+}
+
+AudioArray* AudioBus::getChannel(int index) const {
+ return channels_[index].get();
+}
+
+AudioArray* AudioBus::getChannelByType(int channelType) const {
+ switch (getNumberOfChannels()) {
+ case 1: // mono
+ if (channelType == ChannelMono || channelType == ChannelLeft) {
+ return getChannel(0);
+ }
+ return 0;
+
+ case 2: // stereo
+ switch (channelType) {
+ case ChannelLeft: return getChannel(0);
+ case ChannelRight: return getChannel(1);
+ default: return 0;
+ }
+
+ case 4: // quad
+ switch (channelType) {
+ case ChannelLeft: return getChannel(0);
+ case ChannelRight: return getChannel(1);
+ case ChannelSurroundLeft: return getChannel(2);
+ case ChannelSurroundRight: return getChannel(3);
+ default: return 0;
+ }
+
+ case 5: // 5.0
+ switch (channelType) {
+ case ChannelLeft: return getChannel(0);
+ case ChannelRight: return getChannel(1);
+ case ChannelCenter: return getChannel(2);
+ case ChannelSurroundLeft: return getChannel(3);
+ case ChannelSurroundRight: return getChannel(4);
+ default: return 0;
+ }
+
+ case 6: // 5.1
+ switch (channelType) {
+ case ChannelLeft: return getChannel(0);
+ case ChannelRight: return getChannel(1);
+ case ChannelCenter: return getChannel(2);
+ case ChannelLFE: return getChannel(3);
+ case ChannelSurroundLeft: return getChannel(4);
+ case ChannelSurroundRight: return getChannel(5);
+ default: return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+/**
+ * Public interfaces - audio processing and setters
+ */
+
+void AudioBus::zero() {
+ zero(0, getSize());
+}
+
+void AudioBus::zero(int start, int length) {
+ for (auto it = channels_.begin(); it != channels_.end(); it += 1) {
+ it->get()->zero(start, length);
+ }
+}
+
+void AudioBus::normalize() {
+ float maxAbsValue = this->maxAbsValue();
+
+ if (maxAbsValue == 0.0f || maxAbsValue == 1.0f) {
+ return;
+ }
+
+ float scale = 1.0f / maxAbsValue;
+ this->scale(scale);
+}
+
+void AudioBus::scale(float value) {
+ for (auto it = channels_.begin(); it != channels_.end(); ++it) {
+ it->get()->scale(value);
+ }
+}
+
+float AudioBus::maxAbsValue() const {
+ float maxAbsValue = 1.0f;
+
+ for (auto it = channels_.begin(); it != channels_.end(); ++it) {
+ float channelMaxAbsValue = it->get()->getMaxAbsValue();
+ maxAbsValue = std::max(maxAbsValue, channelMaxAbsValue);
+ }
+
+ return maxAbsValue;
+}
+
+void AudioBus::sum(const AudioBus *source) {
+ sum(source, 0, 0, getSize());
+}
+
+void AudioBus::sum(const AudioBus *source, int start, int length) {
+ sum(source, start, start, length);
+}
+
+void AudioBus::sum(const AudioBus *source, int sourceStart, int destinationStart, int length) {
+ if (source == this) {
+ return;
+ }
+
+ int numberOfSourceChannels = source->getNumberOfChannels();
+ int numberOfChannels = getNumberOfChannels();
+
+ // TODO: consider adding ability to enforce discrete summing (if/when it will be useful).
+ // Source channel count is smaller than current bus, we need to up-mix.
+ if (numberOfSourceChannels < numberOfChannels) {
+ sumByUpMixing(source, sourceStart, destinationStart, length);
+ return;
+ }
+
+ // Source channel count is larger than current bus, we need to down-mix.
+ if (numberOfSourceChannels > numberOfChannels) {
+ sumByDownMixing(source, sourceStart, destinationStart, length);
+ return;
+ }
+
+ // Source and destination channel counts are the same. Just sum the channels.
+ for (int i = 0; i < numberOfChannels_; i += 1) {
+ getChannel(i)->sum(source->getChannel(i), sourceStart, destinationStart, length);
+ }
+}
+
+void AudioBus::copy(const AudioBus *source) {
+ copy(source, 0, 0, getSize());
+}
+
+void AudioBus::copy(const AudioBus *source, int start, int length) {
+ copy(source, start, start, length);
+}
+
+void AudioBus::copy(const AudioBus *source, int sourceStart, int destinationStart, int length) {
+ if (source == this) {
+ return;
+ }
+
+ if (source->getNumberOfChannels() == getNumberOfChannels()) {
+ for (int i = 0; i < getNumberOfChannels(); i += 1) {
+ getChannel(i)->copy(source->getChannel(i), sourceStart, destinationStart, length);
+ }
+
+ return;
+ }
+
+ // zero + sum is equivalent to copy, but takes care of up/down-mixing.
+ zero(destinationStart, length);
+ sum(source, sourceStart, destinationStart, length);
+}
+
+/**
+ * Internal tooling - channel initialization
+ */
+
+void AudioBus::createChannels() {
+ channels_ = std::vector<std::shared_ptr<AudioArray>>(numberOfChannels_);
+
+ for (int i = 0; i < numberOfChannels_; i += 1) {
+ channels_[i] = std::make_shared<AudioArray>(size_);
+ }
+}
+
+/**
+ * Internal tooling - channel summing
+ */
+
+void AudioBus::discreteSum(const AudioBus *source, int sourceStart, int destinationStart, int length) {
+ int numberOfChannels = std::min(getNumberOfChannels(), source->getNumberOfChannels());
+
+ // In case of source > destination, we "down-mix" and drop the extra channels.
+ // In case of source < destination, we "up-mix" as many channels as we have, leaving the remaining channels untouched.
+ for (int i = 0; i < numberOfChannels; i++) {
+ getChannel(i)->sum(source->getChannel(i), sourceStart, destinationStart, length);
+ }
+}
+
+void AudioBus::sumByUpMixing(const AudioBus *source, int sourceStart, int destinationStart, int length) {
+ int numberOfSourceChannels = source->getNumberOfChannels();
+ int numberOfChannels = getNumberOfChannels();
+
+ // Mono to stereo (1 -> 2, 4)
+ if (numberOfSourceChannels == 1 && (numberOfChannels == 2 || numberOfChannels == 4)) {
+ AudioArray* sourceChannel = source->getChannelByType(ChannelMono);
+
+ getChannelByType(ChannelLeft)->sum(sourceChannel, sourceStart, destinationStart, length);
+ getChannelByType(ChannelRight)->sum(sourceChannel, sourceStart, destinationStart, length);
+ return;
+ }
+
+ // Mono to 5.1 (1 -> 6)
+ if (numberOfSourceChannels == 1 && numberOfChannels == 6) {
+ AudioArray* sourceChannel = source->getChannel(0);
+
+ getChannelByType(ChannelCenter)->sum(sourceChannel, sourceStart, destinationStart, length);
+ return;
+ }
+
+ // Stereo 2 to stereo 4 or 5.1 (2 -> 4, 6)
+ if (numberOfSourceChannels == 2 && (numberOfChannels == 4 || numberOfChannels == 6)) {
+ getChannelByType(ChannelLeft)->sum(source->getChannelByType(ChannelLeft), sourceStart, destinationStart, length);
+ getChannelByType(ChannelRight)->sum(source->getChannelByType(ChannelRight), sourceStart, destinationStart, length);
+ return;
+ }
+
+ // Stereo 4 to 5.1 (4 -> 6)
+ if (numberOfSourceChannels == 4 && numberOfChannels == 6) {
+ getChannelByType(ChannelLeft)->sum(source->getChannelByType(ChannelLeft), sourceStart, destinationStart, length);
+ getChannelByType(ChannelRight)->sum(source->getChannelByType(ChannelRight), sourceStart, destinationStart, length);
+ getChannelByType(ChannelSurroundLeft)->sum(source->getChannelByType(ChannelSurroundLeft), sourceStart, destinationStart, length);
+ getChannelByType(ChannelSurroundRight)->sum(source->getChannelByType(ChannelSurroundRight), sourceStart, destinationStart, length);
+ return;
+ }
+
+ discreteSum(source, sourceStart, destinationStart, length);
+}
+
+void AudioBus::sumByDownMixing(const AudioBus *source, int sourceStart, int destinationStart, int length) {
+ int numberOfSourceChannels = source->getNumberOfChannels();
+ int numberOfChannels = getNumberOfChannels();
+
+ // Stereo to mono (2 -> 1): output += 0.5 * (input.left + input.right).
+ if (numberOfSourceChannels == 2 && numberOfChannels == 1) {
+ float* sourceLeft = source->getChannelByType(ChannelLeft)->getData();
+ float* sourceRight = source->getChannelByType(ChannelRight)->getData();
+
+ float* destinationData = getChannelByType(ChannelMono)->getData();
+
+ VectorMath::multiplyByScalarThenAddToOutput(sourceLeft + sourceStart, 0.5f, destinationData + destinationStart, length);
+ VectorMath::multiplyByScalarThenAddToOutput(sourceRight + sourceStart, 0.5f, destinationData + destinationStart, length);
+ return;
+ }
+
+ // Stereo 4 to mono: output += 0.25 * (input.left + input.right + input.surroundLeft + input.surroundRight)
+ if (numberOfSourceChannels == 4 && numberOfChannels == 1) {
+ float* sourceLeft = source->getChannelByType(ChannelLeft)->getData();
+ float* sourceRight = source->getChannelByType(ChannelRight)->getData();
+ float* sourceSurroundLeft = source->getChannelByType(ChannelSurroundLeft)->getData();
+ float* sourceSurroundRight = source->getChannelByType(ChannelSurroundRight)->getData();
+
+ float* destinationData = getChannelByType(ChannelMono)->getData();
+
+ VectorMath::multiplyByScalarThenAddToOutput(sourceLeft + sourceStart, 0.25f, destinationData + destinationStart, length);
+ VectorMath::multiplyByScalarThenAddToOutput(sourceRight + sourceStart, 0.25f, destinationData + destinationStart, length);
+ VectorMath::multiplyByScalarThenAddToOutput(sourceSurroundLeft + sourceStart, 0.25f, destinationData + destinationStart, length);
+ VectorMath::multiplyByScalarThenAddToOutput(sourceSurroundRight + sourceStart, 0.25f, destinationData + destinationStart, length);
+ return;
+ }
+
+ // 5.1 to stereo:
+ // output.left += input.left + sqrt(1/2) * (input.center + input.surroundLeft)
+ // output.right += input.right + sqrt(1/2) * (input.center + input.surroundRight)
+ if (numberOfSourceChannels == 6 && numberOfChannels == 2) {
+ float* sourceLeft = source->getChannelByType(ChannelLeft)->getData();
+ float* sourceRight = source->getChannelByType(ChannelRight)->getData();
+ float* sourceCenter = source->getChannelByType(ChannelCenter)->getData();
+ float* sourceSurroundLeft = source->getChannelByType(ChannelSurroundLeft)->getData();
+ float* sourceSurroundRight = source->getChannelByType(ChannelSurroundRight)->getData();
+
+ float* destinationLeft = getChannelByType(ChannelLeft)->getData();
+ float* destinationRight = getChannelByType(ChannelRight)->getData();
+
+ VectorMath::add(sourceLeft + sourceStart, destinationLeft + destinationStart, destinationLeft + destinationStart, length);
+ VectorMath::multiplyByScalarThenAddToOutput(sourceCenter + sourceStart, SQRT_HALF, destinationLeft + destinationStart, length);
+ VectorMath::multiplyByScalarThenAddToOutput(sourceSurroundLeft + sourceStart, SQRT_HALF, destinationLeft + destinationStart, length);
+
+ VectorMath::add(sourceRight + sourceStart, destinationRight + destinationStart, destinationRight + destinationStart, length);
+ VectorMath::multiplyByScalarThenAddToOutput(sourceCenter + sourceStart, SQRT_HALF, destinationRight + destinationStart, length);
+ VectorMath::multiplyByScalarThenAddToOutput(sourceSurroundRight + sourceStart, SQRT_HALF, destinationRight + destinationStart, length);
+ return;
+ }
+
+ // 5.1 to stereo 4:
+ // output.left += input.left + sqrt(1/2) * input.center
+ // output.right += input.right + sqrt(1/2) * input.center
+ // output.surroundLeft += input.surroundLeft
+ // output.surroundRight += input.surroundRight
+ if (numberOfSourceChannels == 6 && numberOfChannels == 4) {
+ float* sourceLeft = source->getChannelByType(ChannelLeft)->getData();
+ float* sourceRight = source->getChannelByType(ChannelRight)->getData();
+ float* sourceCenter = source->getChannelByType(ChannelCenter)->getData();
+ float* sourceSurroundLeft = source->getChannelByType(ChannelSurroundLeft)->getData();
+ float* sourceSurroundRight = source->getChannelByType(ChannelSurroundRight)->getData();
+
+ float* destinationLeft = getChannelByType(ChannelLeft)->getData();
+ float* destinationRight = getChannelByType(ChannelRight)->getData();
+ float* destinationSurroundLeft = getChannelByType(ChannelSurroundLeft)->getData();
+ float* destinationSurroundRight = getChannelByType(ChannelSurroundRight)->getData();
+
+ VectorMath::add(sourceLeft + sourceStart, destinationLeft + destinationStart, destinationLeft + destinationStart, length);
+ VectorMath::multiplyByScalarThenAddToOutput(sourceCenter, SQRT_HALF, destinationLeft + destinationStart, length);
+
+ VectorMath::add(sourceRight + sourceStart, destinationRight + destinationStart, destinationRight + destinationStart, length);
+ VectorMath::multiplyByScalarThenAddToOutput(sourceCenter, SQRT_HALF, destinationRight + destinationStart, length);
+
+ VectorMath::add(sourceSurroundLeft + sourceStart, destinationSurroundLeft + destinationStart, destinationSurroundLeft + destinationStart, length);
+ VectorMath::add(sourceSurroundRight + sourceStart, destinationSurroundRight + destinationStart, destinationSurroundRight + destinationStart, length);
+ return;
+ }
+
+ discreteSum(source, sourceStart, destinationStart, length);
+}
+
+} // namespace audioapi
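
The down-mix gains follow the Web Audio (and WebKit) conventions: stereo to mono sums each channel at 0.5, quad to mono at 0.25, and the 5.1 centre/surround contributions are scaled by sqrt(1/2) ≈ 0.707 so levels stay roughly balanced. The stereo-to-mono case written out with plain arrays (illustration only):

#include <cstddef>

// output += 0.5f * (left + right), matching the first branch of sumByDownMixing.
void sumStereoToMono(const float *left, const float *right, float *mono, std::size_t length) {
  for (std::size_t i = 0; i < length; i += 1) {
    mono[i] += 0.5f * (left[i] + right[i]);
  }
}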
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioBus.h b/packages/react-native-audio-api/common/cpp/core/AudioBus.h
new file mode 100644
index 00000000..b54dbf60
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/core/AudioBus.h
@@ -0,0 +1,63 @@
+#pragma once
+
+#include
+#include
+#include
+
+namespace audioapi {
+
+class BaseAudioContext;
+class AudioArray;
+
+class AudioBus {
+ public:
+ enum {
+ ChannelMono = 0,
+ ChannelLeft = 0,
+ ChannelRight = 1,
+ ChannelCenter = 2,
+ ChannelLFE = 3,
+ ChannelSurroundLeft = 4,
+ ChannelSurroundRight = 5,
+ };
+
+ explicit AudioBus(int sampleRate, int size);
+ explicit AudioBus(int sampleRate, int size, int numberOfChannels);
+
+ ~AudioBus();
+
+ [[nodiscard]] int getNumberOfChannels() const;
+ [[nodiscard]] int getSampleRate() const;
+ [[nodiscard]] int getSize() const;
+ AudioArray* getChannel(int index) const;
+ AudioArray* getChannelByType(int channelType) const;
+
+ void normalize();
+ void scale(float value);
+ float maxAbsValue() const;
+
+ void zero();
+ void zero(int start, int length);
+
+ void sum(const AudioBus *source);
+ void sum(const AudioBus *source, int start, int length);
+ void sum(const AudioBus *source, int sourceStart, int destinationStart, int length);
+
+ void copy(const AudioBus *source);
+ void copy(const AudioBus *source, int start, int length);
+ void copy(const AudioBus *source, int sourceStart, int destinationStart, int length);
+
+ private:
+ std::vector<std::shared_ptr<AudioArray>> channels_;
+
+ int numberOfChannels_;
+ int sampleRate_;
+ int size_;
+
+ void createChannels();
+ void discreteSum(const AudioBus *source, int sourceStart, int destinationStart, int length);
+ void sumByUpMixing(const AudioBus *source, int sourceStart, int destinationStart, int length);
+ void sumByDownMixing(const AudioBus *source, int sourceStart, int destinationStart, int length);
+};
+
+} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioContext.cpp b/packages/react-native-audio-api/common/cpp/core/AudioContext.cpp
index 41873ce9..1fb3608f 100644
--- a/packages/react-native-audio-api/common/cpp/core/AudioContext.cpp
+++ b/packages/react-native-audio-api/common/cpp/core/AudioContext.cpp
@@ -1,5 +1,12 @@
+#ifdef ANDROID
+#include "AudioPlayer.h"
+#else
+#include "IOSAudioPlayer.h"
+#endif
+
#include "AudioContext.h"
+
namespace audioapi {
AudioContext::AudioContext() : BaseAudioContext() {}
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioDestinationNode.cpp b/packages/react-native-audio-api/common/cpp/core/AudioDestinationNode.cpp
index 3febab08..898d2258 100644
--- a/packages/react-native-audio-api/common/cpp/core/AudioDestinationNode.cpp
+++ b/packages/react-native-audio-api/common/cpp/core/AudioDestinationNode.cpp
@@ -1,48 +1,45 @@
-#include "AudioDestinationNode.h"
+#include "AudioBus.h"
+#include "AudioNode.h"
+#include "VectorMath.h"
+#include "AudioNodeManager.h"
#include "BaseAudioContext.h"
+#include "AudioDestinationNode.h"
namespace audioapi {
AudioDestinationNode::AudioDestinationNode(BaseAudioContext *context)
- : AudioNode(context) {
+ : AudioNode(context), currentSampleFrame_(0) {
numberOfOutputs_ = 0;
numberOfInputs_ = INT_MAX;
channelCountMode_ = ChannelCountMode::EXPLICIT;
+ isInitialized_ = true;
}
-void AudioDestinationNode::renderAudio(float *audioData, int32_t numFrames) {
- processAudio(audioData, numFrames);
+std::size_t AudioDestinationNode::getCurrentSampleFrame() const {
+ return currentSampleFrame_;
}
-bool AudioDestinationNode::processAudio(float *audioData, int32_t numFrames) {
- int numSamples = numFrames * CHANNEL_COUNT;
-
- if (mixingBuffer == nullptr) {
- mixingBuffer = std::make_unique<float[]>(numSamples);
- }
+double AudioDestinationNode::getCurrentTime() const {
+ return static_cast<double>(currentSampleFrame_) / context_->getSampleRate();
+}
- memset(audioData, 0.0f, sizeof(float) * numSamples);
+void AudioDestinationNode::renderAudio(AudioBus *destinationBus, int32_t numFrames) {
+ context_->getNodeManager()->preProcessGraph();
+ destinationBus->zero();
- for (auto &node : inputNodes_) {
- if (node && node->processAudio(mixingBuffer.get(), numFrames)) {
- normalize(mixingBuffer.get(), numFrames);
- VectorMath::add(audioData, mixingBuffer.get(), audioData, numSamples);
- }
+ if (!numFrames) {
+ return;
}
- return true;
-}
-
-void AudioDestinationNode::normalize(float *audioData, int32_t numFrames) {
- auto maxValue = std::max(
- 1.0f, VectorMath::maximumMagnitude(audioData, numFrames * channelCount_));
+ AudioBus* processedBus = processAudio(destinationBus, numFrames);
- if (maxValue == 1.0f) {
- return;
+ if (processedBus && processedBus != destinationBus) {
+ destinationBus->copy(processedBus);
}
- VectorMath::multiplyByScalar(
- audioData, 1.0f / maxValue, audioData, numFrames * channelCount_);
+ destinationBus->normalize();
+
+ currentSampleFrame_ += numFrames;
}
} // namespace audioapi
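
With this change the context clock is driven by rendered audio rather than the wall clock: currentSampleFrame_ advances by numFrames on every render call and currentTime is simply currentSampleFrame_ / sampleRate. A small illustration of the arithmetic (example values, not taken from the patch):

#include <cstddef>
#include <cstdio>

int main() {
  std::size_t currentSampleFrame = 0;
  const int sampleRate = 44100;
  const int framesPerQuantum = 128;

  // Ten render quanta advance the clock by 1280 frames.
  for (int i = 0; i < 10; i += 1) {
    currentSampleFrame += framesPerQuantum;
  }

  double currentTime = static_cast<double>(currentSampleFrame) / sampleRate;
  std::printf("currentTime = %.4f s\n", currentTime); // 1280 / 44100 ≈ 0.0290 s
  return 0;
}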
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioDestinationNode.h b/packages/react-native-audio-api/common/cpp/core/AudioDestinationNode.h
index 424cb7f3..f345cdd2 100644
--- a/packages/react-native-audio-api/common/cpp/core/AudioDestinationNode.h
+++ b/packages/react-native-audio-api/common/cpp/core/AudioDestinationNode.h
@@ -1,26 +1,32 @@
#pragma once
-#include
-#include
#include
+#include
+#include
#include "AudioNode.h"
-#include "VectorMath.h"
namespace audioapi {
+class AudioBus;
+class BaseAudioContext;
+
class AudioDestinationNode : public AudioNode {
public:
explicit AudioDestinationNode(BaseAudioContext *context);
- void renderAudio(float *audioData, int32_t numFrames);
+ void renderAudio(AudioBus* audioData, int32_t numFrames);
+
+ std::size_t getCurrentSampleFrame() const;
+ double getCurrentTime() const;
protected:
- bool processAudio(float *audioData, int32_t numFrames) override;
+ // DestinationNode is triggered by AudioContext using renderAudio
+ // processNode function is not necessary and is never called.
+ void processNode(AudioBus*, int) final { };
private:
- std::unique_ptr<float[]> mixingBuffer;
-
- void normalize(float *audioData, int32_t numFrames);
+ std::size_t currentSampleFrame_;
};
+
} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioNode.cpp b/packages/react-native-audio-api/common/cpp/core/AudioNode.cpp
index c7fff825..be7d7f19 100644
--- a/packages/react-native-audio-api/common/cpp/core/AudioNode.cpp
+++ b/packages/react-native-audio-api/common/cpp/core/AudioNode.cpp
@@ -1,11 +1,18 @@
+#include <algorithm>
+
+#include "AudioBus.h"
#include "AudioNode.h"
#include "BaseAudioContext.h"
+#include "AudioNodeManager.h"
namespace audioapi {
-AudioNode::AudioNode(BaseAudioContext *context) : context_(context) {}
+AudioNode::AudioNode(BaseAudioContext *context) : context_(context) {
+ audioBus_ = std::make_shared<AudioBus>(context->getSampleRate(), context->getBufferSizeInFrames(), channelCount_);
+}
AudioNode::~AudioNode() {
+ isInitialized_ = false;
cleanup();
}
@@ -30,39 +37,186 @@ std::string AudioNode::getChannelInterpretation() const {
}
void AudioNode::connect(const std::shared_ptr<AudioNode> &node) {
- if (numberOfOutputs_ > outputNodes_.size() &&
- node->getNumberOfInputs() > node->inputNodes_.size()) {
- outputNodes_.push_back(node);
- node->inputNodes_.push_back(shared_from_this());
- }
+ context_->getNodeManager()->addPendingConnection(shared_from_this(), node, AudioNodeManager::ConnectionType::CONNECT);
+}
+
+void AudioNode::connectNode(const std::shared_ptr<AudioNode> &node) {
+ outputNodes_.push_back(node);
+ node->onInputConnected(this);
}
void AudioNode::disconnect(const std::shared_ptr<AudioNode> &node) {
- outputNodes_.erase(
- std::remove(outputNodes_.begin(), outputNodes_.end(), node),
- outputNodes_.end());
- if (auto sharedThis = shared_from_this()) {
- node->inputNodes_.erase(
- std::remove(
- node->inputNodes_.begin(), node->inputNodes_.end(), sharedThis),
- node->inputNodes_.end());
+ context_->getNodeManager()->addPendingConnection(shared_from_this(), node, AudioNodeManager::ConnectionType::DISCONNECT);
+}
+
+void AudioNode::disconnectNode(const std::shared_ptr<AudioNode> &node) {
+ node->onInputDisconnected(this);
+
+ auto position = std::find(outputNodes_.begin(), outputNodes_.end(), node);
+
+ if (position != outputNodes_.end()) {
+ outputNodes_.erase(position);
+ }
+}
+
+bool AudioNode::isEnabled() const {
+ return isEnabled_;
+}
+
+void AudioNode::enable() {
+ isEnabled_ = true;
+
+ for (auto it = outputNodes_.begin(); it != outputNodes_.end(); ++it) {
+ it->get()->onInputEnabled();
+ }
+}
+
+void AudioNode::disable() {
+ isEnabled_ = false;
+
+ for (auto it = outputNodes_.begin(); it != outputNodes_.end(); ++it) {
+ it->get()->onInputDisabled();
+ }
+}
+
+std::string AudioNode::toString(ChannelCountMode mode) {
+ switch (mode) {
+ case ChannelCountMode::MAX:
+ return "max";
+ case ChannelCountMode::CLAMPED_MAX:
+ return "clamped-max";
+ case ChannelCountMode::EXPLICIT:
+ return "explicit";
+ default:
+ throw std::invalid_argument("Unknown channel count mode");
}
}
+std::string AudioNode::toString(ChannelInterpretation interpretation) {
+ switch (interpretation) {
+ case ChannelInterpretation::SPEAKERS:
+ return "speakers";
+ case ChannelInterpretation::DISCRETE:
+ return "discrete";
+ default:
+ throw std::invalid_argument("Unknown channel interpretation");
+ }
+}
+
+AudioBus* AudioNode::processAudio(AudioBus* outputBus, int framesToProcess) {
+ if (!isInitialized_) {
+ return outputBus;
+ }
+
+ std::size_t currentSampleFrame = context_->getCurrentSampleFrame();
+
+ // check if the node has already been processed for this rendering quantum
+ bool isAlreadyProcessed = currentSampleFrame == lastRenderedFrame_;
+
+ // Node can't use output bus if:
+ // - outputBus is not provided, which means that next node is doing a multi-node summing.
+ // - it has more than one input, which means that it has to sum all inputs using internal bus.
+ // - it has more than one output, so each output node can get the processed data without re-calculating the node.
+ bool canUseOutputBus = outputBus != 0 && inputNodes_.size() < 2 && outputNodes_.size() < 2;
+
+ if (isAlreadyProcessed) {
+ // If it was already processed in the rendering quantum, return it.
+ return audioBus_.get();
+ }
+
+ // Update the last rendered frame before processing node and its inputs.
+ lastRenderedFrame_ = currentSampleFrame;
+
+ AudioBus* processingBus = canUseOutputBus ? outputBus : audioBus_.get();
+
+ if (!canUseOutputBus) {
+ // Clear the bus before summing all connected nodes.
+ processingBus->zero();
+ }
+
+ if (inputNodes_.empty()) {
+ // If there are no connected inputs, process the node just to advance the audio params.
+ // The node will output silence anyway.
+ processNode(processingBus, framesToProcess);
+ return processingBus;
+ }
+
+ for (auto it = inputNodes_.begin(); it != inputNodes_.end(); ++it) {
+ if (!(*it)->isEnabled()) {
+ continue;
+ }
+
+ // Process first connected node, it can be directly connected to the processingBus,
+ // resulting in one less summing operation.
+ if (it == inputNodes_.begin()) {
+ AudioBus* inputBus = (*it)->processAudio(processingBus, framesToProcess);
+
+ if (inputBus != processingBus) {
+ processingBus->sum(inputBus);
+ }
+ } else {
+ // Enforce the summing to be done using the internal bus.
+ AudioBus* inputBus = (*it)->processAudio(0, framesToProcess);
+ if (inputBus) {
+ processingBus->sum(inputBus);
+ }
+ }
+ }
+
+ // Finally, process the node itself.
+ processNode(processingBus, framesToProcess);
+
+ return processingBus;
+}
+
void AudioNode::cleanup() {
outputNodes_.clear();
inputNodes_.clear();
}
-bool AudioNode::processAudio(float *audioData, int32_t numFrames) {
- bool isPlaying = false;
- for (auto &node : inputNodes_) {
- if (node->processAudio(audioData, numFrames)) {
- isPlaying = true;
- }
+void AudioNode::onInputEnabled() {
+ numberOfEnabledInputNodes_ += 1;
+
+ if (!isEnabled()) {
+ enable();
}
+}
+
+void AudioNode::onInputDisabled() {
+ numberOfEnabledInputNodes_ -= 1;
- return isPlaying;
+ if (isEnabled() && numberOfEnabledInputNodes_ == 0) {
+ disable();
+ }
+}
+
+void AudioNode::onInputConnected(AudioNode *node) {
+ inputNodes_.push_back(node);
+
+ if (node->isEnabled()) {
+ onInputEnabled();
+ }
+}
+
+void AudioNode::onInputDisconnected(AudioNode *node) {
+ auto position = std::find(inputNodes_.begin(), inputNodes_.end(), node);
+
+ if (position != inputNodes_.end()) {
+ inputNodes_.erase(position);
+ }
+
+
+ if (inputNodes_.size() > 0) {
+ return;
+ }
+
+ if (isEnabled()) {
+ node->onInputDisabled();
+ }
+
+ for (auto outputNode : outputNodes_) {
+ disconnectNode(outputNode);
+ }
}
} // namespace audioapi
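
A detail worth noting in processAudio: the node memoizes its work per render quantum by comparing the context's current sample frame with lastRenderedFrame_, so a node fanned out to several outputs is only processed once per quantum. The caching idea in isolation (a hypothetical, trimmed-down node, not the project's class):

#include <cstddef>
#include <cstdint>

struct CachedNode {
  std::size_t lastRenderedFrame = SIZE_MAX; // SIZE_MAX means "never rendered"
  float cachedValue = 0.0f;

  float process(std::size_t currentSampleFrame) {
    if (currentSampleFrame == lastRenderedFrame) {
      return cachedValue; // already rendered in this quantum, reuse the result
    }

    lastRenderedFrame = currentSampleFrame;
    cachedValue = render();
    return cachedValue;
  }

  float render() {
    return 0.0f; // placeholder for the per-quantum DSP work
  }
};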
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioNode.h b/packages/react-native-audio-api/common/cpp/core/AudioNode.h
index 0b331dc2..e769c8ad 100644
--- a/packages/react-native-audio-api/common/cpp/core/AudioNode.h
+++ b/packages/react-native-audio-api/common/cpp/core/AudioNode.h
@@ -3,14 +3,14 @@
#include <memory>
#include <string>
#include <vector>
+
+#include "Constants.h"
#include "ChannelCountMode.h"
#include "ChannelInterpretation.h"
-#include "Constants.h"
-
-// channelCount always equal to 2
namespace audioapi {
+class AudioBus;
class BaseAudioContext;
class AudioNode : public std::enable_shared_from_this<AudioNode> {
@@ -25,47 +25,50 @@ class AudioNode : public std::enable_shared_from_this {
void connect(const std::shared_ptr<AudioNode> &node);
void disconnect(const std::shared_ptr<AudioNode> &node);
- // Change public to protected
- virtual bool processAudio(float *audioData, int32_t numFrames);
+ bool isEnabled() const;
+ void enable();
+ void disable();
protected:
+ friend class AudioNodeManager;
+ friend class AudioDestinationNode;
+
BaseAudioContext *context_;
+ std::shared_ptr<AudioBus> audioBus_;
+
+ int channelCount_ = CHANNEL_COUNT;
+
int numberOfInputs_ = 1;
int numberOfOutputs_ = 1;
- int channelCount_ = CHANNEL_COUNT;
+ int numberOfEnabledInputNodes_ = 0;
+
+ bool isInitialized_ = false;
+ bool isEnabled_ = true;
+
+ std::size_t lastRenderedFrame_ { SIZE_MAX };
+
ChannelCountMode channelCountMode_ = ChannelCountMode::MAX;
ChannelInterpretation channelInterpretation_ =
ChannelInterpretation::SPEAKERS;
- std::vector<std::shared_ptr<AudioNode>> inputNodes_ = {};
+ std::vector<AudioNode *> inputNodes_ = {};
std::vector<std::shared_ptr<AudioNode>> outputNodes_ = {};
private:
- static std::string toString(ChannelCountMode mode) {
- switch (mode) {
- case ChannelCountMode::MAX:
- return "max";
- case ChannelCountMode::CLAMPED_MAX:
- return "clamped-max";
- case ChannelCountMode::EXPLICIT:
- return "explicit";
- default:
- throw std::invalid_argument("Unknown channel count mode");
- }
- }
-
- static std::string toString(ChannelInterpretation interpretation) {
- switch (interpretation) {
- case ChannelInterpretation::SPEAKERS:
- return "speakers";
- case ChannelInterpretation::DISCRETE:
- return "discrete";
- default:
- throw std::invalid_argument("Unknown channel interpretation");
- }
- }
+ static std::string toString(ChannelCountMode mode);
+ static std::string toString(ChannelInterpretation interpretation);
void cleanup();
+ AudioBus* processAudio(AudioBus* outputBus, int framesToProcess);
+ virtual void processNode(AudioBus* processingBus, int framesToProcess) = 0;
+
+ void connectNode(const std::shared_ptr<AudioNode> &node);
+ void disconnectNode(const std::shared_ptr<AudioNode> &node);
+
+ void onInputEnabled();
+ void onInputDisabled();
+ void onInputConnected(AudioNode *node);
+ void onInputDisconnected(AudioNode *node);
};
} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioNodeManager.cpp b/packages/react-native-audio-api/common/cpp/core/AudioNodeManager.cpp
new file mode 100644
index 00000000..f790e193
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/core/AudioNodeManager.cpp
@@ -0,0 +1,72 @@
+
+#include "Locker.h"
+#include "AudioNode.h"
+#include "AudioNodeManager.h"
+
+namespace audioapi {
+
+AudioNodeManager::AudioNodeManager() {}
+
+AudioNodeManager::~AudioNodeManager() {
+ audioNodesToConnect_.clear();
+ sourceNodes_.clear();
+}
+
+void AudioNodeManager::addPendingConnection(const std::shared_ptr<AudioNode> &from, const std::shared_ptr<AudioNode> &to, ConnectionType type) {
+ Locker lock(getGraphLock());
+
+ audioNodesToConnect_.push_back(std::make_tuple(from, to, type));
+}
+
+void AudioNodeManager::addSourceNode(const std::shared_ptr<AudioNode> &node) {
+ Locker lock(getGraphLock());
+
+ sourceNodes_.push_back(node);
+}
+
+void AudioNodeManager::preProcessGraph() {
+ if (!Locker::tryLock(getGraphLock())) {
+ return;
+ }
+
+ settlePendingConnections();
+ removeFinishedSourceNodes();
+}
+
+std::mutex& AudioNodeManager::getGraphLock() {
+ return graphLock_;
+}
+
+void AudioNodeManager::settlePendingConnections() {
+ for (auto& connection : audioNodesToConnect_) {
+ std::shared_ptr<AudioNode> from = std::get<0>(connection);
+ std::shared_ptr<AudioNode> to = std::get<1>(connection);
+ ConnectionType type = std::get<2>(connection);
+
+ if (type == ConnectionType::CONNECT) {
+ from->connectNode(to);
+ } else {
+ from->disconnectNode(to);
+ }
+ }
+
+ audioNodesToConnect_.clear();
+}
+
+void AudioNodeManager::removeFinishedSourceNodes() {
+ for (auto it = sourceNodes_.begin(); it != sourceNodes_.end();) {
+ auto currentNode = it->get();
+ // Release the source node if use count is equal to 1 (this vector)
+ if (!currentNode->isEnabled() && it->use_count() == 1) {
+ for (auto& outputNode : currentNode->outputNodes_) {
+ currentNode->disconnectNode(outputNode);
+ }
+
+ it = sourceNodes_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioNodeManager.h b/packages/react-native-audio-api/common/cpp/core/AudioNodeManager.h
new file mode 100644
index 00000000..6a06de99
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/core/AudioNodeManager.h
@@ -0,0 +1,35 @@
+#pragma once
+
+#include <memory>
+#include <mutex>
+#include <tuple>
+#include <vector>
+
+namespace audioapi {
+
+class AudioNode;
+
+class AudioNodeManager {
+ public:
+ enum class ConnectionType { CONNECT, DISCONNECT };
+ AudioNodeManager();
+ ~AudioNodeManager();
+
+ void preProcessGraph();
+ void addPendingConnection(const std::shared_ptr<AudioNode> &from, const std::shared_ptr<AudioNode> &to, ConnectionType type);
+
+ void addSourceNode(const std::shared_ptr<AudioNode> &node);
+
+ std::mutex& getGraphLock();
+
+ private:
+ std::mutex graphLock_;
+
+ std::vector<std::shared_ptr<AudioNode>> sourceNodes_;
+ std::vector<std::tuple<std::shared_ptr<AudioNode>, std::shared_ptr<AudioNode>, ConnectionType>> audioNodesToConnect_;
+
+ void settlePendingConnections();
+ void removeFinishedSourceNodes();
+};
+
+} // namespace audioapi
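
The manager defers connect/disconnect requests coming from the JS thread and settles them at the start of a render quantum; preProcessGraph uses a try-lock so the audio thread skips the bookkeeping instead of blocking when the graph is being mutated. A generic sketch of that pattern with the standard library (not the project's Locker helper):

#include <functional>
#include <mutex>
#include <vector>

class PendingWork {
 public:
  // Called from the JS/main thread: queue work under the lock.
  void enqueue(std::function<void()> work) {
    std::lock_guard<std::mutex> lock(mutex_);
    pending_.push_back(std::move(work));
  }

  // Called from the audio thread: settle the queue only if the lock is free.
  void settleIfUncontended() {
    std::unique_lock<std::mutex> lock(mutex_, std::try_to_lock);
    if (!lock.owns_lock()) {
      return; // another thread holds the lock; try again next quantum
    }

    for (auto &work : pending_) {
      work();
    }
    pending_.clear();
  }

 private:
  std::mutex mutex_;
  std::vector<std::function<void()>> pending_;
};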
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioScheduledSourceNode.cpp b/packages/react-native-audio-api/common/cpp/core/AudioScheduledSourceNode.cpp
index 741e09c1..90926f24 100644
--- a/packages/react-native-audio-api/common/cpp/core/AudioScheduledSourceNode.cpp
+++ b/packages/react-native-audio-api/common/cpp/core/AudioScheduledSourceNode.cpp
@@ -1,14 +1,19 @@
-#include "AudioScheduledSourceNode.h"
#include "BaseAudioContext.h"
+#include "AudioNodeManager.h"
+#include "AudioScheduledSourceNode.h"
namespace audioapi {
AudioScheduledSourceNode::AudioScheduledSourceNode(BaseAudioContext *context)
- : AudioNode(context), isPlaying_(false) {
+ : AudioNode(context), playbackState_(PlaybackState::UNSCHEDULED) {
numberOfInputs_ = 0;
+ isInitialized_ = true;
}
void AudioScheduledSourceNode::start(double time) {
+ context_->getNodeManager()->addSourceNode(shared_from_this());
+
+ playbackState_ = PlaybackState::SCHEDULED;
waitAndExecute(time, [this](double time) { startPlayback(); });
}
@@ -16,12 +21,21 @@ void AudioScheduledSourceNode::stop(double time) {
waitAndExecute(time, [this](double time) { stopPlayback(); });
}
+bool AudioScheduledSourceNode::isPlaying() {
+ return playbackState_ == PlaybackState::PLAYING;
+}
+
+bool AudioScheduledSourceNode::isFinished() {
+ return playbackState_ == PlaybackState::FINISHED;
+}
+
void AudioScheduledSourceNode::startPlayback() {
- isPlaying_ = true;
+ playbackState_ = PlaybackState::PLAYING;
}
void AudioScheduledSourceNode::stopPlayback() {
- isPlaying_ = false;
+ playbackState_ = PlaybackState::FINISHED;
+ disable();
}
void AudioScheduledSourceNode::waitAndExecute(
diff --git a/packages/react-native-audio-api/common/cpp/core/AudioScheduledSourceNode.h b/packages/react-native-audio-api/common/cpp/core/AudioScheduledSourceNode.h
index 85691aef..0f6ad72a 100644
--- a/packages/react-native-audio-api/common/cpp/core/AudioScheduledSourceNode.h
+++ b/packages/react-native-audio-api/common/cpp/core/AudioScheduledSourceNode.h
@@ -13,13 +13,17 @@ namespace audioapi {
class AudioScheduledSourceNode : public AudioNode {
public:
+ enum class PlaybackState { UNSCHEDULED, SCHEDULED, PLAYING, FINISHED };
explicit AudioScheduledSourceNode(BaseAudioContext *context);
void start(double time);
void stop(double time);
+ bool isFinished();
+ bool isPlaying();
+
protected:
- std::atomic<bool> isPlaying_;
+ std::atomic<PlaybackState> playbackState_;
private:
void startPlayback();
diff --git a/packages/react-native-audio-api/common/cpp/core/BaseAudioContext.cpp b/packages/react-native-audio-api/common/cpp/core/BaseAudioContext.cpp
index 43e01e38..367372bf 100644
--- a/packages/react-native-audio-api/common/cpp/core/BaseAudioContext.cpp
+++ b/packages/react-native-audio-api/common/cpp/core/BaseAudioContext.cpp
@@ -1,5 +1,23 @@
+#ifdef ANDROID
+#include "AudioPlayer.h"
+#else
+#include "IOSAudioPlayer.h"
+#endif
+
#include "BaseAudioContext.h"
+#include "GainNode.h"
+#include "AudioBus.h"
+#include "AudioArray.h"
+#include "AudioBuffer.h"
+#include "ContextState.h"
+#include "OscillatorNode.h"
+#include "StereoPannerNode.h"
+#include "BiquadFilterNode.h"
+#include "AudioNodeManager.h"
+#include "AudioDestinationNode.h"
+#include "AudioBufferSourceNode.h"
+
namespace audioapi {
BaseAudioContext::BaseAudioContext() {
@@ -8,17 +26,13 @@ BaseAudioContext::BaseAudioContext() {
#else
audioPlayer_ = std::make_shared<IOSAudioPlayer>(this->renderAudio());
#endif
- destination_ = std::make_shared<AudioDestinationNode>(this);
sampleRate_ = audioPlayer_->getSampleRate();
-
- auto now = std::chrono::high_resolution_clock ::now();
- contextStartTime_ =
- static_cast<double>(std::chrono::duration_cast<std::chrono::nanoseconds>(
- now.time_since_epoch())
- .count());
+ bufferSizeInFrames_ = audioPlayer_->getBufferSizeInFrames();
audioPlayer_->start();
+ nodeManager_ = std::make_shared<AudioNodeManager>();
+ destination_ = std::make_shared<AudioDestinationNode>(this);
}
std::string BaseAudioContext::getState() {
@@ -29,13 +43,16 @@ int BaseAudioContext::getSampleRate() const {
return sampleRate_;
}
+int BaseAudioContext::getBufferSizeInFrames() const {
+ return bufferSizeInFrames_;
+}
+
+std::size_t BaseAudioContext::getCurrentSampleFrame() const {
+ return destination_->getCurrentSampleFrame();
+}
+
double BaseAudioContext::getCurrentTime() const {
- auto now = std::chrono::high_resolution_clock ::now();
- auto currentTime =
- static_cast<double>(std::chrono::duration_cast<std::chrono::nanoseconds>(
- now.time_since_epoch())
- .count());
- return (currentTime - contextStartTime_) / 1e9;
+ return destination_->getCurrentTime();
}
std::shared_ptr<AudioDestinationNode> BaseAudioContext::getDestination() {
@@ -79,18 +96,34 @@ std::shared_ptr BaseAudioContext::createPeriodicWave(
sampleRate_, real, imag, length, disableNormalization);
}
-std::function<void(float *, int)> BaseAudioContext::renderAudio() {
+std::function<void(AudioBus *, int)> BaseAudioContext::renderAudio() {
if (state_ == ContextState::CLOSED) {
- return [](float *, int) {};
+ return [](AudioBus *, int) {};
}
- return [this](float *data, int frames) {
+ return [this](AudioBus* data, int frames) {
destination_->renderAudio(data, frames);
};
}
-std::shared_ptr<PeriodicWave> BaseAudioContext::getBasicWaveForm(
- OscillatorType type) {
+AudioNodeManager* BaseAudioContext::getNodeManager() {
+ return nodeManager_.get();
+}
+
+std::string BaseAudioContext::toString(ContextState state) {
+ switch (state) {
+ case ContextState::SUSPENDED:
+ return "suspended";
+ case ContextState::RUNNING:
+ return "running";
+ case ContextState::CLOSED:
+ return "closed";
+ default:
+ throw std::invalid_argument("Unknown context state");
+ }
+}
+
+std::shared_ptr<PeriodicWave> BaseAudioContext::getBasicWaveForm(OscillatorType type) {
switch (type) {
case OscillatorType::SINE:
if (cachedSineWave_ == nullptr) {
@@ -117,9 +150,9 @@ std::shared_ptr BaseAudioContext::getBasicWaveForm(
}
return cachedTriangleWave_;
case OscillatorType::CUSTOM:
- throw std::invalid_argument(
- "You can't get a custom wave form. You need to create it.");
+ throw std::invalid_argument("You can't get a custom wave form. You need to create it.");
break;
}
}
+
} // namespace audioapi
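
The context clock now comes from the destination node instead of std::chrono. A minimal sketch of that relationship, assuming the destination simply counts rendered frames; the actual AudioDestinationNode implementation is not part of this diff.

```cpp
// Hypothetical sketch: context time derived from rendered sample frames.
#include <cstddef>

struct DestinationClockSketch {
  std::size_t currentSampleFrame = 0;  // advanced once per rendered frame
  int sampleRate = 44100;

  void renderAudio(int framesToProcess) { currentSampleFrame += framesToProcess; }

  // getCurrentTime() == frames rendered so far, divided by the sample rate
  double getCurrentTime() const {
    return static_cast<double>(currentSampleFrame) / sampleRate;
  }
};
```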
diff --git a/packages/react-native-audio-api/common/cpp/core/BaseAudioContext.h b/packages/react-native-audio-api/common/cpp/core/BaseAudioContext.h
index 86f53201..fb9052cf 100644
--- a/packages/react-native-audio-api/common/cpp/core/BaseAudioContext.h
+++ b/packages/react-native-audio-api/common/cpp/core/BaseAudioContext.h
@@ -1,38 +1,41 @@
#pragma once
-#include
#include
#include
-#include
#include
+#include
+#include
-#include "AudioBuffer.h"
-#include "AudioBufferSourceNode.h"
-#include "AudioDestinationNode.h"
-#include "AudioScheduledSourceNode.h"
-#include "BiquadFilterNode.h"
-#include "Constants.h"
#include "ContextState.h"
-#include "GainNode.h"
-#include "OscillatorNode.h"
#include "OscillatorType.h"
-#include "PeriodicWave.h"
-#include "StereoPannerNode.h"
+
+namespace audioapi {
+
+class AudioBus;
+class GainNode;
+class AudioBuffer;
+class PeriodicWave;
+class OscillatorNode;
+class StereoPannerNode;
+class AudioNodeManager;
+class BiquadFilterNode;
+class AudioDestinationNode;
+class AudioBufferSourceNode;
#ifdef ANDROID
-#include "AudioPlayer.h"
+class AudioPlayer;
#else
-#include "IOSAudioPlayer.h"
+class IOSAudioPlayer;
#endif
-namespace audioapi {
-
class BaseAudioContext {
public:
BaseAudioContext();
std::string getState();
[[nodiscard]] int getSampleRate() const;
[[nodiscard]] double getCurrentTime() const;
+ [[nodiscard]] int getBufferSizeInFrames() const;
+ [[nodiscard]] std::size_t getCurrentSampleFrame() const;
std::shared_ptr<AudioDestinationNode> getDestination();
std::shared_ptr<OscillatorNode> createOscillator();
@@ -40,46 +43,38 @@ class BaseAudioContext {
std::shared_ptr<StereoPannerNode> createStereoPanner();
std::shared_ptr<BiquadFilterNode> createBiquadFilter();
std::shared_ptr<AudioBufferSourceNode> createBufferSource();
- static std::shared_ptr<AudioBuffer>
- createBuffer(int numberOfChannels, int length, int sampleRate);
+ static std::shared_ptr<AudioBuffer> createBuffer(int numberOfChannels, int length, int sampleRate);
std::shared_ptr<PeriodicWave> createPeriodicWave(
float *real,
float *imag,
bool disableNormalization,
int length);
-
- std::function<void(float *, int)> renderAudio();
std::shared_ptr<PeriodicWave> getBasicWaveForm(OscillatorType type);
+ std::function<void(AudioBus *, int)> renderAudio();
+
+ AudioNodeManager* getNodeManager();
+
protected:
+ static std::string toString(ContextState state);
std::shared_ptr<AudioDestinationNode> destination_;
+
#ifdef ANDROID
std::shared_ptr<AudioPlayer> audioPlayer_;
#else
std::shared_ptr<IOSAudioPlayer> audioPlayer_;
#endif
- ContextState state_ = ContextState::RUNNING;
+
int sampleRate_;
- double contextStartTime_;
+ int bufferSizeInFrames_;
+ ContextState state_ = ContextState::RUNNING;
+ std::shared_ptr<AudioNodeManager> nodeManager_;
private:
std::shared_ptr<PeriodicWave> cachedSineWave_ = nullptr;
std::shared_ptr<PeriodicWave> cachedSquareWave_ = nullptr;
std::shared_ptr<PeriodicWave> cachedSawtoothWave_ = nullptr;
std::shared_ptr<PeriodicWave> cachedTriangleWave_ = nullptr;
-
- static std::string toString(ContextState state) {
- switch (state) {
- case ContextState::SUSPENDED:
- return "suspended";
- case ContextState::RUNNING:
- return "running";
- case ContextState::CLOSED:
- return "closed";
- default:
- throw std::invalid_argument("Unknown context state");
- }
- }
};
} // namespace audioapi
diff --git a/packages/react-native-audio-api/common/cpp/core/BiquadFilterNode.cpp b/packages/react-native-audio-api/common/cpp/core/BiquadFilterNode.cpp
index e658b70c..79b357ac 100644
--- a/packages/react-native-audio-api/common/cpp/core/BiquadFilterNode.cpp
+++ b/packages/react-native-audio-api/common/cpp/core/BiquadFilterNode.cpp
@@ -1,3 +1,5 @@
+#include "AudioBus.h"
+#include "AudioArray.h"
#include "BiquadFilterNode.h"
#include "BaseAudioContext.h"
@@ -17,6 +19,7 @@ BiquadFilterNode::BiquadFilterNode(BaseAudioContext *context)
gainParam_ = std::make_shared<AudioParam>(
context, 0.0, MIN_FILTER_GAIN, MAX_FILTER_GAIN);
type_ = BiquadFilterType::LOWPASS;
+ isInitialized_ = true;
}
std::string BiquadFilterNode::getType() {
@@ -90,12 +93,12 @@ void BiquadFilterNode::setNormalizedCoefficients(
float a0,
float a1,
float a2) {
- auto a0Inversed = 1.0f / a0;
- b0_ = b0 * a0Inversed;
- b1_ = b1 * a0Inversed;
- b2_ = b2 * a0Inversed;
- a1_ = a1 * a0Inversed;
- a2_ = a2 * a0Inversed;
+ auto a0Inverted = 1.0f / a0;
+ b0_ = b0 * a0Inverted;
+ b1_ = b1 * a0Inverted;
+ b2_ = b2 * a0Inverted;
+ a1_ = a1 * a0Inverted;
+ a2_ = a2 * a0Inverted;
}
void BiquadFilterNode::setLowpassCoefficients(float frequency, float Q) {
@@ -114,11 +117,11 @@ void BiquadFilterNode::setLowpassCoefficients(float frequency, float Q) {
float theta = M_PI * frequency;
float alpha = std::sin(theta) / (2 * g);
- float cosw = std::cos(theta);
- float beta = (1 - cosw) / 2;
+ float cosW = std::cos(theta);
+ float beta = (1 - cosW) / 2;
setNormalizedCoefficients(
- beta, 2 * beta, beta, 1 + alpha, -2 * cosw, 1 - alpha);
+ beta, 2 * beta, beta, 1 + alpha, -2 * cosW, 1 - alpha);
}
void BiquadFilterNode::setHighpassCoefficients(float frequency, float Q) {
@@ -137,11 +140,11 @@ void BiquadFilterNode::setHighpassCoefficients(float frequency, float Q) {
float theta = M_PI * frequency;
float alpha = std::sin(theta) / (2 * g);
- float cosw = std::cos(theta);
- float beta = (1 - cosw) / 2;
+ float cosW = std::cos(theta);
+ float beta = (1 - cosW) / 2;
setNormalizedCoefficients(
- beta, -2 * beta, beta, 1 + alpha, -2 * cosw, 1 - alpha);
+ beta, -2 * beta, beta, 1 + alpha, -2 * cosW, 1 - alpha);
}
void BiquadFilterNode::setBandpassCoefficients(float frequency, float Q) {
@@ -349,40 +352,34 @@ void BiquadFilterNode::applyFilter() {
}
}
-bool BiquadFilterNode::processAudio(float *audioData, int32_t numFrames) {
- if (!AudioNode::processAudio(audioData, numFrames)) {
- return false;
- }
-
+void BiquadFilterNode::processNode(AudioBus* processingBus, int framesToProcess) {
resetCoefficients();
applyFilter();
- float x1 = x1_;
- float x2 = x2_;
- float y1 = y1_;
- float y2 = y2_;
+ for (int c = 0; c < processingBus->getNumberOfChannels(); c += 1) {
+ float x1 = x1_;
+ float x2 = x2_;
+ float y1 = y1_;
+ float y2 = y2_;
- float b0 = b0_;
- float b1 = b1_;
- float b2 = b2_;
- float a1 = a1_;
- float a2 = a2_;
+ float b0 = b0_;
+ float b1 = b1_;
+ float b2 = b2_;
+ float a1 = a1_;
+ float a2 = a2_;
- for (int i = 0; i < numFrames; i++) {
- auto input = audioData[i * channelCount_];
- auto output =
- static_cast(b0 * input + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2);
+ for (int i = 0; i < framesToProcess; i += 1) {
+ float input = (*processingBus->getChannel(c))[i];
+ float output = b0 * input + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2;
- for (int j = 0; j < channelCount_; j++) {
- audioData[i * channelCount_ + j] = output;
- }
+ (*processingBus->getChannel(c))[i] = output;
- x2 = x1;
- x1 = input;
- y2 = y1;
- y1 = output;
+ x2 = x1;
+ x1 = input;
+ y2 = y1;
+ y1 = output;
+ }
}
-
- return true;
}
+
} // namespace audioapi
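
The rewritten per-channel loop is a direct form I biquad. For reference, a single step of the recurrence it evaluates (coefficients already normalized by a0, as in setNormalizedCoefficients); this is a standalone illustration, not code from the diff.

```cpp
// One step of the direct form I biquad recurrence used in processNode():
// y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]
float biquadStep(float input, float b0, float b1, float b2, float a1, float a2,
                 float &x1, float &x2, float &y1, float &y2) {
  float output = b0 * input + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2;
  x2 = x1;
  x1 = input;
  y2 = y1;
  y1 = output;
  return output;
}
```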
diff --git a/packages/react-native-audio-api/common/cpp/core/BiquadFilterNode.h b/packages/react-native-audio-api/common/cpp/core/BiquadFilterNode.h
index ac55d1da..8f30832d 100644
--- a/packages/react-native-audio-api/common/cpp/core/BiquadFilterNode.h
+++ b/packages/react-native-audio-api/common/cpp/core/BiquadFilterNode.h
@@ -14,6 +14,8 @@
namespace audioapi {
+class AudioBus;
+
class BiquadFilterNode : public AudioNode {
public:
explicit BiquadFilterNode(BaseAudioContext *context);
@@ -30,7 +32,7 @@ class BiquadFilterNode : public AudioNode {
std::vector &phaseResponseOutput);
protected:
- bool processAudio(float *audioData, int32_t numFrames) override;
+ void processNode(AudioBus *processingBus, int framesToProcess) override;
private:
std::shared_ptr<AudioParam> frequencyParam_;
diff --git a/packages/react-native-audio-api/common/cpp/core/GainNode.cpp b/packages/react-native-audio-api/common/cpp/core/GainNode.cpp
index 2435b385..9e43ff14 100644
--- a/packages/react-native-audio-api/common/cpp/core/GainNode.cpp
+++ b/packages/react-native-audio-api/common/cpp/core/GainNode.cpp
@@ -1,30 +1,30 @@
#include "GainNode.h"
+#include "AudioBus.h"
+#include "AudioArray.h"
#include "BaseAudioContext.h"
namespace audioapi {
GainNode::GainNode(BaseAudioContext *context) : AudioNode(context) {
gainParam_ = std::make_shared<AudioParam>(context, 1.0, -MAX_GAIN, MAX_GAIN);
+ isInitialized_ = true;
}
std::shared_ptr<AudioParam> GainNode::getGainParam() const {
return gainParam_;
}
-bool GainNode::processAudio(float *audioData, int32_t numFrames) {
- if (!AudioNode::processAudio(audioData, numFrames)) {
- return false;
- }
+void GainNode::processNode(AudioBus *processingBus, int framesToProcess) {
+ double time = context_->getCurrentTime();
+ double deltaTime = 1.0 / context_->getSampleRate();
- auto time = context_->getCurrentTime();
- auto deltaTime = 1.0 / context_->getSampleRate();
+ for (int i = 0; i < framesToProcess; i += 1) {
+ for (int j = 0; j < processingBus->getNumberOfChannels(); j += 1) {
+ (*processingBus->getChannel(j))[i] *= gainParam_->getValueAtTime(time);
+ }
- for (int i = 0; i < numFrames * channelCount_; i++) {
- audioData[i] *= gainParam_->getValueAtTime(time);
time += deltaTime;
}
-
- return true;
}
} // namespace audioapi
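
The gain stage is now applied per frame and per channel against the bus. A standalone sketch of the same loop, with the AudioParam lookup replaced by a plain callable for illustration; names here are stand-ins, not the library's API.

```cpp
// Minimal sketch of the per-frame, per-channel gain loop in processNode().
// gainAtTime stands in for gainParam_->getValueAtTime(time).
#include <functional>
#include <vector>

void applyGainSketch(std::vector<std::vector<float>> &channels,
                     int framesToProcess,
                     double time,
                     double deltaTime,  // 1.0 / sampleRate
                     const std::function<float(double)> &gainAtTime) {
  for (int i = 0; i < framesToProcess; i += 1) {
    for (auto &channel : channels) {
      channel[i] *= gainAtTime(time);
    }
    time += deltaTime;
  }
}
```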
diff --git a/packages/react-native-audio-api/common/cpp/core/GainNode.h b/packages/react-native-audio-api/common/cpp/core/GainNode.h
index 7a74c203..ee387791 100644
--- a/packages/react-native-audio-api/common/cpp/core/GainNode.h
+++ b/packages/react-native-audio-api/common/cpp/core/GainNode.h
@@ -7,6 +7,8 @@
namespace audioapi {
+class AudioBus;
+
class GainNode : public AudioNode {
public:
explicit GainNode(BaseAudioContext *context);
@@ -14,7 +16,7 @@ class GainNode : public AudioNode {
[[nodiscard]] std::shared_ptr<AudioParam> getGainParam() const;
protected:
- bool processAudio(float *audioData, int32_t numFrames) override;
+ void processNode(AudioBus *processingBus, int framesToProcess) override;
private:
std::shared_ptr<AudioParam> gainParam_;
diff --git a/packages/react-native-audio-api/common/cpp/core/OscillatorNode.cpp b/packages/react-native-audio-api/common/cpp/core/OscillatorNode.cpp
index 92e61156..f8bbe403 100644
--- a/packages/react-native-audio-api/common/cpp/core/OscillatorNode.cpp
+++ b/packages/react-native-audio-api/common/cpp/core/OscillatorNode.cpp
@@ -1,3 +1,5 @@
+#include "AudioBus.h"
+#include "AudioArray.h"
#include "OscillatorNode.h"
#include "BaseAudioContext.h"
@@ -11,6 +13,7 @@ OscillatorNode::OscillatorNode(BaseAudioContext *context)
std::make_shared<AudioParam>(context, 0.0, -MAX_DETUNE, MAX_DETUNE);
type_ = OscillatorType::SINE;
periodicWave_ = context_->getBasicWaveForm(type_);
+ isInitialized_ = true;
}
std::shared_ptr<AudioParam> OscillatorNode::getFrequencyParam() const {
@@ -36,17 +39,18 @@ void OscillatorNode::setPeriodicWave(
type_ = OscillatorType::CUSTOM;
}
-bool OscillatorNode::processAudio(float *audioData, int32_t numFrames) {
- if (!isPlaying_) {
- return false;
+void OscillatorNode::processNode(AudioBus* processingBus, int framesToProcess) {
+ if (!isPlaying()) {
+ processingBus->zero();
+ return;
}
- auto time = context_->getCurrentTime();
- auto deltaTime = 1.0 / context_->getSampleRate();
+ double time = context_->getCurrentTime();
+ double deltaTime = 1.0 / context_->getSampleRate();
- for (int i = 0; i < numFrames; ++i) {
+ for (int i = 0; i < framesToProcess; i += 1) {
auto detuneRatio =
- std::pow(2.0f, detuneParam_->getValueAtTime(time) / 1200.0f);
+ std::pow(2.0f, detuneParam_->getValueAtTime(time) / 1200.0f);
auto detunedFrequency =
round(frequencyParam_->getValueAtTime(time) * detuneRatio);
auto phaseIncrement = detunedFrequency * periodicWave_->getScale();
@@ -54,8 +58,8 @@ bool OscillatorNode::processAudio(float *audioData, int32_t numFrames) {
float sample =
periodicWave_->getSample(detunedFrequency, phase_, phaseIncrement);
- for (int j = 0; j < channelCount_; j++) {
- audioData[i * channelCount_ + j] = sample;
+ for (int j = 0; j < processingBus->getNumberOfChannels(); j += 1) {
+ (*processingBus->getChannel(j))[i] = sample;
}
phase_ += phaseIncrement;
@@ -66,7 +70,6 @@ bool OscillatorNode::processAudio(float *audioData, int32_t numFrames) {
time += deltaTime;
}
-
- return true;
}
+
} // namespace audioapi
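
The detune handling follows the Web Audio convention: detune is expressed in cents, and the effective frequency is frequency * 2^(detune / 1200). A standalone sketch of just that math, not taken from the diff:

```cpp
// Detune math as used in processNode(): 1200 cents equals one octave.
#include <cmath>

float detunedFrequency(float frequency, float detuneCents) {
  float detuneRatio = std::pow(2.0f, detuneCents / 1200.0f);
  return frequency * detuneRatio;
}
// e.g. detunedFrequency(440.0f, 1200.0f) == 880.0f (one octave up)
```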
diff --git a/packages/react-native-audio-api/common/cpp/core/OscillatorNode.h b/packages/react-native-audio-api/common/cpp/core/OscillatorNode.h
index 2e04a9fe..e87a9a44 100644
--- a/packages/react-native-audio-api/common/cpp/core/OscillatorNode.h
+++ b/packages/react-native-audio-api/common/cpp/core/OscillatorNode.h
@@ -5,12 +5,14 @@
#include
#include "AudioParam.h"
-#include "AudioScheduledSourceNode.h"
-#include "OscillatorType.h"
#include "PeriodicWave.h"
+#include "OscillatorType.h"
+#include "AudioScheduledSourceNode.h"
namespace audioapi {
+class AudioBus;
+
class OscillatorNode : public AudioScheduledSourceNode {
public:
explicit OscillatorNode(BaseAudioContext *context);
@@ -22,7 +24,7 @@ class OscillatorNode : public AudioScheduledSourceNode {
void setPeriodicWave(const std::shared_ptr<PeriodicWave> &periodicWave);
protected:
- bool processAudio(float *audioData, int32_t numFrames) override;
+ void processNode(AudioBus *processingBus, int framesToProcess) override;
private:
std::shared_ptr<AudioParam> frequencyParam_;
diff --git a/packages/react-native-audio-api/common/cpp/core/StereoPannerNode.cpp b/packages/react-native-audio-api/common/cpp/core/StereoPannerNode.cpp
index 8958adf4..1b89d805 100644
--- a/packages/react-native-audio-api/common/cpp/core/StereoPannerNode.cpp
+++ b/packages/react-native-audio-api/common/cpp/core/StereoPannerNode.cpp
@@ -1,3 +1,6 @@
+#include "AudioBus.h"
+#include "Constants.h"
+#include "AudioArray.h"
#include "StereoPannerNode.h"
#include "BaseAudioContext.h"
@@ -9,42 +12,45 @@ StereoPannerNode::StereoPannerNode(BaseAudioContext *context)
: AudioNode(context) {
channelCountMode_ = ChannelCountMode::CLAMPED_MAX;
panParam_ = std::make_shared<AudioParam>(context, 0.0, -MAX_PAN, MAX_PAN);
+ isInitialized_ = true;
}
std::shared_ptr<AudioParam> StereoPannerNode::getPanParam() const {
return panParam_;
}
-bool StereoPannerNode::processAudio(float *audioData, int32_t numFrames) {
- // assumed channelCount = 2
- if (!AudioNode::processAudio(audioData, numFrames)) {
- return false;
- }
+void StereoPannerNode::processNode(AudioBus* processingBus, int framesToProcess) {
+ // TODO: Currently a channelCount of 2 is assumed.
+ // It should:
+ // - support mono-channel buses
+ // - throw an error when trying to set up a stereo panner with more than 2 channels
+
+ double time = context_->getCurrentTime();
+ double deltaTime = 1.0 / context_->getSampleRate();
- auto time = context_->getCurrentTime();
- auto deltaTime = 1.0 / context_->getSampleRate();
+ AudioArray* left = processingBus->getChannelByType(AudioBus::ChannelLeft);
+ AudioArray* right = processingBus->getChannelByType(AudioBus::ChannelRight);
- for (int i = 0; i < numFrames; i++) {
- auto pan = panParam_->getValueAtTime(time);
- auto x = (pan <= 0 ? pan + 1 : pan) * M_PI / 2;
+ for (int i = 0; i < framesToProcess; i += 1) {
+ float pan = panParam_->getValueAtTime(time);
+ float x = (pan <= 0 ? pan + 1 : pan) * M_PI / 2;
- auto gainL = static_cast(cos(x));
- auto gainR = static_cast(sin(x));
+ float gainL = static_cast(cos(x));
+ float gainR = static_cast(sin(x));
- auto inputL = audioData[i * 2];
- auto inputR = audioData[i * 2 + 1];
+ float inputL = (*left)[i];
+ float inputR = (*right)[i];
if (pan <= 0) {
- audioData[i * 2] = inputL + inputR * gainL;
- audioData[i * 2 + 1] = inputR * gainR;
+ (*left)[i] = inputL + inputR * gainL;
+ (*right)[i] = inputR * gainR;
} else {
- audioData[i * 2] = inputL * gainL;
- audioData[i * 2 + 1] = inputR + inputL * gainR;
+ (*left)[i] = inputL * gainL;
+ (*right)[i] = inputR + inputL * gainR;
}
time += deltaTime;
}
-
- return true;
}
+
} // namespace audioapi
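
The gains above implement the equal-power stereo pan law; a standalone sketch of the gain computation (illustrative helper, not part of the diff):

```cpp
// Equal-power pan gains as computed in processNode(): pan in [-1, 1] maps to
// an angle x in [0, pi/2], so gainL^2 + gainR^2 == 1 for every pan value.
#include <cmath>

void panGains(float pan, float &gainL, float &gainR) {
  float x = (pan <= 0 ? pan + 1 : pan) * static_cast<float>(M_PI) / 2;
  gainL = std::cos(x);
  gainR = std::sin(x);
}
```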
diff --git a/packages/react-native-audio-api/common/cpp/core/StereoPannerNode.h b/packages/react-native-audio-api/common/cpp/core/StereoPannerNode.h
index 10c436f8..b3550e3d 100644
--- a/packages/react-native-audio-api/common/cpp/core/StereoPannerNode.h
+++ b/packages/react-native-audio-api/common/cpp/core/StereoPannerNode.h
@@ -5,10 +5,11 @@
#include "AudioNode.h"
#include "AudioParam.h"
-#include "VectorMath.h"
namespace audioapi {
+class AudioBus;
+
class StereoPannerNode : public AudioNode {
public:
explicit StereoPannerNode(BaseAudioContext *context);
@@ -16,7 +17,7 @@ class StereoPannerNode : public AudioNode {
[[nodiscard]] std::shared_ptr<AudioParam> getPanParam() const;
protected:
- bool processAudio(float *audioData, int32_t numFrames) override;
+ void processNode(AudioBus* processingBus, int framesToProcess) override;
private:
std::shared_ptr<AudioParam> panParam_;
diff --git a/packages/react-native-audio-api/common/cpp/utils/Locker.h b/packages/react-native-audio-api/common/cpp/utils/Locker.h
new file mode 100644
index 00000000..8764be38
--- /dev/null
+++ b/packages/react-native-audio-api/common/cpp/utils/Locker.h
@@ -0,0 +1,47 @@
+#pragma once
+
+#include <mutex>
+
+namespace audioapi {
+
+// Small, easy-to-use interface for managing a std::mutex lock
+class Locker {
+ public:
+ Locker() : lockPtr_(nullptr) {}
+ explicit Locker(std::mutex& lockPtr): lockPtr_(&lockPtr) {
+ lock();
+ }
+
+ ~Locker() {
+ unlock();
+ }
+
+ explicit operator bool() const { return !!lockPtr_; }
+
+ void lock() {
+ if (lockPtr_) {
+ lockPtr_->lock();
+ }
+ }
+
+ void unlock() {
+ if (lockPtr_) {
+ lockPtr_->unlock();
+ }
+ }
+
+ static Locker tryLock(std::mutex& lock) {
+ Locker result = Locker();
+
+ if (lock.try_lock()) {
+ result.lockPtr_ = &lock;
+ }
+
+ return result;
+ }
+
+ private:
+ std::mutex* lockPtr_;
+};
+
+} // namespace audioapi
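
A sketch of how Locker might be used: the blocking constructor locks for the current scope, while tryLock lets a real-time thread skip the work instead of waiting. The call sites below are assumptions for illustration, not code from this diff.

```cpp
// Hypothetical usage of audioapi::Locker (not taken from the diff).
#include <mutex>
#include "Locker.h"

std::mutex graphMutex;

void processWithLock() {
  audioapi::Locker lock(graphMutex);  // locks here, unlocks in the destructor
  // ... mutate the audio graph ...
}

void processIfFree() {
  if (auto lock = audioapi::Locker::tryLock(graphMutex)) {
    // executed only when the mutex was free; otherwise the block is skipped
  }
}
```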
diff --git a/packages/react-native-audio-api/common/cpp/utils/VectorMath.cpp b/packages/react-native-audio-api/common/cpp/utils/VectorMath.cpp
index 859b4289..4195b10a 100644
--- a/packages/react-native-audio-api/common/cpp/utils/VectorMath.cpp
+++ b/packages/react-native-audio-api/common/cpp/utils/VectorMath.cpp
@@ -77,7 +77,7 @@ void add(
numberOfElementsToProcess);
}
-void substract(
+void subtract(
const float *inputVector1,
const float *inputVector2,
float *outputVector,
@@ -115,6 +115,10 @@ float maximumMagnitude(
return maximumValue;
}
+void multiplyByScalarThenAddToOutput(const float* inputVector, float scalar, float* outputVector, size_t numberOfElementsToProcess) {
+ vDSP_vsma(inputVector, 1, &scalar, outputVector, 1, outputVector, 1, numberOfElementsToProcess);
+}
+
#else
#if defined(HAVE_X86_SSE2)
@@ -363,7 +367,7 @@ void add(
}
}
-void substract(
+void subtract(
const float *inputVector1,
const float *inputVector2,
float *outputVector,
@@ -578,7 +582,7 @@ float maximumMagnitude(
max = std::max(max, groupMaxP[3]);
n = tailFrames;
-#elif defined(HAVE_ARM_NEON_INTRINSICS)
+#elif defined(c)
size_t tailFrames = n % 4;
const float *endP = inputVector + n - tailFrames;
@@ -605,5 +609,70 @@ float maximumMagnitude(
return max;
}
+void multiplyByScalarThenAddToOutput(const float* inputVector, float scalar, float* outputVector, size_t numberOfElementsToProcess) {
+ size_t n = numberOfElementsToProcess;
+
+#if HAVE_X86_SSE2
+ // If the inputVector address is not 16-byte aligned, the first several frames (at most three) should be processed separately.
+ while (!is16ByteAligned(inputVector) && n) {
+ *outputVector += scalar * *inputVector;
+ inputVector++;
+ outputVector++;
+ n--;
+ }
+
+ // Now the inputVector is aligned, use SSE.
+ size_t tailFrames = n % 4;
+ const float* endP = outputVector + n - tailFrames;
+
+ __m128 pSource;
+ __m128 dest;
+ __m128 temp;
+ __m128 mScale = _mm_set_ps1(scalar);
+
+ bool destAligned = is16ByteAligned(outputVector);
+
+#define SSE2_MULT_ADD(loadInstr, storeInstr) \
+ while (outputVector < endP) \
+ { \
+ pSource = _mm_load_ps(inputVector); \
+ temp = _mm_mul_ps(pSource, mScale); \
+ dest = _mm_##loadInstr##_ps(outputVector); \
+ dest = _mm_add_ps(dest, temp); \
+ _mm_##storeInstr##_ps(outputVector, dest); \
+ inputVector += 4; \
+ outputVector += 4; \
+ }
+
+ if (destAligned)
+ SSE2_MULT_ADD(load, store)
+ else
+ SSE2_MULT_ADD(loadu, storeu)
+
+ n = tailFrames;
+#elif HAVE_ARM_NEON_INTRINSICS
+ size_t tailFrames = n % 4;
+ const float* endP = outputVector + n - tailFrames;
+
+ float32x4_t k = vdupq_n_f32(scalar);
+ while (outputVector < endP) {
+ float32x4_t source = vld1q_f32(inputVector);
+ float32x4_t dest = vld1q_f32(outputVector);
+
+ dest = vmlaq_f32(dest, source, k);
+ vst1q_f32(outputVector, dest);
+
+ inputVector += 4;
+ outputVector += 4;
+ }
+ n = tailFrames;
+#endif
+ while (n--) {
+ *outputVector += *inputVector * scalar;
+ ++inputVector;
+ ++outputVector;
+ }
+}
+
#endif
} // namespace audioapi::VectorMath
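
Whichever SIMD path is taken, the new helper computes a multiply-accumulate into the output. A scalar reference version of its semantics, equivalent to the plain tail loop at the end of the SIMD implementation above:

```cpp
// Reference semantics of multiplyByScalarThenAddToOutput().
#include <cstddef>

void multiplyByScalarThenAddToOutputRef(const float *inputVector,
                                        float scalar,
                                        float *outputVector,
                                        std::size_t numberOfElementsToProcess) {
  for (std::size_t i = 0; i < numberOfElementsToProcess; ++i) {
    outputVector[i] += inputVector[i] * scalar;
  }
}
```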
diff --git a/packages/react-native-audio-api/common/cpp/utils/VectorMath.h b/packages/react-native-audio-api/common/cpp/utils/VectorMath.h
index a7507e0c..c29f58f8 100644
--- a/packages/react-native-audio-api/common/cpp/utils/VectorMath.h
+++ b/packages/react-native-audio-api/common/cpp/utils/VectorMath.h
@@ -32,6 +32,12 @@
namespace audioapi::VectorMath {
+void multiplyByScalarThenAddToOutput(
+ const float *inputVector,
+ float scalar,
+ float *outputVector,
+ size_t numberOfElementsToProcess);
+
void multiplyByScalar(
const float *inputVector,
float scalar,
@@ -47,7 +53,7 @@ void add(
const float *inputVector2,
float *outputVector,
size_t numberOfElementsToProcess);
-void substract(
+void subtract(
const float *inputVector1,
const float *inputVector2,
float *outputVector,
diff --git a/packages/react-native-audio-api/ios/AudioPlayer/AudioPlayer.h b/packages/react-native-audio-api/ios/AudioPlayer/AudioPlayer.h
index d8b9a9d0..df6a6ebb 100644
--- a/packages/react-native-audio-api/ios/AudioPlayer/AudioPlayer.h
+++ b/packages/react-native-audio-api/ios/AudioPlayer/AudioPlayer.h
@@ -3,7 +3,7 @@
#import
#import
-typedef void (^RenderAudioBlock)(float *audioData, int numFrames);
+typedef void (^RenderAudioBlock)(AudioBufferList* outputBuffer, int numFrames);
@interface AudioPlayer : NSObject
@@ -12,12 +12,13 @@ typedef void (^RenderAudioBlock)(float *audioData, int numFrames);
@property (nonatomic, strong) AVAudioFormat *format;
@property (nonatomic, strong) AVAudioSourceNode *sourceNode;
@property (nonatomic, copy) RenderAudioBlock renderAudio;
-@property (nonatomic, assign) float *buffer;
- (instancetype)initWithRenderAudioBlock:(RenderAudioBlock)renderAudio;
- (int)getSampleRate;
+- (int)getBufferSizeInFrames;
+
- (void)start;
- (void)stop;
diff --git a/packages/react-native-audio-api/ios/AudioPlayer/AudioPlayer.m b/packages/react-native-audio-api/ios/AudioPlayer/AudioPlayer.m
index 5b3f4932..9042163c 100644
--- a/packages/react-native-audio-api/ios/AudioPlayer/AudioPlayer.m
+++ b/packages/react-native-audio-api/ios/AudioPlayer/AudioPlayer.m
@@ -42,8 +42,6 @@ - (instancetype)initWithRenderAudioBlock:(RenderAudioBlock)renderAudio
frameCount:frameCount
outputData:outputData];
}];
-
- self.buffer = nil;
}
return self;
@@ -54,11 +52,25 @@ - (int)getSampleRate
return [self.audioSession sampleRate];
}
+- (int)getBufferSizeInFrames
+{
+ // Note: this might be important in the future.
+ // For some reason audioSession.IOBufferDuration is always 0.01, which for a sample rate of 48k
+ // gives exactly 480 frames, while at the same time the frameCount requested by the AVAudioSourceNode
+ // might vary, e.g. between 555 and 560.
+ // preferredIOBufferDuration seems to be double that value (resulting in 960 frames),
+ // which is a safer value to base our internal AudioBus sizes on.
+ // But there is no documentation => no guarantee :)
+ // If something is crackling when it should play silence, start here 📻
+ return (int)(self.audioSession.preferredIOBufferDuration * self.audioSession.sampleRate);
+}
+
- (void)start
{
[self.audioEngine attachNode:self.sourceNode];
[self.audioEngine connect:self.sourceNode to:self.audioEngine.mainMixerNode format:self.format];
+
if (!self.audioEngine.isRunning) {
NSError *error = nil;
if (![self.audioEngine startAndReturnError:&error]) {
@@ -88,8 +100,6 @@ - (void)cleanup
self.audioEngine = nil;
self.audioSession = nil;
self.renderAudio = nil;
-
- free(_buffer);
}
- (OSStatus)renderCallbackWithIsSilence:(BOOL *)isSilence
@@ -101,19 +111,7 @@ - (OSStatus)renderCallbackWithIsSilence:(BOOL *)isSilence
return noErr; // Ensure we have stereo output
}
- if (!self.buffer) {
- self.buffer = malloc(frameCount * 2 * sizeof(float));
- }
-
- float *leftBuffer = (float *)outputData->mBuffers[0].mData;
- float *rightBuffer = (float *)outputData->mBuffers[1].mData;
-
- self.renderAudio(self.buffer, frameCount);
-
- for (int frame = 0; frame < frameCount; frame += 1) {
- leftBuffer[frame] = self.buffer[frame * 2];
- rightBuffer[frame] = self.buffer[frame * 2 + 1];
- }
+ self.renderAudio(outputData, frameCount);
return noErr;
}
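
For the numbers quoted in the getBufferSizeInFrames comment, the conversion is simply duration times sample rate. A worked example with values assumed from that comment, not read from a live AVAudioSession:

```cpp
// Worked example of the frame-count calculation, using assumed values.
int framesFor(double bufferDurationSeconds, double sampleRate) {
  return static_cast<int>(bufferDurationSeconds * sampleRate);
}

// framesFor(0.01, 48000.0) == 480  (IOBufferDuration)
// framesFor(0.02, 48000.0) == 960  (assumed preferredIOBufferDuration)
```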
diff --git a/packages/react-native-audio-api/ios/AudioPlayer/IOSAudioPlayer.h b/packages/react-native-audio-api/ios/AudioPlayer/IOSAudioPlayer.h
index d523bba1..f8f0f5b1 100644
--- a/packages/react-native-audio-api/ios/AudioPlayer/IOSAudioPlayer.h
+++ b/packages/react-native-audio-api/ios/AudioPlayer/IOSAudioPlayer.h
@@ -11,19 +11,23 @@ typedef struct objc_object AudioPlayer;
namespace audioapi {
class AudioContext;
+class AudioBus;
class IOSAudioPlayer {
protected:
- AudioPlayer *audioPlayer_;
- std::function<void(float *, int)> renderAudio_;
+ AudioBus* audioBus_;
+ AudioPlayer* audioPlayer_;
+ std::function<void(AudioBus *, int)> renderAudio_;
public:
- explicit IOSAudioPlayer(const std::function<void(float *, int)> &renderAudio);
+ explicit IOSAudioPlayer(const std::function<void(AudioBus *, int)> &renderAudio);
~IOSAudioPlayer();
int getSampleRate() const;
+ int getBufferSizeInFrames() const;
+
void start();
void stop();
- void renderAudio(float *audioData, int32_t numFrames);
+
};
} // namespace audioapi
diff --git a/packages/react-native-audio-api/ios/AudioPlayer/IOSAudioPlayer.mm b/packages/react-native-audio-api/ios/AudioPlayer/IOSAudioPlayer.mm
index ecc2852a..7c40ab66 100644
--- a/packages/react-native-audio-api/ios/AudioPlayer/IOSAudioPlayer.mm
+++ b/packages/react-native-audio-api/ios/AudioPlayer/IOSAudioPlayer.mm
@@ -1,25 +1,37 @@
+#import
+
+#include
+#include
+#include
#include
namespace audioapi {
-IOSAudioPlayer::IOSAudioPlayer(const std::function<void(float *, int)> &renderAudio) : renderAudio_(renderAudio)
+IOSAudioPlayer::IOSAudioPlayer(const std::function<void(AudioBus *, int)> &renderAudio) : renderAudio_(renderAudio), audioBus_(nullptr)
{
- RenderAudioBlock renderAudioBlock = ^(float *audioData, int numFrames) {
- renderAudio_(audioData, numFrames);
+ RenderAudioBlock renderAudioBlock = ^(AudioBufferList* outputData, int numFrames) {
+ renderAudio_(audioBus_, numFrames);
+
+ for (int i = 0; i < outputData->mNumberBuffers; i += 1) {
+ float *outputBuffer = (float *)outputData->mBuffers[i].mData;
+
+ memcpy(outputBuffer, audioBus_->getChannel(i)->getData(), sizeof(float) * numFrames);
+ }
};
audioPlayer_ = [[AudioPlayer alloc] initWithRenderAudioBlock:renderAudioBlock];
+ audioBus_ = new AudioBus(getSampleRate(), getBufferSizeInFrames(), CHANNEL_COUNT);
}
IOSAudioPlayer::~IOSAudioPlayer()
{
stop();
[audioPlayer_ cleanup];
-}
-int IOSAudioPlayer::getSampleRate() const
-{
- return [audioPlayer_ getSampleRate];
+ if (audioBus_) {
+ delete audioBus_;
+ audioBus_ = nullptr;
+ }
}
void IOSAudioPlayer::start()
@@ -31,4 +43,15 @@
{
return [audioPlayer_ stop];
}
+
+int IOSAudioPlayer::getSampleRate() const
+{
+ return [audioPlayer_ getSampleRate];
+}
+
+int IOSAudioPlayer::getBufferSizeInFrames() const
+{
+ return [audioPlayer_ getBufferSizeInFrames];
+}
+
} // namespace audioapi
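
Conceptually, the new render block renders into a planar AudioBus and then copies each channel into the matching non-interleaved AudioBufferList buffer. A simplified sketch with stand-in types; the real code operates on AudioBufferList and AudioArray:

```cpp
// Simplified sketch of the per-channel copy done in the render block.
#include <cstddef>
#include <cstring>
#include <vector>

struct OutputChannel {
  float *data;  // stands in for outputData->mBuffers[i].mData
};

void copyBusToOutput(const std::vector<std::vector<float>> &busChannels,
                     std::vector<OutputChannel> &outputChannels,
                     int numFrames) {
  for (std::size_t i = 0; i < outputChannels.size(); i += 1) {
    std::memcpy(outputChannels[i].data, busChannels[i].data(),
                sizeof(float) * numFrames);
  }
}
```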
diff --git a/packages/react-native-audio-api/src/core/AudioScheduledSourceNode.ts b/packages/react-native-audio-api/src/core/AudioScheduledSourceNode.ts
index 244e6ee4..74ce7787 100644
--- a/packages/react-native-audio-api/src/core/AudioScheduledSourceNode.ts
+++ b/packages/react-native-audio-api/src/core/AudioScheduledSourceNode.ts
@@ -22,9 +22,7 @@ export default class AudioScheduledSourceNode extends AudioNode {
}
this.hasBeenStarted = true;
-
(this.node as IAudioScheduledSourceNode).start(when);
- this.hasBeenStarted = true;
}
public stop(when: number = 0): void {