Clean sweep of old, now-unused audio files

This commit is contained in:
Stephen Birarda 2016-02-08 15:36:33 -08:00
parent 7dee3c61f3
commit 445662f5ae
24 changed files with 13 additions and 1910 deletions

View file

@ -119,7 +119,7 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
// we've repeated that frame in a row, we'll gradually fade that repeated frame into silence.
// This improves the perceived quality of the audio slightly.
bool showDebug = false; // (randFloat() < 0.05f);
bool showDebug = false;
float repeatedFrameFadeFactor = 1.0f;
@ -361,69 +361,6 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
}
}
if (!sourceIsSelf && _enableFilter && !streamToAdd->ignorePenumbraFilter()) {
const float TWO_OVER_PI = 2.0f / PI;
const float ZERO_DB = 1.0f;
const float NEGATIVE_ONE_DB = 0.891f;
const float NEGATIVE_THREE_DB = 0.708f;
const float FILTER_GAIN_AT_0 = ZERO_DB; // source is in front
const float FILTER_GAIN_AT_90 = NEGATIVE_ONE_DB; // source is incident to left or right ear
const float FILTER_GAIN_AT_180 = NEGATIVE_THREE_DB; // source is behind
const float FILTER_CUTOFF_FREQUENCY_HZ = 1000.0f;
const float penumbraFilterFrequency = FILTER_CUTOFF_FREQUENCY_HZ; // constant frequency
const float penumbraFilterSlope = NEGATIVE_THREE_DB; // constant slope
float penumbraFilterGainL;
float penumbraFilterGainR;
// variable gain calculation broken down by quadrant
if (-bearingRelativeAngleToSource < -PI_OVER_TWO && -bearingRelativeAngleToSource > -PI) {
penumbraFilterGainL = TWO_OVER_PI *
(FILTER_GAIN_AT_0 - FILTER_GAIN_AT_180) * (-bearingRelativeAngleToSource + PI_OVER_TWO) + FILTER_GAIN_AT_0;
penumbraFilterGainR = TWO_OVER_PI *
(FILTER_GAIN_AT_90 - FILTER_GAIN_AT_180) * (-bearingRelativeAngleToSource + PI_OVER_TWO) + FILTER_GAIN_AT_90;
} else if (-bearingRelativeAngleToSource <= PI && -bearingRelativeAngleToSource > PI_OVER_TWO) {
penumbraFilterGainL = TWO_OVER_PI *
(FILTER_GAIN_AT_180 - FILTER_GAIN_AT_90) * (-bearingRelativeAngleToSource - PI) + FILTER_GAIN_AT_180;
penumbraFilterGainR = TWO_OVER_PI *
(FILTER_GAIN_AT_180 - FILTER_GAIN_AT_0) * (-bearingRelativeAngleToSource - PI) + FILTER_GAIN_AT_180;
} else if (-bearingRelativeAngleToSource <= PI_OVER_TWO && -bearingRelativeAngleToSource > 0) {
penumbraFilterGainL = TWO_OVER_PI *
(FILTER_GAIN_AT_90 - FILTER_GAIN_AT_0) * (-bearingRelativeAngleToSource - PI_OVER_TWO) + FILTER_GAIN_AT_90;
penumbraFilterGainR = FILTER_GAIN_AT_0;
} else {
penumbraFilterGainL = FILTER_GAIN_AT_0;
penumbraFilterGainR = TWO_OVER_PI *
(FILTER_GAIN_AT_0 - FILTER_GAIN_AT_90) * (-bearingRelativeAngleToSource) + FILTER_GAIN_AT_0;
}
if (distanceBetween < RADIUS_OF_HEAD) {
// Diminish effect if source would be inside head
penumbraFilterGainL += (1.0f - penumbraFilterGainL) * (1.0f - distanceBetween / RADIUS_OF_HEAD);
penumbraFilterGainR += (1.0f - penumbraFilterGainR) * (1.0f - distanceBetween / RADIUS_OF_HEAD);
}
bool wantDebug = false;
if (wantDebug) {
qDebug() << "gainL=" << penumbraFilterGainL
<< "gainR=" << penumbraFilterGainR
<< "angle=" << -bearingRelativeAngleToSource;
}
// Get our per listener/source data so we can get our filter
AudioFilterHSF1s& penumbraFilter = listenerNodeData->getListenerSourcePairData(streamUUID)->getPenumbraFilter();
// set the gain on both filter channels
penumbraFilter.setParameters(0, 0, AudioConstants::SAMPLE_RATE, penumbraFilterFrequency, penumbraFilterGainL, penumbraFilterSlope);
penumbraFilter.setParameters(0, 1, AudioConstants::SAMPLE_RATE, penumbraFilterFrequency, penumbraFilterGainR, penumbraFilterSlope);
penumbraFilter.render(_preMixSamples, _preMixSamples, AudioConstants::NETWORK_FRAME_SAMPLES_STEREO / 2);
}
// Actually mix the _preMixSamples into the _mixSamples here.
for (int s = 0; s < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; s++) {
_mixSamples[s] = glm::clamp(_mixSamples[s] + _preMixSamples[s], AudioConstants::MIN_SAMPLE_VALUE,

View file

@ -36,9 +36,9 @@ AudioMixerClientData::~AudioMixerClientData() {
}
// clean up our pair data...
foreach(PerListenerSourcePairData* pairData, _listenerSourcePairData) {
delete pairData;
}
// foreach(PerListenerSourcePairData* pairData, _listenerSourcePairData) {
// delete pairData;
// }
}
AvatarAudioStream* AudioMixerClientData::getAvatarAudioStream() const {
@ -333,12 +333,3 @@ void AudioMixerClientData::printAudioStreamStats(const AudioStreamStats& streamS
formatUsecTime(streamStats._timeGapWindowMax).toLatin1().data(),
formatUsecTime(streamStats._timeGapWindowAverage).toLatin1().data());
}
// Returns the per (listener, source) pair data (the penumbra filter state) for the
// given source stream, creating and storing a new entry on first use. The returned
// pointer is owned by _listenerSourcePairData, not the caller.
PerListenerSourcePairData* AudioMixerClientData::getListenerSourcePairData(const QUuid& sourceUUID) {
    // Single lookup via find()/insert() instead of contains() + two operator[] calls.
    auto it = _listenerSourcePairData.find(sourceUUID);
    if (it == _listenerSourcePairData.end()) {
        it = _listenerSourcePairData.insert(sourceUUID, new PerListenerSourcePairData());
    }
    return it.value();
}

View file

@ -15,25 +15,10 @@
#include <QtCore/QJsonObject>
#include <AABox.h>
#include <AudioFormat.h> // For AudioFilterHSF1s and _penumbraFilter
#include <AudioBuffer.h> // For AudioFilterHSF1s and _penumbraFilter
#include <AudioFilter.h> // For AudioFilterHSF1s and _penumbraFilter
#include <AudioFilterBank.h> // For AudioFilterHSF1s and _penumbraFilter
#include "PositionalAudioStream.h"
#include "AvatarAudioStream.h"
// Per (listener, source) mixing state: currently just the high-shelf "penumbra"
// filter used to attenuate sources relative to the listener's head orientation.
class PerListenerSourcePairData {
public:
    // Size the filter for one network frame of stereo audio (samples per channel).
    PerListenerSourcePairData() {
        _penumbraFilter.initialize(AudioConstants::SAMPLE_RATE, AudioConstants::NETWORK_FRAME_SAMPLES_STEREO / 2);
    };
    // Non-const accessor: callers reconfigure and render through the filter in place.
    AudioFilterHSF1s& getPenumbraFilter() { return _penumbraFilter; }
private:
    AudioFilterHSF1s _penumbraFilter;
};
class AudioMixerClientData : public NodeData {
public:
AudioMixerClientData();
@ -57,16 +42,12 @@ public:
void printUpstreamDownstreamStats() const;
PerListenerSourcePairData* getListenerSourcePairData(const QUuid& sourceUUID);
private:
void printAudioStreamStats(const AudioStreamStats& streamStats) const;
private:
QHash<QUuid, PositionalAudioStream*> _audioStreams; // mic stream stored under key of null UUID
// TODO: how can we prune this hash when a stream is no longer present?
QHash<QUuid, PerListenerSourcePairData*> _listenerSourcePairData;
quint16 _outgoingMixedAudioSequenceNumber;
AudioStreamStats _downstreamAudioStreamStats;

View file

@ -52,7 +52,7 @@ function setupMenus() {
}
if (!Menu.menuExists(ENTITIES_MENU)) {
Menu.addMenu(ENTITIES_MENU);
// NOTE: these menu items aren't currently working. I've temporarily removed them. Will add them back once we
// rewire these to work
/*
@ -66,20 +66,20 @@ function setupMenus() {
Menu.addMenuItem({ menuName: "Developer > Entities", menuItemName: "Disable Light Entities", isCheckable: true, isChecked: false });
*/
}
if (!Menu.menuExists(RENDER_MENU)) {
Menu.addMenu(RENDER_MENU);
createdRenderMenu = true;
}
if (!Menu.menuItemExists(RENDER_MENU, ENTITIES_ITEM)) {
Menu.addMenuItem({ menuName: RENDER_MENU, menuItemName: ENTITIES_ITEM, isCheckable: true, isChecked: Scene.shouldRenderEntities })
}
if (!Menu.menuItemExists(RENDER_MENU, AVATARS_ITEM)) {
Menu.addMenuItem({ menuName: RENDER_MENU, menuItemName: AVATARS_ITEM, isCheckable: true, isChecked: Scene.shouldRenderAvatars })
}
if (!Menu.menuExists(AUDIO_MENU)) {
Menu.addMenu(AUDIO_MENU);
}
@ -114,14 +114,6 @@ Menu.menuItemEvent.connect(function (menuItem) {
Scene.shouldRenderEntities = Menu.isOptionChecked(ENTITIES_ITEM);
} else if (menuItem == AVATARS_ITEM) {
Scene.shouldRenderAvatars = Menu.isOptionChecked(AVATARS_ITEM);
} else if (menuItem == AUDIO_SOURCE_INJECT && !createdGeneratedAudioMenu) {
Audio.injectGeneratedNoise(Menu.isOptionChecked(AUDIO_SOURCE_INJECT));
} else if (menuItem == AUDIO_SOURCE_PINK_NOISE && !createdGeneratedAudioMenu) {
Audio.selectPinkNoise();
Menu.setIsOptionChecked(AUDIO_SOURCE_SINE_440, false);
} else if (menuItem == AUDIO_SOURCE_SINE_440 && !createdGeneratedAudioMenu) {
Audio.selectSine440();
Menu.setIsOptionChecked(AUDIO_SOURCE_PINK_NOISE, false);
} else if (menuItem == AUDIO_STEREO_INPUT) {
Audio.setStereoInput(Menu.isOptionChecked(AUDIO_STEREO_INPUT))
} else if (AUDIO_LISTENER_OPTIONS.indexOf(menuItem) !== -1) {
@ -145,14 +137,14 @@ Scene.shouldRenderEntitiesChanged.connect(function(shouldRenderEntities) {
function scriptEnding() {
Menu.removeMenu(ENTITIES_MENU);
if (createdRenderMenu) {
Menu.removeMenu(RENDER_MENU);
} else {
Menu.removeMenuItem(RENDER_MENU, ENTITIES_ITEM);
Menu.removeMenuItem(RENDER_MENU, AVATARS_ITEM);
}
if (createdGeneratedAudioMenu) {
Audio.injectGeneratedNoise(false);
Menu.removeMenuItem(AUDIO_MENU, AUDIO_SOURCE_INJECT);

View file

@ -94,14 +94,11 @@ AudioClient::AudioClient() :
_shouldEchoLocally(false),
_shouldEchoToServer(false),
_isNoiseGateEnabled(true),
_audioSourceInjectEnabled(false),
_reverb(false),
_reverbOptions(&_scriptReverbOptions),
_inputToNetworkResampler(NULL),
_networkToOutputResampler(NULL),
_loopbackResampler(NULL),
_noiseSourceEnabled(false),
_toneSourceEnabled(true),
_outgoingAvatarAudioSequenceNumber(0),
_audioOutputIODevice(_receivedAudioStream, this),
_stats(&_receivedAudioStream),
@ -139,10 +136,6 @@ AudioClient::~AudioClient() {
void AudioClient::reset() {
_receivedAudioStream.reset();
_stats.reset();
_noiseSource.reset();
_toneSource.reset();
_sourceGain.reset();
_inputGain.reset();
_sourceReverb.reset();
_listenerReverb.reset();
}
@ -432,26 +425,9 @@ void AudioClient::start() {
qCDebug(audioclient) << "Unable to set up audio output because of a problem with output format.";
qCDebug(audioclient) << "The closest format available is" << outputDeviceInfo.nearestFormat(_desiredOutputFormat);
}
if (_audioInput) {
_inputFrameBuffer.initialize( _inputFormat.channelCount(), _audioInput->bufferSize() * 8 );
}
_inputGain.initialize();
_sourceGain.initialize();
_noiseSource.initialize();
_toneSource.initialize();
_sourceGain.setParameters(0.05f, 0.0f);
_inputGain.setParameters(1.0f, 0.0f);
}
void AudioClient::stop() {
_inputFrameBuffer.finalize();
_inputGain.finalize();
_sourceGain.finalize();
_noiseSource.finalize();
_toneSource.finalize();
// "switch" to invalid devices in order to shut down the state
switchInputToAudioDevice(QAudioDeviceInfo());
switchOutputToAudioDevice(QAudioDeviceInfo());
@ -705,24 +681,6 @@ void AudioClient::handleAudioInput() {
const auto inputAudioSamples = std::unique_ptr<int16_t[]>(new int16_t[inputSamplesRequired]);
QByteArray inputByteArray = _inputDevice->readAll();
// Add audio source injection if enabled
if (!_muted && _audioSourceInjectEnabled) {
int16_t* inputFrameData = (int16_t*)inputByteArray.data();
const uint32_t inputFrameCount = inputByteArray.size() / sizeof(int16_t);
_inputFrameBuffer.copyFrames(1, inputFrameCount, inputFrameData, false /*copy in*/);
#if ENABLE_INPUT_GAIN
_inputGain.render(_inputFrameBuffer); // input/mic gain+mute
#endif
if (_toneSourceEnabled) { // sine generator
_toneSource.render(_inputFrameBuffer);
} else if(_noiseSourceEnabled) { // pink noise generator
_noiseSource.render(_inputFrameBuffer);
}
_sourceGain.render(_inputFrameBuffer); // post gain
_inputFrameBuffer.copyFrames(1, inputFrameCount, inputFrameData, true /*copy out*/);
}
handleLocalEchoAndReverb(inputByteArray);
@ -757,12 +715,12 @@ void AudioClient::handleAudioInput() {
_inputFormat, _desiredInputFormat);
// Remove DC offset
if (!_isStereoInput && !_audioSourceInjectEnabled) {
if (!_isStereoInput) {
_inputGate.removeDCOffset(networkAudioSamples, numNetworkSamples);
}
// only impose the noise gate and perform tone injection if we are sending mono audio
if (!_isStereoInput && !_audioSourceInjectEnabled && _isNoiseGateEnabled) {
if (!_isStereoInput && _isNoiseGateEnabled) {
_inputGate.gateSamples(networkAudioSamples, numNetworkSamples);
// if we performed the noise gate we can get values from it instead of enumerating the samples again
@ -886,19 +844,6 @@ void AudioClient::setIsStereoInput(bool isStereoInput) {
}
}
void AudioClient::enableAudioSourceInject(bool enable) {
_audioSourceInjectEnabled = enable;
}
void AudioClient::selectAudioSourcePinkNoise() {
_noiseSourceEnabled = true;
_toneSourceEnabled = false;
}
void AudioClient::selectAudioSourceSine440() {
_toneSourceEnabled = true;
_noiseSourceEnabled = false;
}
bool AudioClient::outputLocalInjector(bool isStereo, AudioInjector* injector) {
if (injector->getLocalBuffer()) {

View file

@ -25,13 +25,7 @@
#include <QtMultimedia/QAudioInput>
#include <AbstractAudioInterface.h>
#include <AudioBuffer.h>
#include <AudioEffectOptions.h>
#include <AudioFormat.h>
#include <AudioGain.h>
#include <AudioRingBuffer.h>
#include <AudioSourceTone.h>
#include <AudioSourceNoise.h>
#include <AudioStreamStats.h>
#include <DependencyManager.h>
@ -152,10 +146,6 @@ public slots:
void audioMixerKilled();
void toggleMute();
virtual void enableAudioSourceInject(bool enable);
virtual void selectAudioSourcePinkNoise();
virtual void selectAudioSourceSine440();
virtual void setIsStereoInput(bool stereo);
void toggleAudioNoiseReduction() { _isNoiseGateEnabled = !_isNoiseGateEnabled; }
@ -256,7 +246,6 @@ private:
bool _shouldEchoLocally;
bool _shouldEchoToServer;
bool _isNoiseGateEnabled;
bool _audioSourceInjectEnabled;
bool _reverb;
AudioEffectOptions _scriptReverbOptions;
@ -284,23 +273,6 @@ private:
int calculateNumberOfFrameSamples(int numBytes) const;
float calculateDeviceToNetworkInputRatio() const;
// Input framebuffer
AudioBufferFloat32 _inputFrameBuffer;
// Input gain
AudioGain _inputGain;
// Post tone/pink noise generator gain
AudioGain _sourceGain;
// Pink noise source
bool _noiseSourceEnabled;
AudioSourcePinkNoise _noiseSource;
// Tone source
bool _toneSourceEnabled;
AudioSourceTone _toneSource;
quint16 _outgoingAvatarAudioSequenceNumber;
AudioOutputIODevice _audioOutputIODevice;

View file

@ -33,10 +33,6 @@ public:
public slots:
virtual bool outputLocalInjector(bool isStereo, AudioInjector* injector) = 0;
virtual void enableAudioSourceInject(bool enable) = 0;
virtual void selectAudioSourcePinkNoise() = 0;
virtual void selectAudioSourceSine440() = 0;
virtual void setIsStereoInput(bool stereo) = 0;
};

View file

@ -1,462 +0,0 @@
//
// AudioBuffer.h
// libraries/audio/src
//
// Created by Craig Hansen-Sturm on 8/29/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioBuffer_h
#define hifi_AudioBuffer_h
#include <typeinfo>
#include <QDebug>
#include "AudioFormat.h"
// Owns a non-interleaved (planar) 2-D sample buffer: _frameBuffer[channel][frame].
// The "*Max" members record the allocated capacity; _channelCount/_frameCount track
// the currently-used portion, which copyFrames() may shrink below capacity.
template< typename T >
class AudioFrameBuffer {
protected:
    uint32_t _channelCount;     // channels currently in use (<= _channelCountMax)
    uint32_t _channelCountMax;  // allocated channel capacity
    uint32_t _frameCount;       // frames currently in use (<= _frameCountMax)
    uint32_t _frameCountMax;    // allocated frame capacity
    T** _frameBuffer;           // heap array of per-channel sample arrays (NULL when unallocated)

    void allocateFrames();
    void deallocateFrames();

public:
    AudioFrameBuffer();
    AudioFrameBuffer(const uint32_t channelCount, const uint32_t frameCount);
    virtual ~AudioFrameBuffer();

    // (Re)allocate to the given geometry, releasing any previous storage.
    void initialize(const uint32_t channelCount, const uint32_t frameCount);
    // Release storage and zero all counts.
    void finalize();

    T**& getFrameData();
    uint32_t getChannelCount();
    uint32_t getFrameCount();

    // Copy interleaved frames in (copyOut == false) or out (copyOut == true),
    // converting between sample types S and T where supported.
    template< typename S >
    void copyFrames(uint32_t channelCount, const uint32_t frameCount, S* frames, const bool copyOut = false);
    void zeroFrames();
};
// Default-construct an empty, unallocated buffer.
// Fix: _channelCountMax was missing from the init list, leaving it indeterminate.
// Today every use of it happens to be guarded by a _frameBuffer NULL check, but an
// uninitialized capacity is a latent hazard (and flagged by static analyzers).
template< typename T >
AudioFrameBuffer< T >::AudioFrameBuffer() :
    _channelCount(0),
    _channelCountMax(0),
    _frameCount(0),
    _frameCountMax(0),
    _frameBuffer(NULL) {
}
// Construct and immediately allocate channelCount x frameCount samples
// (contents uninitialized; call zeroFrames() for silence).
template< typename T >
AudioFrameBuffer< T >::AudioFrameBuffer(const uint32_t channelCount, const uint32_t frameCount) :
    _channelCount(channelCount),
    _channelCountMax(channelCount),
    _frameCount(frameCount),
    _frameCountMax(frameCount),
    _frameBuffer(NULL) {
    allocateFrames();
}
// Release storage via finalize(); safe on never-allocated buffers because
// deallocateFrames() no-ops when _frameBuffer is NULL.
template< typename T >
AudioFrameBuffer< T >::~AudioFrameBuffer() {
    finalize();
}
// Allocate one sample array per channel at full capacity. Samples are left
// uninitialized; callers that need silence must call zeroFrames().
template< typename T >
void AudioFrameBuffer< T >::allocateFrames() {
    _frameBuffer = new T*[_channelCountMax];
    for (uint32_t i = 0; i < _channelCountMax; ++i) {
        _frameBuffer[i] = new T[_frameCountMax];
    }
}
// Free every channel array and then the channel-pointer array; no-op when already
// unallocated. Always leaves _frameBuffer NULL so repeated calls are safe.
template< typename T >
void AudioFrameBuffer< T >::deallocateFrames() {
    if (_frameBuffer) {
        for (uint32_t i = 0; i < _channelCountMax; ++i) {
            delete[] _frameBuffer[i];
        }
        delete[] _frameBuffer;
    }
    _frameBuffer = NULL;
}
// (Re)allocate to the requested geometry, releasing any existing storage first.
template< typename T >
void AudioFrameBuffer< T >::initialize(const uint32_t channelCount, const uint32_t frameCount) {
    if (_frameBuffer) {
        finalize();
    }
    _channelCount = channelCount;
    _channelCountMax = channelCount;
    _frameCount = frameCount;
    _frameCountMax = frameCount;
    allocateFrames();
}
// Release storage and reset all counts and capacities to zero.
template< typename T >
void AudioFrameBuffer< T >::finalize() {
    deallocateFrames();
    _channelCount = 0;
    _channelCountMax = 0;
    _frameCount = 0;
    _frameCountMax = 0;
}
// Raw access to the planar channel arrays; non-const reference so DSP code
// (e.g. filters) can render directly into the buffer in place.
template< typename T >
inline T**& AudioFrameBuffer< T >::getFrameData() {
    return _frameBuffer;
}
// Channels currently in use (may be less than the allocated capacity).
template< typename T >
inline uint32_t AudioFrameBuffer< T >::getChannelCount() {
    return _channelCount;
}
// Frames currently in use (may be less than the allocated capacity).
template< typename T >
inline uint32_t AudioFrameBuffer< T >::getFrameCount() {
    return _frameCount;
}
// Fill every allocated channel with zero bytes; no-op when unallocated.
// (All-zero bytes is a valid 0.0f for IEEE-754 float sample types.)
template< typename T >
inline void AudioFrameBuffer< T >::zeroFrames() {
    if (!_frameBuffer) {
        return;
    }
    for (uint32_t i = 0; i < _channelCountMax; ++i) {
        memset(_frameBuffer[i], 0, sizeof(T)*_frameCountMax);
    }
}
// Copy interleaved sample frames between the caller's buffer and this planar buffer.
//   channelCount/frameCount : geometry of 'frames' (frame-major interleaved)
//   copyOut == false        : de-interleave 'frames' into _frameBuffer
//   copyOut == true         : interleave _frameBuffer out into 'frames'
// Supported type pairs: T == S (straight copy) and float32_t <-> int16_t
// (scaled by 2^15); any other combination asserts.
// On a geometry overflow the request is logged and the counts are clamped to capacity.
//
// Fix/cleanup: the four hand-unrolled 16-frame mono/stereo fast paths duplicated the
// generic interleave loops below (~280 lines); the generic loops are behaviorally
// identical for every geometry, so the unrolled copies were removed.
template< typename T >
template< typename S >
inline void AudioFrameBuffer< T >::copyFrames(uint32_t channelCount, const uint32_t frameCount, S* frames, const bool copyOut) {
    if ( !_frameBuffer || !frames) {
        return;
    }

    if (channelCount <=_channelCountMax && frameCount <=_frameCountMax) {
        // We always allow copying fewer frames than we have allocated
        _frameCount = frameCount;
        _channelCount = channelCount;
    } else {
        qDebug() << "Audio framing error: _channelCount="
                 << _channelCount
                 << "channelCountMax="
                 << _channelCountMax
                 << "_frameCount="
                 << _frameCount
                 << "frameCountMax="
                 << _frameCountMax;
        _channelCount = std::min(_channelCount,_channelCountMax);
        _frameCount = std::min(_frameCount,_frameCountMax);
    }

    // Fixed-point scale for float32_t <-> int16_t conversion (2^15 for 16-bit samples).
    const int scale = (1 << ((8 * sizeof(S)) - 1));
    const bool sameType = (typeid(T) == typeid(S));
    const bool convertFloat32Int16 = (typeid(T) == typeid(float32_t) && typeid(S) == typeid(int16_t));

    if (copyOut) {
        S* dst = frames;
        if (sameType) { // same sample type: straight interleave out
            for (uint32_t i = 0; i < _frameCount; ++i) {
                for (uint32_t j = 0; j < _channelCount; ++j) {
                    *dst++ = _frameBuffer[j][i];
                }
            }
        } else if (convertFloat32Int16) { // convert float32_t -> int16_t on the way out
            for (uint32_t i = 0; i < _frameCount; ++i) {
                for (uint32_t j = 0; j < _channelCount; ++j) {
                    *dst++ = (S)(_frameBuffer[j][i] * scale);
                }
            }
        } else {
            assert(0); // currently unsupported conversion
        }
    } else { // copyIn
        S* src = frames;
        if (sameType) { // same sample type: straight de-interleave in
            for (uint32_t i = 0; i < _frameCount; ++i) {
                for (uint32_t j = 0; j < _channelCount; ++j) {
                    _frameBuffer[j][i] = *src++;
                }
            }
        } else if (convertFloat32Int16) { // convert int16_t -> float32_t on the way in
            for (uint32_t i = 0; i < _frameCount; ++i) {
                for (uint32_t j = 0; j < _channelCount; ++j) {
                    _frameBuffer[j][i] = ((T)(*src++)) / scale;
                }
            }
        } else {
            assert(0); // currently unsupported conversion
        }
    }
}
// Concrete buffer instantiations used by the audio pipeline.
typedef AudioFrameBuffer< float32_t > AudioBufferFloat32;
typedef AudioFrameBuffer< int32_t > AudioBufferSInt32;
#endif // hifi_AudioBuffer_h

View file

@ -1,315 +0,0 @@
//
// AudioFilter.h
// hifi
//
// Created by Craig Hansen-Sturm on 8/9/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioFilter_h
#define hifi_AudioFilter_h
#include <NumericalConstants.h>
// Implements a standard biquad filter in "Direct Form 1"
// Reference http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt
//
class AudioBiquad {
    //
    // private data
    //
    // Difference equation: y[n] = a0*x[n] + a1*x[n-1] + a2*x[n-2] - b1*y[n-1] - b2*y[n-2].
    // NOTE: this class names the feedforward coefficients _a* and the feedback
    // coefficients _b* -- the reverse of the cookbook's a/b convention.
    float32_t _a0; // gain
    float32_t _a1; // feedforward 1
    float32_t _a2; // feedforward 2
    float32_t _b1; // feedback 1
    float32_t _b2; // feedback 2

    // Delay line: previous two inputs (x) and previous two outputs (y).
    float32_t _xm1;
    float32_t _xm2;
    float32_t _ym1;
    float32_t _ym2;

public:
    //
    // ctor/dtor
    //
    // Starts as an all-zero filter with a cleared delay line; configure via setParameters().
    AudioBiquad() :
        _xm1(0.),
        _xm2(0.),
        _ym1(0.),
        _ym2(0.) {
        setParameters(0.,0.,0.,0.,0.);
    }

    ~AudioBiquad() {
    }

    //
    // public interface
    //
    void setParameters(const float32_t a0, const float32_t a1, const float32_t a2, const float32_t b1, const float32_t b2) {
        _a0 = a0; _a1 = a1; _a2 = a2; _b1 = b1; _b2 = b2;
    }

    void getParameters(float32_t& a0, float32_t& a1, float32_t& a2, float32_t& b1, float32_t& b2) {
        a0 = _a0; a1 = _a1; a2 = _a2; b1 = _b1; b2 = _b2;
    }

    // Run the filter over 'frames' samples. 'in' and 'out' may point at the same
    // buffer: each input sample is read before its output slot is written.
    void render(const float32_t* in, float32_t* out, const uint32_t frames) {
        float32_t x;
        float32_t y;
        for (uint32_t i = 0; i < frames; ++i) {
            x = *in++;
            // biquad
            y = (_a0 * x)
              + (_a1 * _xm1)
              + (_a2 * _xm2)
              - (_b1 * _ym1)
              - (_b2 * _ym2);
            // Flush near-zero results to exactly zero so denormals don't
            // accumulate in the feedback path.
            y = (y >= -EPSILON && y < EPSILON) ? 0.0f : y; // clamp to 0
            // update delay line
            _xm2 = _xm1;
            _xm1 = x;
            _ym2 = _ym1;
            _ym1 = y;
            *out++ = y;
        }
    }

    // Clear the delay-line state; coefficients are left unchanged.
    void reset() {
        _xm1 = _xm2 = _ym1 = _ym2 = 0.;
    }
};
//
// Implements common base class interface for all Audio Filter Objects
//
// CRTP base class: the derived type T supplies updateKernel(), which converts
// (_sampleRate, _frequency, _gain, _slope) into biquad coefficients.
template< class T >
class AudioFilter {
protected:
    //
    // data
    //
    AudioBiquad _kernel;    // biquad that performs the actual rendering
    float32_t _sampleRate;  // Hz (clamped to >= 1 by setParameters)
    float32_t _frequency;   // corner frequency in Hz (clamped to >= 2)
    float32_t _gain;        // linear amplitude gain (clamped to >= 0)
    float32_t _slope;       // shelf slope parameter (clamped to > 0)

    //
    // helpers
    //
    // CRTP static dispatch to the derived class -- no virtual-call overhead.
    void updateKernel() {
        static_cast<T*>(this)->updateKernel();
    }

public:
    //
    // ctor/dtor
    //
    AudioFilter() {
        setParameters(0.,0.,0.,0.);
    }

    ~AudioFilter() {
    }

    //
    // public interface
    //
    // Store the (sanitized) parameters and recompute the biquad coefficients.
    // Inputs are clamped to safe minimums to avoid divide-by-zero / NaN in updateKernel().
    void setParameters(const float32_t sampleRate, const float32_t frequency, const float32_t gain, const float32_t slope) {
        _sampleRate = std::max(sampleRate, 1.0f);
        _frequency = std::max(frequency, 2.0f);
        _gain = std::max(gain, 0.0f);
        _slope = std::max(slope, 0.00001f);
        updateKernel();
    }

    void getParameters(float32_t& sampleRate, float32_t& frequency, float32_t& gain, float32_t& slope) {
        sampleRate = _sampleRate; frequency = _frequency; gain = _gain; slope = _slope;
    }

    // Filter 'frames' samples from in to out (delegates to the biquad kernel).
    void render(const float32_t* in, float32_t* out, const uint32_t frames) {
        _kernel.render(in,out,frames);
    }

    // Clear the kernel's delay-line state; parameters/coefficients unchanged.
    void reset() {
        _kernel.reset();
    }
};
//
// Implements a low-shelf filter using a biquad
//
class AudioFilterLSF : public AudioFilter< AudioFilterLSF >
{
public:
    //
    // helpers
    //
    // Audio-EQ-Cookbook low-shelf coefficients: _gain is used directly as the
    // cookbook's amplitude A, and all coefficients are normalized by a0 before
    // being loaded into the kernel.
    void updateKernel() {
        const float32_t a = _gain;
        const float32_t aAdd1 = a + 1.0f;
        const float32_t aSub1 = a - 1.0f;
        const float32_t omega = TWO_PI * _frequency / _sampleRate;
        const float32_t aAdd1TimesCosOmega = aAdd1 * cosf(omega);
        const float32_t aSub1TimesCosOmega = aSub1 * cosf(omega);
        const float32_t alpha = 0.5f * sinf(omega) / _slope;
        const float32_t zeta = 2.0f * sqrtf(a) * alpha;
        /*
        b0 = A*( (A+1) - (A-1)*cos(w0) + 2*sqrt(A)*alpha )
        b1 = 2*A*( (A-1) - (A+1)*cos(w0) )
        b2 = A*( (A+1) - (A-1)*cos(w0) - 2*sqrt(A)*alpha )
        a0 = (A+1) + (A-1)*cos(w0) + 2*sqrt(A)*alpha
        a1 = -2*( (A-1) + (A+1)*cos(w0) )
        a2 = (A+1) + (A-1)*cos(w0) - 2*sqrt(A)*alpha
        */
        const float32_t b0 = +1.0f * (aAdd1 - aSub1TimesCosOmega + zeta) * a;
        const float32_t b1 = +2.0f * (aSub1 - aAdd1TimesCosOmega + 0.0f) * a;
        const float32_t b2 = +1.0f * (aAdd1 - aSub1TimesCosOmega - zeta) * a;
        const float32_t a0 = +1.0f * (aAdd1 + aSub1TimesCosOmega + zeta);
        const float32_t a1 = -2.0f * (aSub1 + aAdd1TimesCosOmega + 0.0f);
        const float32_t a2 = +1.0f * (aAdd1 + aSub1TimesCosOmega - zeta);
        // Normalize by a0 so the kernel's leading feedback coefficient is implicit 1.
        const float32_t normA0 = 1.0f / a0;
        _kernel.setParameters(b0 * normA0, b1 * normA0 , b2 * normA0, a1 * normA0, a2 * normA0);
    }
};
//
// Implements a hi-shelf filter using a biquad
//
class AudioFilterHSF : public AudioFilter< AudioFilterHSF >
{
public:

    //
    // helpers
    //

    // Rebuilds the biquad coefficients from _sampleRate/_frequency/_gain/_slope
    // following the "high shelf" recipe of the RBJ Audio EQ Cookbook:
    //
    //   b0 =    A*( (A+1) + (A-1)*cos(w0) + 2*sqrt(A)*alpha )
    //   b1 = -2*A*( (A-1) + (A+1)*cos(w0) )
    //   b2 =    A*( (A+1) + (A-1)*cos(w0) - 2*sqrt(A)*alpha )
    //   a0 =        (A+1) - (A-1)*cos(w0) + 2*sqrt(A)*alpha
    //   a1 =    2*( (A-1) - (A+1)*cos(w0) )
    //   a2 =        (A+1) - (A-1)*cos(w0) - 2*sqrt(A)*alpha
    //
    void updateKernel() {
        const float32_t shelfGain = _gain;
        const float32_t gainPlus1 = shelfGain + 1.0f;
        const float32_t gainMinus1 = shelfGain - 1.0f;
        const float32_t w0 = TWO_PI * _frequency / _sampleRate;
        const float32_t gainPlus1Cos = gainPlus1 * cosf(w0);
        const float32_t gainMinus1Cos = gainMinus1 * cosf(w0);
        const float32_t alpha = 0.5f * sinf(w0) / _slope;
        const float32_t beta = 2.0f * sqrtf(shelfGain) * alpha;

        const float32_t b0 = (gainPlus1 + gainMinus1Cos + beta) * shelfGain;
        const float32_t b1 = -2.0f * (gainMinus1 + gainPlus1Cos) * shelfGain;
        const float32_t b2 = (gainPlus1 + gainMinus1Cos - beta) * shelfGain;
        const float32_t a0 = gainPlus1 - gainMinus1Cos + beta;
        const float32_t a1 = 2.0f * (gainMinus1 - gainPlus1Cos);
        const float32_t a2 = gainPlus1 - gainMinus1Cos - beta;

        // normalize everything by a0 before handing the coefficients to the kernel
        const float32_t scale = 1.0f / a0;
        _kernel.setParameters(b0 * scale, b1 * scale, b2 * scale, a1 * scale, a2 * scale);
    }
};
//
// Implements a all-pass filter using a biquad
//
class AudioFilterALL : public AudioFilter< AudioFilterALL >
{
public:

    //
    // helpers
    //

    // Rebuilds the biquad coefficients from the RBJ Audio EQ Cookbook "all-pass"
    // recipe (unity magnitude response, frequency-dependent phase):
    //
    //   b0 = 1 - alpha      a0 = 1 + alpha
    //   b1 = -2*cos(w0)     a1 = -2*cos(w0)
    //   b2 = 1 + alpha      a2 = 1 - alpha
    //
    void updateKernel() {
        const float32_t w0 = TWO_PI * _frequency / _sampleRate;
        const float32_t cosW0 = cosf(w0);
        const float32_t alpha = 0.5f * sinf(w0) / _slope;

        const float32_t b0 = 1.0f - alpha;
        const float32_t b1 = -2.0f * cosW0;
        const float32_t b2 = 1.0f + alpha;
        const float32_t a0 = 1.0f + alpha;
        const float32_t a2 = 1.0f - alpha;

        // a1 equals b1 for an all-pass, so b1 is reused in the a1 slot below
        const float32_t scale = 1.0f / a0;
        _kernel.setParameters(b0 * scale, b1 * scale, b2 * scale, b1 * scale, a2 * scale);
    }
};
//
// Implements a single-band parametric EQ using a biquad "peaking EQ" configuration
//
class AudioFilterPEQ : public AudioFilter< AudioFilterPEQ >
{
public:

    //
    // helpers
    //

    // Rebuilds the biquad coefficients from the RBJ Audio EQ Cookbook
    // "peaking EQ" recipe (here _slope plays the role of Q):
    //
    //   b0 = 1 + alpha*A      a0 = 1 + alpha/A
    //   b1 = -2*cos(w0)       a1 = -2*cos(w0)
    //   b2 = 1 - alpha*A      a2 = 1 - alpha/A
    //
    void updateKernel() {
        const float32_t peakGain = _gain;
        const float32_t w0 = TWO_PI * _frequency / _sampleRate;
        const float32_t cosW0 = cosf(w0);
        const float32_t alpha = 0.5f * sinf(w0) / _slope;
        const float32_t alphaTimesGain = alpha * peakGain;
        const float32_t alphaOverGain = alpha / peakGain;

        const float32_t b0 = 1.0f + alphaTimesGain;
        const float32_t b1 = -2.0f * cosW0;
        const float32_t b2 = 1.0f - alphaTimesGain;
        const float32_t a0 = 1.0f + alphaOverGain;
        const float32_t a2 = 1.0f - alphaOverGain;

        // a1 equals b1 for this topology, so b1 is reused in the a1 slot below
        const float32_t scale = 1.0f / a0;
        _kernel.setParameters(b0 * scale, b1 * scale, b2 * scale, b1 * scale, a2 * scale);
    }
};
#endif // hifi_AudioFilter_h

View file

@ -1,44 +0,0 @@
//
// AudioFilterBank.cpp
// libraries/audio/src
//
// Created by Craig Hansen-Sturm on 8/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "AudioFilterBank.h"
// Single-profile banks: only row 0 ("flat response") is spelled out; the remaining
// _profileCount - 1 rows are zero-initialized by aggregate initialization and are
// not meaningful presets.
template<>
AudioFilterLSF1s::FilterParameter
    AudioFilterLSF1s::_profiles[ AudioFilterLSF1s::_profileCount ][ AudioFilterLSF1s::_filterCount ] = {
    //  Freq     Gain  Slope
    { { 1000.0f, 1.0f, 1.0f } } // flat response (default)
};

template<>
AudioFilterHSF1s::FilterParameter
    AudioFilterHSF1s::_profiles[ AudioFilterHSF1s::_profileCount ][ AudioFilterHSF1s::_filterCount ] = {
    //  Freq     Gain  Slope
    { { 1000.0f, 1.0f, 1.0f } } // flat response (default)
};

template<>
AudioFilterPEQ1s::FilterParameter
    AudioFilterPEQ1s::_profiles[ AudioFilterPEQ1s::_profileCount ][ AudioFilterPEQ1s::_filterCount ] = {
    //  Freq     Gain  Q
    { { 1000.0f, 1.0f, 1.0f } } // flat response (default)
};

// Three-band mono EQ: all four profile rows are defined (flat, treble cut,
// bass cut, and a "smiley" loudness curve).
template<>
AudioFilterPEQ3m::FilterParameter
    AudioFilterPEQ3m::_profiles[ AudioFilterPEQ3m::_profileCount ][ AudioFilterPEQ3m::_filterCount ] = {
    //  Freq     Gain  Q        Freq      Gain  Q        Freq      Gain   Q
    { { 300.0f, 1.0f, 1.0f }, { 1000.0f, 1.0f, 1.0f }, { 4000.0f, 1.0f, 1.0f } }, // flat response (default)
    { { 300.0f, 1.0f, 1.0f }, { 1000.0f, 1.0f, 1.0f }, { 4000.0f, 0.1f, 1.0f } }, // treble cut
    { { 300.0f, 0.1f, 1.0f }, { 1000.0f, 1.0f, 1.0f }, { 4000.0f, 1.0f, 1.0f } }, // bass cut
    { { 300.0f, 1.5f, 0.71f }, { 1000.0f, 0.5f, 1.0f }, { 4000.0f, 1.50f, 0.71f } } // smiley curve
};

View file

@ -1,188 +0,0 @@
//
// AudioFilterBank.h
// libraries/audio/src
//
// Created by Craig Hansen-Sturm on 8/23/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioFilterBank_h
#define hifi_AudioFilterBank_h
#include <stdint.h>
#include "AudioBuffer.h"
#include "AudioFilter.h"
#include "AudioFormat.h"
//
// Helper/convenience class that implements a bank of Filter objects
//
template< typename T, const uint32_t N, const uint32_t C >
class AudioFilterBank {

    //
    // types
    //
    struct FilterParameter {
        float32_t _p1;  // frequency in Hz (see the profile tables in AudioFilterBank.cpp)
        float32_t _p2;  // gain
        float32_t _p3;  // slope or Q, depending on the filter type T
    };

    //
    // private static data
    //
    static const uint32_t _filterCount = N;
    static const uint32_t _channelCount = C;
    static const uint32_t _profileCount = 4;

    // Preset parameter tables; per-specialization definitions live in AudioFilterBank.cpp.
    static FilterParameter _profiles[ _profileCount ][ _filterCount ];

    //
    // private data
    //
    T _filters[ _filterCount ][ _channelCount ];
    float32_t* _buffer[ _channelCount ];    // per-channel scratch space for the int16_t render path
    float32_t _sampleRate;
    uint32_t _frameCount;                   // capacity (in frames) of each scratch buffer

public:
    //
    // ctor/dtor
    //
    AudioFilterBank() :
        _sampleRate(0.0f),
        _frameCount(0) {
        for (uint32_t i = 0; i < _channelCount; ++i) {
            _buffer[ i ] = NULL;
        }
    }

    ~AudioFilterBank() {
        finalize();
    }

    //
    // public interface
    //

    // Allocates per-channel scratch buffers sized for frameCount frames, resets all
    // filter state, and loads the default "flat response" profile.
    void initialize(const float32_t sampleRate, const uint32_t frameCount = 0) {
        finalize();

        for (uint32_t i = 0; i < _channelCount; ++i) {
            _buffer[i] = (float32_t*)malloc(frameCount * sizeof(float32_t));
        }

        _sampleRate = sampleRate;
        _frameCount = frameCount;

        reset();
        loadProfile(0); // load default profile "flat response" into the bank (see AudioFilterBank.cpp)
    }

    // Releases the scratch buffers; safe to call repeatedly.
    void finalize() {
        for (uint32_t i = 0; i < _channelCount; ++i) {
            if (_buffer[i]) {
                free (_buffer[i]);
                _buffer[i] = NULL;
            }
        }
    }

    // Applies one of the preset parameter tables to every stage and channel.
    // Out-of-range profile indices are ignored.
    void loadProfile(int profileIndex) {
        if (profileIndex >= 0 && profileIndex < (int)_profileCount) {
            for (uint32_t i = 0; i < _filterCount; ++i) {
                FilterParameter p = _profiles[profileIndex][i];

                for (uint32_t j = 0; j < _channelCount; ++j) {
                    _filters[i][j].setParameters(_sampleRate,p._p1,p._p2,p._p3);
                }
            }
        }
    }

    // Updates one filter stage/channel; out-of-range indices are ignored.
    // NOTE: the original code also tested `filterChannel >= 0`, which is always
    // true for an unsigned type (tautological comparison) and has been removed.
    void setParameters(uint32_t filterStage, uint32_t filterChannel, const float32_t sampleRate, const float32_t frequency,
                       const float32_t gain, const float32_t slope) {
        if (filterStage < _filterCount && filterChannel < _channelCount) {
            _filters[filterStage][filterChannel].setParameters(sampleRate,frequency,gain,slope);
        }
    }

    // Reads back one filter stage/channel's parameters; out-of-range indices leave
    // the output references untouched. (Same tautological test removed as above.)
    void getParameters(uint32_t filterStage, uint32_t filterChannel, float32_t& sampleRate, float32_t& frequency,
                       float32_t& gain, float32_t& slope) {
        if (filterStage < _filterCount && filterChannel < _channelCount) {
            _filters[filterStage][filterChannel].getParameters(sampleRate,frequency,gain,slope);
        }
    }

    // Filters interleaved int16_t audio via the float scratch buffers.
    // Silently does nothing when frameCount exceeds the capacity given to initialize().
    void render(const int16_t* in, int16_t* out, const uint32_t frameCount) {
        if (frameCount > _frameCount) {
            return;
        }

        const int scale = (1 << ((8 * sizeof(int16_t)) - 1));

        // de-interleave and convert int16_t to float32 (normalized to -1. ... 1.)
        for (uint32_t i = 0; i < frameCount; ++i) {
            for (uint32_t j = 0; j < _channelCount; ++j) {
                _buffer[j][i] = ((float)(*in++)) * (1.0f / scale);
            }
        }

        // now step through each filter
        for (uint32_t i = 0; i < _channelCount; ++i) {
            for (uint32_t j = 0; j < _filterCount; ++j) {
                _filters[j][i].render( &_buffer[i][0], &_buffer[i][0], frameCount );
            }
        }

        // convert float32 to int16_t and interleave
        for (uint32_t i = 0; i < frameCount; ++i) {
            for (uint32_t j = 0; j < _channelCount; ++j) {
                *out++ = (int16_t)(_buffer[j][i] * scale);
            }
        }
    }

    // Filters a non-interleaved float frame buffer in place.
    void render(AudioBufferFloat32& frameBuffer) {
        float32_t** samples = frameBuffer.getFrameData();
        for (uint32_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
            for (uint32_t i = 0; i < _filterCount; ++i) {
                _filters[i][j].render( samples[j], samples[j], frameBuffer.getFrameCount() );
            }
        }
    }

    // Clears the state of every filter in the bank.
    void reset() {
        for (uint32_t i = 0; i < _filterCount; ++i) {
            for (uint32_t j = 0; j < _channelCount; ++j) {
                _filters[i][j].reset();
            }
        }
    }
};
//
// Specializations of AudioFilterBank
//
// Naming convention: <filter type><band count><m = mono | s = stereo>
typedef AudioFilterBank< AudioFilterLSF, 1, 1> AudioFilterLSF1m; // mono bank with one band of LSF
typedef AudioFilterBank< AudioFilterLSF, 1, 2> AudioFilterLSF1s; // stereo bank with one band of LSF
typedef AudioFilterBank< AudioFilterHSF, 1, 1> AudioFilterHSF1m; // mono bank with one band of HSF
typedef AudioFilterBank< AudioFilterHSF, 1, 2> AudioFilterHSF1s; // stereo bank with one band of HSF
typedef AudioFilterBank< AudioFilterPEQ, 1, 1> AudioFilterPEQ1m; // mono bank with one band of PEQ
typedef AudioFilterBank< AudioFilterPEQ, 2, 1> AudioFilterPEQ2m; // mono bank with two bands of PEQ
typedef AudioFilterBank< AudioFilterPEQ, 3, 1> AudioFilterPEQ3m; // mono bank with three bands of PEQ
typedef AudioFilterBank< AudioFilterPEQ, 1, 2> AudioFilterPEQ1s; // stereo bank with one band of PEQ
typedef AudioFilterBank< AudioFilterPEQ, 2, 2> AudioFilterPEQ2s; // stereo bank with two bands of PEQ
typedef AudioFilterBank< AudioFilterPEQ, 3, 2> AudioFilterPEQ3s; // stereo bank with three bands of PEQ
// etc....
#endif // hifi_AudioFilter_h

View file

@ -1,90 +0,0 @@
//
// AudioFormat.h
// libraries/audio/src
//
// Created by Craig Hansen-Sturm on 8/28/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioFormat_h
#define hifi_AudioFormat_h
#ifndef _FLOAT32_T
#define _FLOAT32_T
typedef float float32_t;
#endif
#ifndef _FLOAT64_T
#define _FLOAT64_T
typedef double float64_t;
#endif
#include <assert.h>
#include <cstring>
#include "AudioConstants.h"
//
// Audio format structure (currently for uncompressed streams only)
//
struct AudioFormat {

    struct Flags {
        uint32_t _isFloat : 1;
        uint32_t _isSigned : 1;
        uint32_t _isInterleaved : 1;
        uint32_t _isBigEndian : 1;
        uint32_t _isPacked : 1;
        uint32_t _reserved : 27;
    } _flags;

    uint32_t _bytesPerFrame;
    uint32_t _channelsPerFrame;
    uint32_t _bitsPerChannel;
    float64_t _sampleRate;

    // Zero-fills the entire object -- including padding bytes -- which keeps the
    // raw-memory copy/compare operators below well-defined.
    AudioFormat() {
        memset(this, 0, sizeof(*this));
    }

    ~AudioFormat() { }

    AudioFormat& operator=(const AudioFormat& fmt) {
        memcpy(this, &fmt, sizeof(*this));
        return *this;
    }

    // FIX: const-qualified (the originals were not, so they could not be invoked
    // through a const object or reference). Comparison is bytewise; that is safe
    // here because every instance starts out fully zeroed by the constructor.
    bool operator==(const AudioFormat& fmt) const {
        return memcmp(this, &fmt, sizeof(*this)) == 0;
    }

    bool operator!=(const AudioFormat& fmt) const {
        return memcmp(this, &fmt, sizeof(*this)) != 0;
    }

    // Canonical 32-bit float format at the engine sample rate (1 or 2 channels);
    // stereo data is flagged interleaved.
    void setCanonicalFloat32(uint32_t channels) {
        assert(channels > 0 && channels <= 2);
        _sampleRate = AudioConstants::SAMPLE_RATE;
        _bitsPerChannel = sizeof(float32_t) * 8;
        _channelsPerFrame = channels;
        _bytesPerFrame = _channelsPerFrame * _bitsPerChannel / 8;
        _flags._isFloat = true;
        _flags._isInterleaved = _channelsPerFrame > 1;
    }

    // Canonical signed 16-bit integer format at the engine sample rate (1 or 2
    // channels); stereo data is flagged interleaved.
    void setCanonicalInt16(uint32_t channels) {
        assert(channels > 0 && channels <= 2);
        _sampleRate = AudioConstants::SAMPLE_RATE;
        _bitsPerChannel = sizeof(int16_t) * 8;
        _channelsPerFrame = channels;
        _bytesPerFrame = _channelsPerFrame * _bitsPerChannel / 8;
        _flags._isSigned = true;
        _flags._isInterleaved = _channelsPerFrame > 1;
    }
};
#endif // hifi_AudioFormat_h

View file

@ -1,48 +0,0 @@
//
// AudioGain.cpp
// libraries/audio/src
//
// Created by Craig Hansen-Sturm on 9/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <assert.h>
#include <algorithm>
#include <math.h>
#include "AudioFormat.h"
#include "AudioBuffer.h"
#include "AudioGain.h"
AudioGain::AudioGain() {
    initialize();
}

AudioGain::~AudioGain() {
    finalize();
}

// Establish the defaults: unity gain, mute disengaged.
void AudioGain::initialize() {
    setParameters(1.0f, 0.0f);
}

void AudioGain::finalize() {
}

// Returning to the defaults is the same as initializing.
void AudioGain::reset() {
    initialize();
}

// Gain is clamped into [0.0, 1.0]; any non-zero mute value engages the mute.
void AudioGain::setParameters(const float gain, const float mute) {
    _gain = std::min(std::max(gain, 0.0f), 1.0f);
    _mute = (mute != 0.0f);
}

// Reports the clamped gain and the mute state encoded as 0.0 / 1.0.
void AudioGain::getParameters(float& gain, float& mute) {
    gain = _gain;
    mute = _mute ? 1.0f : 0.0f;
}

View file

@ -1,117 +0,0 @@
//
// AudioGain.h
// libraries/audio/src
//
// Created by Craig Hansen-Sturm on 9/1/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioGain_h
#define hifi_AudioGain_h
class AudioGain
{
    float32_t _gain;  // linear gain applied per sample; clamped to [0.0, 1.0] by setParameters()
    bool _mute;       // when true, render() zeroes the buffer instead of scaling it

public:
    AudioGain();
    ~AudioGain();

    void initialize();
    void finalize();
    void reset();

    // mute is treated as a boolean: any non-zero value engages the mute.
    void setParameters(const float gain, const float mute);
    void getParameters(float& gain, float& mute);

    // Applies the gain (or silence, when muted) to the buffer in place.
    void render(AudioBufferFloat32& frameBuffer);
};
// Applies the current gain to every sample of every channel, or silences the
// whole buffer when muted.
//
// FIX: this replaces a hand-unrolled 16x loop that (a) only handled 1- and
// 2-channel buffers on the 16-frame-aligned path while the fallback path handled
// any channel count, and (b) guarded the unsupported case with
// `assert("unsupported channel format")` -- a non-null string literal, so the
// assert could never fire. A plain loop treats all channel counts uniformly and
// leaves the unrolling/vectorizing to the compiler.
inline void AudioGain::render(AudioBufferFloat32& frameBuffer) {
    if (_mute) {
        frameBuffer.zeroFrames();
        return;
    }

    float32_t** samples = frameBuffer.getFrameData();
    for (uint32_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
        for (uint32_t i = 0; i < frameBuffer.getFrameCount(); ++i) {
            samples[j][i] *= _gain;
        }
    }
}
#endif // AudioGain_h

View file

@ -1,52 +0,0 @@
//
// AudioPan.cpp
// hifi
//
// Created by Craig Hansen-Sturm on 9/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <assert.h>
#include <math.h>
#include <SharedUtil.h>
#include "AudioRingBuffer.h"
#include "AudioFormat.h"
#include "AudioBuffer.h"
#include "AudioPan.h"
// Tolerance bands used by updateCoefficients() to snap the pan position to exact
// full-left / full-right / center gains instead of evaluating sin/cos.
float32_t AudioPan::ONE_MINUS_EPSILON = 1.0f - EPSILON;
float32_t AudioPan::ZERO_PLUS_EPSILON = 0.0f + EPSILON;
float32_t AudioPan::ONE_HALF_MINUS_EPSILON = 0.5f - EPSILON;
float32_t AudioPan::ONE_HALF_PLUS_EPSILON = 0.5f + EPSILON;

AudioPan::AudioPan() {
    initialize();
}

AudioPan::~AudioPan() {
    finalize();
}

// Sets the default (center) pan position.
void AudioPan::initialize() {
    setParameters(0.5f);
}

void AudioPan::finalize() {
}

// Restores the default (center) pan position.
void AudioPan::reset() {
    initialize();
}

void AudioPan::setParameters(const float32_t pan) {
    // pan ranges between 0.0 and 1.0f inclusive. 0.5f is midpoint between full left and full right
    _pan = std::min(std::max(pan, 0.0f), 1.0f);
    updateCoefficients();
}

void AudioPan::getParameters(float32_t& pan) {
    pan = _pan;
}

View file

@ -1,126 +0,0 @@
//
// AudioPan.h
// hifi
//
// Created by Craig Hansen-Sturm on 9/1/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioPan_h
#define hifi_AudioPan_h
#include <NumericalConstants.h>
#include "AudioFormat.h"
class AudioPan
{
    float32_t _pan;        // pan position in [0.0, 1.0]; 0.0 = full left, 1.0 = full right
    float32_t _gainLeft;   // derived per-channel gains, maintained by updateCoefficients()
    float32_t _gainRight;

    // Snap thresholds for the special-cased pan positions (defined in AudioPan.cpp).
    static float32_t ONE_MINUS_EPSILON;
    static float32_t ZERO_PLUS_EPSILON;
    static float32_t ONE_HALF_MINUS_EPSILON;
    static float32_t ONE_HALF_PLUS_EPSILON;

    void updateCoefficients();

public:
    AudioPan();
    ~AudioPan();

    void initialize();
    void finalize();
    void reset();

    // pan is clamped to [0.0, 1.0]; 0.5 is center.
    void setParameters(const float32_t pan);
    void getParameters(float32_t& pan);

    // Applies the left/right gains in place; does nothing unless the buffer is stereo.
    void render(AudioBufferFloat32& frameBuffer);
};
// Applies the constant-power left/right gains to a stereo buffer in place.
// Pan is only meaningful for stereo, so other channel counts are left untouched.
//
// FIX: this replaces a hand-unrolled 16x loop whose `else` branch contained
// `assert("unsupported channel format")` -- a non-null string literal that can
// never fire, and which was unreachable anyway because of the stereo guard
// above. The plain loop below is exactly what both original paths computed.
inline void AudioPan::render(AudioBufferFloat32& frameBuffer) {
    if (frameBuffer.getChannelCount() != 2) {
        return;
    }

    float32_t** samples = frameBuffer.getFrameData();
    for (uint32_t i = 0; i < frameBuffer.getFrameCount(); ++i) {
        samples[0][i] *= _gainLeft;
        samples[1][i] *= _gainRight;
    }
}
inline void AudioPan::updateCoefficients() {
// implement constant power sin^2 + cos^2 = 1 panning law
if (_pan >= ONE_MINUS_EPSILON) { // full right
_gainLeft = 0.0f;
_gainRight = 1.0f;
} else if (_pan <= ZERO_PLUS_EPSILON) { // full left
_gainLeft = 1.0f;
_gainRight = 0.0f;
} else if ((_pan >= ONE_HALF_MINUS_EPSILON) && (_pan <= ONE_HALF_PLUS_EPSILON)) { // center
_gainLeft = 1.0f / SQUARE_ROOT_OF_2;
_gainRight = 1.0f / SQUARE_ROOT_OF_2;
} else { // intermediate cases
_gainLeft = cosf( TWO_PI * _pan );
_gainRight = sinf( TWO_PI * _pan );
}
}
#endif // AudioPan_h

View file

@ -1,21 +0,0 @@
//
// AudioSourceNoise.cpp
// hifi
//
// Created by Craig Hansen-Sturm on 8/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <assert.h>
#include <math.h>
#include <SharedUtil.h>
#include "AudioRingBuffer.h"
#include "AudioFormat.h"
#include "AudioBuffer.h"
#include "AudioSourceNoise.h"
// Fixed LCG seed shared (static) by all AudioSourcePinkNoise instances, making the
// generated sequence deterministic across runs. (The trailing quip is a joke.)
template<>
uint32_t AudioSourcePinkNoise::_randomSeed = 1974; // a truly random number

View file

@ -1,103 +0,0 @@
//
// AudioSourceNoise.h
// hifi
//
// Created by Craig Hansen-Sturm on 9/1/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// Adapted from code by Phil Burk http://www.firstpr.com.au/dsp/pink-noise/
//
#ifndef hifi_AudioSourceNoise_h
#define hifi_AudioSourceNoise_h
// Pink-noise generator in the style of Phil Burk's reference implementation cited in
// the file header: N row generators are summed, and the row refreshed at each sample
// is selected by the trailing-zero count of a wrapping sample index.
template< const uint16_t N = 30>
class AudioSourceNoise
{
    static const uint16_t _randomRows = N;
    static const uint16_t _randomBits = 24;
    static const uint16_t _randomShift = (sizeof(int32_t) * 8) - _randomBits;

    static uint32_t _randomSeed;  // shared LCG state (seeded per specialization in the .cpp)

    int32_t _rows[_randomRows];
    int32_t _runningSum; // used to optimize summing of generators.
    uint16_t _index; // incremented each sample.
    uint16_t _indexMask; // index wrapped by ANDing with this mask.
    float32_t _scale; // used to scale within range of -1.0 to +1.0

    // Linear congruential generator; returns the top _randomBits bits of the new seed.
    static uint32_t generateRandomNumber() {
        _randomSeed = (_randomSeed * 196314165) + 907633515;
        return _randomSeed >> _randomShift;
    }

public:
    AudioSourceNoise() {
        initialize();
    }

    ~AudioSourceNoise() {
        finalize();
    }

    // Clears all generator state and derives the index mask and output scale.
    void initialize() {
        memset(_rows, 0, _randomRows * sizeof(int32_t));
        _runningSum = 0;
        _index = 0;
        // NOTE(review): for N > 16 the uint16_t cast truncates (1 << N) - 1 to 0xFFFF,
        // so with the default N = 30 the effective wrap period is 2^16 -- confirm intended.
        _indexMask = (uint16_t)((1 << _randomRows) - 1);
        _scale = 1.0f / ((_randomRows + 1) * (1 << (_randomBits - 1)));
    }

    void finalize() {
    }

    void reset() {
        initialize();
    }

    // This source has no tunable parameters.
    void setParameters(void) {
    }

    void getParameters(void) {
    }

    // Fills every channel with noise; note that the generator advances once per
    // channel per frame, so channels receive different (uncorrelated) sequences.
    void render(AudioBufferFloat32& frameBuffer) {
        uint32_t randomNumber;

        float32_t** samples = frameBuffer.getFrameData();
        for (uint32_t i = 0; i < frameBuffer.getFrameCount(); ++i) {
            for (uint32_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
                _index = (_index + 1) & _indexMask; // increment and mask index.
                if (_index != 0) { // if index is zero, don't update any random values.
                    uint32_t numZeros = 0; // determine how many trailing zeros in _index
                    uint32_t tmp = _index;
                    while ((tmp & 1) == 0) {
                        tmp >>= 1;
                        numZeros++;
                    }
                    // replace the indexed _rows random value. subtract and add back to _runningSum instead
                    // of adding all the random values together. only one value changes each time.
                    _runningSum -= _rows[numZeros];
                    randomNumber = generateRandomNumber();
                    _runningSum += randomNumber;
                    _rows[numZeros] = randomNumber;
                }
                // add extra white noise value and scale between -1.0 and +1.0
                samples[j][i] = (_runningSum + generateRandomNumber()) * _scale;
            }
        }
    }
};

typedef AudioSourceNoise<> AudioSourcePinkNoise;
#endif // AudioSourceNoise_h

View file

@ -1,55 +0,0 @@
//
// AudioSourceTone.cpp
// libraries/audio/src
//
// Created by Craig Hansen-Sturm on 8/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <NumericalConstants.h>
#include "AudioSourceTone.h"
AudioSourceTone::AudioSourceTone() {
    initialize();
}

AudioSourceTone::~AudioSourceTone() {
    finalize();
}

void AudioSourceTone::finalize() {
}

// NOTE(review): unlike the sibling audio sources, reset() leaves both the
// parameters and the oscillator state untouched -- confirm whether it should
// re-run initialize() like the others do.
void AudioSourceTone::reset() {
}

// Derives the Gordon-Smith oscillator coefficients and re-seeds the quadrature
// state pair for the current frequency and sample rate.
void AudioSourceTone::updateCoefficients() {
    _omega = _frequency / _sampleRate * TWO_PI;
    _epsilon = 2.0f * sinf(_omega / 2.0f);
    _yq1 = cosf(-1.0f * _omega);
    _y1 = sinf(+1.0f * _omega);
}

// Default: a 220 Hz tone at -6 dB, at the engine sample rate.
void AudioSourceTone::initialize() {
    const float32_t FREQUENCY_220_HZ = 220.0f;
    const float32_t GAIN_MINUS_6DB = 0.501f;
    setParameters(AudioConstants::SAMPLE_RATE, FREQUENCY_220_HZ, GAIN_MINUS_6DB);
}

// Clamps and stores the parameters, then rebuilds the oscillator coefficients.
void AudioSourceTone::setParameters(const float32_t sampleRate, const float32_t frequency, const float32_t amplitude) {
    _sampleRate = std::max(sampleRate, 1.0f);
    _frequency = std::max(frequency, 1.0f);
    // BUG FIX: this was `std::max(amplitude, 1.0f)`, which forced every amplitude
    // up to at least unity -- defeating, e.g., the -6 dB (0.501f) default set by
    // initialize(). Amplitude is now only floored at zero.
    _amplitude = std::max(amplitude, 0.0f);
    updateCoefficients();
}

void AudioSourceTone::getParameters(float32_t& sampleRate, float32_t& frequency, float32_t& amplitude) {
    sampleRate = _sampleRate;
    frequency = _frequency;
    amplitude = _amplitude;
}

View file

@ -1,65 +0,0 @@
//
// AudioSourceTone.h
// hifi
//
// Created by Craig Hansen-Sturm on 9/1/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioSourceTone_h
#define hifi_AudioSourceTone_h
#include "AudioBuffer.h"
#include "AudioFormat.h"
// Implements a two-pole Gordon-Smith oscillator
class AudioSourceTone {
    float32_t _frequency;   // tone frequency in Hz (floored at 1 Hz by setParameters)
    float32_t _amplitude;   // linear output amplitude
    float32_t _sampleRate;  // samples per second (floored at 1 by setParameters)

    // Oscillator coefficients/state, maintained by updateCoefficients():
    float32_t _omega;    // angular increment per sample
    float32_t _epsilon;  // Gordon-Smith feedback coefficient
    float32_t _yq1;      // previous quadrature-phase value
    float32_t _y1;       // previous in-phase (output) value

    void updateCoefficients();

public:
    AudioSourceTone();
    ~AudioSourceTone();

    void initialize();
    void finalize();
    void reset();

    void setParameters(const float32_t sampleRate, const float32_t frequency, const float32_t amplitude);
    void getParameters(float32_t& sampleRate, float32_t& frequency, float32_t& amplitude);

    // Writes the amplitude-scaled tone to every channel of the buffer.
    void render(AudioBufferFloat32& frameBuffer);
};
// Advances the Gordon-Smith quadrature pair once per frame and writes the
// amplitude-scaled in-phase output to every channel of the buffer.
inline void AudioSourceTone::render(AudioBufferFloat32& frameBuffer) {
    float32_t** samples = frameBuffer.getFrameData();
    for (uint32_t frame = 0; frame < frameBuffer.getFrameCount(); ++frame) {
        const float32_t quadrature = _yq1 - (_epsilon * _y1);
        const float32_t inPhase = _y1 + (_epsilon * quadrature);

        // carry the oscillator state forward to the next frame
        _yq1 = quadrature;
        _y1 = inPhase;

        for (uint32_t channel = 0; channel < frameBuffer.getChannelCount(); ++channel) {
            samples[channel][frame] = _amplitude * inPhase;
        }
    }
}
#endif

View file

@ -40,7 +40,6 @@ public:
bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
bool isStereo() const { return _isStereo; }
bool ignorePenumbraFilter() { return _ignorePenumbra; }
PositionalAudioStream::Type getType() const { return _type; }
const glm::vec3& getPosition() const { return _position; }
const glm::quat& getOrientation() const { return _orientation; }

View file

@ -24,8 +24,6 @@
#include <SharedUtil.h>
#include "AudioRingBuffer.h"
#include "AudioFormat.h"
#include "AudioBuffer.h"
#include "AudioLogging.h"
#include "Sound.h"

View file

@ -54,24 +54,6 @@ ScriptAudioInjector* AudioScriptingInterface::playSound(Sound* sound, const Audi
}
}
// Forwards the noise-injection toggle to the local audio interface, if one is attached.
void AudioScriptingInterface::injectGeneratedNoise(bool inject) {
    if (!_localAudioInterface) {
        return;
    }
    _localAudioInterface->enableAudioSourceInject(inject);
}
// Selects the pink-noise generator on the local audio interface, if one is attached.
void AudioScriptingInterface::selectPinkNoise() {
    if (!_localAudioInterface) {
        return;
    }
    _localAudioInterface->selectAudioSourcePinkNoise();
}
// Selects the 440 Hz sine source on the local audio interface, if one is attached.
void AudioScriptingInterface::selectSine440() {
    if (!_localAudioInterface) {
        return;
    }
    _localAudioInterface->selectAudioSourceSine440();
}
void AudioScriptingInterface::setStereoInput(bool stereo) {
if (_localAudioInterface) {
_localAudioInterface->setIsStereoInput(stereo);

View file

@ -29,10 +29,6 @@ protected:
// this method is protected to stop C++ callers from calling, but invokable from script
Q_INVOKABLE ScriptAudioInjector* playSound(Sound* sound, const AudioInjectorOptions& injectorOptions = AudioInjectorOptions());
Q_INVOKABLE void injectGeneratedNoise(bool inject);
Q_INVOKABLE void selectPinkNoise();
Q_INVOKABLE void selectSine440();
Q_INVOKABLE void setStereoInput(bool stereo);
signals: