Merge pull request #3465 from chansensturm/audio-noise

New Audio Edit Buffer Object / Click Removal / ....
AndrewMeadows 2014-09-22 11:38:42 -07:00
commit a0b458fd1d
13 changed files with 898 additions and 475 deletions

@ -509,7 +509,7 @@ void Audio::handleAudioInput() {
if (!_muted && (_audioSourceInjectEnabled || _peqEnabled)) {
int16_t* inputFrameData = (int16_t*)inputByteArray.data();
const int inputFrameCount = inputByteArray.size() / sizeof(int16_t);
const uint32_t inputFrameCount = inputByteArray.size() / sizeof(int16_t);
_inputFrameBuffer.copyFrames(1, inputFrameCount, inputFrameData, false /*copy in*/);

@ -17,193 +17,441 @@
template< typename T >
class AudioFrameBuffer {
uint16_t _channelCount;
uint16_t _channelCountMax;
uint16_t _frameCount;
uint16_t _frameCountMax;
protected:
uint32_t _channelCount;
uint32_t _channelCountMax;
uint32_t _frameCount;
uint32_t _frameCountMax;
T** _frameBuffer;
void allocateFrames() {
_frameBuffer = new T*[_channelCountMax];
if (_frameBuffer) {
for (uint16_t i = 0; i < _channelCountMax; ++i) {
_frameBuffer[i] = new T[_frameCountMax];
}
}
}
void deallocateFrames() {
if (_frameBuffer) {
for (uint16_t i = 0; i < _channelCountMax; ++i) {
delete _frameBuffer[i];
}
delete _frameBuffer;
}
_frameBuffer = NULL;
}
void allocateFrames();
void deallocateFrames();
public:
AudioFrameBuffer() :
_channelCount(0),
_frameCount(0),
_frameCountMax(0),
_frameBuffer(NULL) {
}
AudioFrameBuffer();
AudioFrameBuffer(const uint32_t channelCount, const uint32_t frameCount);
virtual ~AudioFrameBuffer();
AudioFrameBuffer(const uint16_t channelCount, const uint16_t frameCount) :
_channelCount(channelCount),
_channelCountMax(channelCount),
_frameCount(frameCount),
_frameCountMax(frameCount),
_frameBuffer(NULL) {
allocateFrames();
void initialize(const uint32_t channelCount, const uint32_t frameCount);
void finalize();
T**& getFrameData();
uint32_t getChannelCount();
uint32_t getFrameCount();
template< typename S >
void copyFrames(uint32_t channelCount, const uint32_t frameCount, S* frames, const bool copyOut = false);
void zeroFrames();
};
template< typename T >
AudioFrameBuffer< T >::AudioFrameBuffer() :
_channelCount(0),
_frameCount(0),
_frameCountMax(0),
_frameBuffer(NULL) {
}
template< typename T >
AudioFrameBuffer< T >::AudioFrameBuffer(const uint32_t channelCount, const uint32_t frameCount) :
_channelCount(channelCount),
_channelCountMax(channelCount),
_frameCount(frameCount),
_frameCountMax(frameCount),
_frameBuffer(NULL) {
allocateFrames();
}
template< typename T >
AudioFrameBuffer< T >::~AudioFrameBuffer() {
finalize();
}
template< typename T >
void AudioFrameBuffer< T >::allocateFrames() {
_frameBuffer = new T*[_channelCountMax];
if (_frameBuffer) {
for (uint32_t i = 0; i < _channelCountMax; ++i) {
_frameBuffer[i] = new T[_frameCountMax];
}
}
~AudioFrameBuffer() {
}
template< typename T >
void AudioFrameBuffer< T >::deallocateFrames() {
if (_frameBuffer) {
for (uint32_t i = 0; i < _channelCountMax; ++i) {
delete _frameBuffer[i];
}
delete _frameBuffer;
}
_frameBuffer = NULL;
}
template< typename T >
void AudioFrameBuffer< T >::initialize(const uint32_t channelCount, const uint32_t frameCount) {
if (_frameBuffer) {
finalize();
}
_channelCount = channelCount;
_channelCountMax = channelCount;
_frameCount = frameCount;
_frameCountMax = frameCount;
allocateFrames();
}
template< typename T >
void AudioFrameBuffer< T >::finalize() {
deallocateFrames();
_channelCount = 0;
_channelCountMax = 0;
_frameCount = 0;
_frameCountMax = 0;
}
template< typename T >
inline T**& AudioFrameBuffer< T >::getFrameData() {
return _frameBuffer;
}
template< typename T >
inline uint32_t AudioFrameBuffer< T >::getChannelCount() {
return _channelCount;
}
template< typename T >
inline uint32_t AudioFrameBuffer< T >::getFrameCount() {
return _frameCount;
}
template< typename T >
inline void AudioFrameBuffer< T >::zeroFrames() {
if (!_frameBuffer) {
return;
}
for (uint32_t i = 0; i < _channelCountMax; ++i) {
memset(_frameBuffer[i], 0, sizeof(T)*_frameCountMax);
}
}
template< typename T >
template< typename S >
inline void AudioFrameBuffer< T >::copyFrames(uint32_t channelCount, const uint32_t frameCount, S* frames, const bool copyOut) {
if ( !_frameBuffer || !frames) {
return;
}
void initialize(const uint16_t channelCount, const uint16_t frameCount) {
if (_frameBuffer) {
finalize();
}
_channelCount = channelCount;
_channelCountMax = channelCount;
if (channelCount <=_channelCountMax && frameCount <=_frameCountMax) {
// We always allow copying fewer frames than we have allocated
_frameCount = frameCount;
_frameCountMax = frameCount;
allocateFrames();
_channelCount = channelCount;
} else {
qDebug() << "Audio framing error: _channelCount="
<< _channelCount
<< "channelCountMax="
<< _channelCountMax
<< "_frameCount="
<< _frameCount
<< "frameCountMax="
<< _frameCountMax;
_channelCount = std::min(_channelCount,_channelCountMax);
_frameCount = std::min(_frameCount,_frameCountMax);
}
void finalize() {
deallocateFrames();
_channelCount = 0;
_channelCountMax = 0;
_frameCount = 0;
_frameCountMax = 0;
}
T**& getFrameData() {
return _frameBuffer;
}
bool frameAlignment16 = (_frameCount & 0x0F) == 0;
uint16_t getChannelCount() {
return _channelCount;
}
uint16_t getFrameCount() {
return _frameCount;
}
void zeroFrames() {
if (!_frameBuffer) {
return;
}
for (uint16_t i = 0; i < _channelCountMax; ++i) {
memset(_frameBuffer[i], 0, sizeof(T)*_frameCountMax);
}
}
template< typename S >
void copyFrames(uint16_t channelCount, const uint16_t frameCount, S* frames, const bool copyOut = false) {
if ( !_frameBuffer || !frames) {
return;
}
if (channelCount <=_channelCountMax && frameCount <=_frameCountMax) {
// We always allow copying fewer frames than we have allocated
_frameCount = frameCount;
_channelCount = channelCount;
}
else {
//
// However we do not attempt to copy more frames than we've allocated ;-) This is a framing error caused by either
// a/ the platform audio driver not correctly queuing and regularly smoothing device IO capture frames -or-
// b/ our IO processing thread (currently running on a Qt GUI thread) has been delayed/scheduled too late.
//
// The fix is not to make the problem worse by allocating additional frames on this thread; rather, it is to handle
// dynamic re-sizing off the IO processing thread. While a/ is not in our control, we will address the off-thread
// re-sizing, as well as b/, in later releases.
//
// For now, we log this condition and do our best to recover by copying as many frames as we have allocated.
// Unfortunately, this will result (temporarily) in an audible discontinuity.
//
// If you repeatedly receive this error, contact craig@highfidelity.io and send me what audio device you are using,
// what audio-stack you are using (pulse/alsa, core audio, ...), what OS, and what the reported frame/channel
// counts are. In addition, any information about what you were doing at the time of the discontinuity would be
// useful (e.g., accessing any client features/menus).
//
qDebug() << "Audio framing error: _channelCount="
<< _channelCount
<< "channelCountMax="
<< _channelCountMax
<< "_frameCount="
<< _frameCount
<< "frameCountMax="
<< _frameCountMax;
_channelCount = std::min(_channelCount,_channelCountMax);
_frameCount = std::min(_frameCount,_frameCountMax);
}
if (copyOut) {
S* dst = frames;
if (copyOut) {
S* dst = frames;
if(typeid(T) == typeid(S)) { // source and destination types are the same, just copy out
if(typeid(T) == typeid(S)) { // source and destination types are the same
for (int i = 0; i < _frameCount; ++i) {
for (int j = 0; j < _channelCount; ++j) {
if (frameAlignment16 && (_channelCount == 1 || _channelCount == 2)) {
if (_channelCount == 1) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
*dst++ = _frameBuffer[0][i + 0];
*dst++ = _frameBuffer[0][i + 1];
*dst++ = _frameBuffer[0][i + 2];
*dst++ = _frameBuffer[0][i + 3];
*dst++ = _frameBuffer[0][i + 4];
*dst++ = _frameBuffer[0][i + 5];
*dst++ = _frameBuffer[0][i + 6];
*dst++ = _frameBuffer[0][i + 7];
*dst++ = _frameBuffer[0][i + 8];
*dst++ = _frameBuffer[0][i + 9];
*dst++ = _frameBuffer[0][i + 10];
*dst++ = _frameBuffer[0][i + 11];
*dst++ = _frameBuffer[0][i + 12];
*dst++ = _frameBuffer[0][i + 13];
*dst++ = _frameBuffer[0][i + 14];
*dst++ = _frameBuffer[0][i + 15];
}
} else if (_channelCount == 2) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
*dst++ = _frameBuffer[0][i + 0];
*dst++ = _frameBuffer[1][i + 0];
*dst++ = _frameBuffer[0][i + 1];
*dst++ = _frameBuffer[1][i + 1];
*dst++ = _frameBuffer[0][i + 2];
*dst++ = _frameBuffer[1][i + 2];
*dst++ = _frameBuffer[0][i + 3];
*dst++ = _frameBuffer[1][i + 3];
*dst++ = _frameBuffer[0][i + 4];
*dst++ = _frameBuffer[1][i + 4];
*dst++ = _frameBuffer[0][i + 5];
*dst++ = _frameBuffer[1][i + 5];
*dst++ = _frameBuffer[0][i + 6];
*dst++ = _frameBuffer[1][i + 6];
*dst++ = _frameBuffer[0][i + 7];
*dst++ = _frameBuffer[1][i + 7];
*dst++ = _frameBuffer[0][i + 8];
*dst++ = _frameBuffer[1][i + 8];
*dst++ = _frameBuffer[0][i + 9];
*dst++ = _frameBuffer[1][i + 9];
*dst++ = _frameBuffer[0][i + 10];
*dst++ = _frameBuffer[1][i + 10];
*dst++ = _frameBuffer[0][i + 11];
*dst++ = _frameBuffer[1][i + 11];
*dst++ = _frameBuffer[0][i + 12];
*dst++ = _frameBuffer[1][i + 12];
*dst++ = _frameBuffer[0][i + 13];
*dst++ = _frameBuffer[1][i + 13];
*dst++ = _frameBuffer[0][i + 14];
*dst++ = _frameBuffer[1][i + 14];
*dst++ = _frameBuffer[0][i + 15];
*dst++ = _frameBuffer[1][i + 15];
}
}
} else {
for (uint32_t i = 0; i < _frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
*dst++ = _frameBuffer[j][i];
}
}
}
else {
if(typeid(T) == typeid(float32_t) &&
typeid(S) == typeid(int16_t)) {
} else {
if(typeid(T) == typeid(float32_t) &&
typeid(S) == typeid(int16_t)) { // source and destination are not the same, convert from float32_t to int16_t and copy out
const int scale = (2 << ((8 * sizeof(S)) - 1));
if (frameAlignment16 && (_channelCount == 1 || _channelCount == 2)) {
const int scale = (2 << ((8 * sizeof(S)) - 1));
for (int i = 0; i < _frameCount; ++i) {
for (int j = 0; j < _channelCount; ++j) {
if (_channelCount == 1) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
*dst++ = (S)(_frameBuffer[0][i + 0] * scale);
*dst++ = (S)(_frameBuffer[0][i + 1] * scale);
*dst++ = (S)(_frameBuffer[0][i + 2] * scale);
*dst++ = (S)(_frameBuffer[0][i + 3] * scale);
*dst++ = (S)(_frameBuffer[0][i + 4] * scale);
*dst++ = (S)(_frameBuffer[0][i + 5] * scale);
*dst++ = (S)(_frameBuffer[0][i + 6] * scale);
*dst++ = (S)(_frameBuffer[0][i + 7] * scale);
*dst++ = (S)(_frameBuffer[0][i + 8] * scale);
*dst++ = (S)(_frameBuffer[0][i + 9] * scale);
*dst++ = (S)(_frameBuffer[0][i + 10] * scale);
*dst++ = (S)(_frameBuffer[0][i + 11] * scale);
*dst++ = (S)(_frameBuffer[0][i + 12] * scale);
*dst++ = (S)(_frameBuffer[0][i + 13] * scale);
*dst++ = (S)(_frameBuffer[0][i + 14] * scale);
*dst++ = (S)(_frameBuffer[0][i + 15] * scale);
}
} else if (_channelCount == 2) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
*dst++ = (S)(_frameBuffer[0][i + 0] * scale);
*dst++ = (S)(_frameBuffer[1][i + 0] * scale);
*dst++ = (S)(_frameBuffer[0][i + 1] * scale);
*dst++ = (S)(_frameBuffer[1][i + 1] * scale);
*dst++ = (S)(_frameBuffer[0][i + 2] * scale);
*dst++ = (S)(_frameBuffer[1][i + 2] * scale);
*dst++ = (S)(_frameBuffer[0][i + 3] * scale);
*dst++ = (S)(_frameBuffer[1][i + 3] * scale);
*dst++ = (S)(_frameBuffer[0][i + 4] * scale);
*dst++ = (S)(_frameBuffer[1][i + 4] * scale);
*dst++ = (S)(_frameBuffer[0][i + 5] * scale);
*dst++ = (S)(_frameBuffer[1][i + 5] * scale);
*dst++ = (S)(_frameBuffer[0][i + 6] * scale);
*dst++ = (S)(_frameBuffer[1][i + 6] * scale);
*dst++ = (S)(_frameBuffer[0][i + 7] * scale);
*dst++ = (S)(_frameBuffer[1][i + 7] * scale);
*dst++ = (S)(_frameBuffer[0][i + 8] * scale);
*dst++ = (S)(_frameBuffer[1][i + 8] * scale);
*dst++ = (S)(_frameBuffer[0][i + 9] * scale);
*dst++ = (S)(_frameBuffer[1][i + 9] * scale);
*dst++ = (S)(_frameBuffer[0][i + 10] * scale);
*dst++ = (S)(_frameBuffer[1][i + 10] * scale);
*dst++ = (S)(_frameBuffer[0][i + 11] * scale);
*dst++ = (S)(_frameBuffer[1][i + 11] * scale);
*dst++ = (S)(_frameBuffer[0][i + 12] * scale);
*dst++ = (S)(_frameBuffer[1][i + 12] * scale);
*dst++ = (S)(_frameBuffer[0][i + 13] * scale);
*dst++ = (S)(_frameBuffer[1][i + 13] * scale);
*dst++ = (S)(_frameBuffer[0][i + 14] * scale);
*dst++ = (S)(_frameBuffer[1][i + 14] * scale);
*dst++ = (S)(_frameBuffer[0][i + 15] * scale);
*dst++ = (S)(_frameBuffer[1][i + 15] * scale);
}
}
} else {
for (uint32_t i = 0; i < _frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
*dst++ = (S)(_frameBuffer[j][i] * scale);
}
}
}
else {
assert(0); // currently unsupported conversion
}
} else {
assert(0); // currently unsupported conversion
}
}
else { // copyIn
S* src = frames;
} else { // copyIn
S* src = frames;
if(typeid(T) == typeid(S)) { // source and destination types are the same, copy in
if(typeid(T) == typeid(S)) { // source and destination types are the same
for (int i = 0; i < _frameCount; ++i) {
for (int j = 0; j < _channelCount; ++j) {
if (frameAlignment16 && (_channelCount == 1 || _channelCount == 2)) {
if (_channelCount == 1) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
_frameBuffer[0][i + 0] = *src++;
_frameBuffer[0][i + 1] = *src++;
_frameBuffer[0][i + 2] = *src++;
_frameBuffer[0][i + 3] = *src++;
_frameBuffer[0][i + 4] = *src++;
_frameBuffer[0][i + 5] = *src++;
_frameBuffer[0][i + 6] = *src++;
_frameBuffer[0][i + 7] = *src++;
_frameBuffer[0][i + 8] = *src++;
_frameBuffer[0][i + 9] = *src++;
_frameBuffer[0][i + 10] = *src++;
_frameBuffer[0][i + 11] = *src++;
_frameBuffer[0][i + 12] = *src++;
_frameBuffer[0][i + 13] = *src++;
_frameBuffer[0][i + 14] = *src++;
_frameBuffer[0][i + 15] = *src++;
}
} else if (_channelCount == 2) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
_frameBuffer[0][i + 0] = *src++;
_frameBuffer[1][i + 0] = *src++;
_frameBuffer[0][i + 1] = *src++;
_frameBuffer[1][i + 1] = *src++;
_frameBuffer[0][i + 2] = *src++;
_frameBuffer[1][i + 2] = *src++;
_frameBuffer[0][i + 3] = *src++;
_frameBuffer[1][i + 3] = *src++;
_frameBuffer[0][i + 4] = *src++;
_frameBuffer[1][i + 4] = *src++;
_frameBuffer[0][i + 5] = *src++;
_frameBuffer[1][i + 5] = *src++;
_frameBuffer[0][i + 6] = *src++;
_frameBuffer[1][i + 6] = *src++;
_frameBuffer[0][i + 7] = *src++;
_frameBuffer[1][i + 7] = *src++;
_frameBuffer[0][i + 8] = *src++;
_frameBuffer[1][i + 8] = *src++;
_frameBuffer[0][i + 9] = *src++;
_frameBuffer[1][i + 9] = *src++;
_frameBuffer[0][i + 10] = *src++;
_frameBuffer[1][i + 10] = *src++;
_frameBuffer[0][i + 11] = *src++;
_frameBuffer[1][i + 11] = *src++;
_frameBuffer[0][i + 12] = *src++;
_frameBuffer[1][i + 12] = *src++;
_frameBuffer[0][i + 13] = *src++;
_frameBuffer[1][i + 13] = *src++;
_frameBuffer[0][i + 14] = *src++;
_frameBuffer[1][i + 14] = *src++;
_frameBuffer[0][i + 15] = *src++;
_frameBuffer[1][i + 15] = *src++;
}
}
} else {
for (uint32_t i = 0; i < _frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
_frameBuffer[j][i] = *src++;
}
}
}
else {
if(typeid(T) == typeid(float32_t) &&
typeid(S) == typeid(int16_t)) {
} else {
if(typeid(T) == typeid(float32_t) &&
typeid(S) == typeid(int16_t)) { // source and destination are not the same, convert from int16_t to float32_t and copy in
const int scale = (2 << ((8 * sizeof(S)) - 1));
if (frameAlignment16 && (_channelCount == 1 || _channelCount == 2)) {
const int scale = (2 << ((8 * sizeof(S)) - 1));
for (int i = 0; i < _frameCount; ++i) {
for (int j = 0; j < _channelCount; ++j) {
if (_channelCount == 1) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
_frameBuffer[0][i + 0] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 1] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 2] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 3] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 4] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 5] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 6] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 7] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 8] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 9] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 10] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 11] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 12] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 13] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 14] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 15] = ((T)(*src++)) / scale;
}
} else if (_channelCount == 2) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
_frameBuffer[0][i + 0] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 0] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 1] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 1] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 2] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 2] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 3] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 3] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 4] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 4] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 5] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 5] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 6] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 6] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 7] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 7] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 8] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 8] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 9] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 9] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 10] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 10] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 11] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 11] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 12] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 12] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 13] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 13] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 14] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 14] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 15] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 15] = ((T)(*src++)) / scale;
}
}
} else {
for (uint32_t i = 0; i < _frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
_frameBuffer[j][i] = ((T)(*src++)) / scale;
}
}
}
else {
assert(0); // currently unsupported conversion
}
} else {
assert(0); // currently unsupported conversion
}
}
}
};
}
typedef AudioFrameBuffer< float32_t > AudioBufferFloat32;
typedef AudioFrameBuffer< int32_t > AudioBufferSInt32;
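
For orientation, a minimal usage sketch of the new deinterleaved buffer and its copyFrames() conversion path. This is not code from the commit: processBlock, the stereo layout, and the -6 dB placeholder are illustrative only, and AudioBuffer.h with its float32_t typedef is assumed to be included.

    // Wrap an interleaved int16_t block in an AudioBufferFloat32, process it in place,
    // then convert back out to int16_t through the same copyFrames() path shown above.
    void processBlock(int16_t* interleaved, uint32_t frameCount) {
        AudioBufferFloat32 buffer(2, frameCount);               // 2 channels of deinterleaved float32_t
        buffer.copyFrames(2, frameCount, interleaved, false);   // copy in: int16_t -> normalized float32_t
        float32_t** samples = buffer.getFrameData();
        for (uint32_t j = 0; j < buffer.getChannelCount(); ++j) {
            for (uint32_t i = 0; i < buffer.getFrameCount(); ++i) {
                samples[j][i] *= 0.5f;                          // placeholder processing: -6 dB gain
            }
        }
        buffer.copyFrames(2, frameCount, interleaved, true);    // copy out: float32_t -> int16_t
    }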

@ -0,0 +1,112 @@
//
// AudioEditBuffer.h
// hifi
//
// Created by Craig Hansen-Sturm on 8/29/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioEditBuffer_h
#define hifi_AudioEditBuffer_h
template< typename T >
class AudioEditBuffer : public AudioFrameBuffer<T> {
public:
AudioEditBuffer();
AudioEditBuffer(const uint32_t channelCount, const uint32_t frameCount);
~AudioEditBuffer();
bool getZeroCrossing(uint32_t start, bool direction, float32_t epsilon, uint32_t& zero);
void linearFade(uint32_t start, uint32_t stop, bool slope);
void exponentialFade(uint32_t start, uint32_t stop, bool slope);
};
template< typename T >
AudioEditBuffer<T>::AudioEditBuffer() :
AudioFrameBuffer<T>() {
}
template< typename T >
AudioEditBuffer<T>::AudioEditBuffer(const uint32_t channelCount, const uint32_t frameCount) :
AudioFrameBuffer<T>(channelCount, frameCount) {
}
template< typename T >
AudioEditBuffer<T>::~AudioEditBuffer() {
}
template< typename T >
inline bool AudioEditBuffer<T>::getZeroCrossing(uint32_t start, bool direction, float32_t epsilon, uint32_t& zero) {
zero = this->_frameCount;
if (direction) { // scan from the left
if (start < this->_frameCount) {
for (uint32_t i = start; i < this->_frameCount; ++i) {
for (uint32_t j = 0; j < this->_channelCount; ++j) {
if (this->_frameBuffer[j][i] >= -epsilon && this->_frameBuffer[j][i] <= epsilon) {
zero = i;
return true;
}
}
}
}
} else { // scan from the right
if (start != 0 && start < this->_frameCount) {
for (uint32_t i = start; i != 0; --i) {
for (uint32_t j = 0; j < this->_channelCount; ++j) {
if (this->_frameBuffer[j][i] >= -epsilon && this->_frameBuffer[j][i] <= epsilon) {
zero = i;
return true;
}
}
}
}
}
return false;
}
template< typename T >
inline void AudioEditBuffer<T>::linearFade(uint32_t start, uint32_t stop, bool slope) {
if (start >= stop || start > this->_frameCount || stop > this->_frameCount ) {
return;
}
uint32_t count = stop - start;
float32_t delta;
float32_t gain;
if (slope) { // 0.0 to 1.0f in delta increments
delta = 1.0f / (float32_t)count;
gain = 0.0f;
} else { // 1.0f to 0.0f in delta increments
delta = -1.0f / (float32_t)count;
gain = 1.0f;
}
for (uint32_t i = start; i < stop; ++i) {
for (uint32_t j = 0; j < this->_channelCount; ++j) {
this->_frameBuffer[j][i] *= gain;
gain += delta;
}
}
}
template< typename T >
inline void AudioEditBuffer<T>::exponentialFade(uint32_t start, uint32_t stop, bool slope) {
// TBD
}
typedef AudioEditBuffer< float32_t > AudioEditBufferFloat32;
typedef AudioEditBuffer< int32_t > AudioEditBufferSInt32;
#endif // hifi_AudioEditBuffer_h
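
A hedged sketch of how the new edit buffer supports click removal by combining getZeroCrossing() with linearFade(). The names declick and fadeFrames and the 0.001f threshold are illustrative, not part of this commit; Sound::trimFrames() further down uses fixed-length fades without the zero-crossing search.

    // Fade a clip in and out starting at nearby (near-)zero samples so the edit
    // itself does not introduce a click. Assumes the clip holds at least a few frames.
    void declick(AudioEditBufferFloat32& clip, uint32_t fadeFrames) {
        uint32_t zero = 0;
        // Fade in: scan left-to-right for the first sample within +/-0.001, then ramp 0 -> 1.
        if (clip.getZeroCrossing(0, true, 0.001f, zero)) {
            clip.linearFade(zero, zero + fadeFrames, true);
        }
        // Fade out: scan right-to-left from the last frame, then ramp 1 -> 0 into that point.
        if (clip.getZeroCrossing(clip.getFrameCount() - 1, false, 0.001f, zero)) {
            clip.linearFade(zero - fadeFrames, zero, false);
        }
        // linearFade() silently ignores out-of-range requests (see its bounds check above),
        // so an overshooting fade window is safe, if ineffective.
    }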

@ -21,16 +21,16 @@ class AudioBiquad {
//
// private data
//
float _a0; // gain
float _a1; // feedforward 1
float _a2; // feedforward 2
float _b1; // feedback 1
float _b2; // feedback 2
float32_t _a0; // gain
float32_t _a1; // feedforward 1
float32_t _a2; // feedforward 2
float32_t _b1; // feedback 1
float32_t _b2; // feedback 2
float _xm1;
float _xm2;
float _ym1;
float _ym2;
float32_t _xm1;
float32_t _xm2;
float32_t _ym1;
float32_t _ym2;
public:
@ -51,20 +51,20 @@ public:
//
// public interface
//
void setParameters(const float a0, const float a1, const float a2, const float b1, const float b2) {
void setParameters(const float32_t a0, const float32_t a1, const float32_t a2, const float32_t b1, const float32_t b2) {
_a0 = a0; _a1 = a1; _a2 = a2; _b1 = b1; _b2 = b2;
}
void getParameters(float& a0, float& a1, float& a2, float& b1, float& b2) {
void getParameters(float32_t& a0, float32_t& a1, float32_t& a2, float32_t& b1, float32_t& b2) {
a0 = _a0; a1 = _a1; a2 = _a2; b1 = _b1; b2 = _b2;
}
void render(const float* in, float* out, const int frames) {
void render(const float32_t* in, float32_t* out, const uint32_t frames) {
float x;
float y;
float32_t x;
float32_t y;
for (int i = 0; i < frames; ++i) {
for (uint32_t i = 0; i < frames; ++i) {
x = *in++;
@ -105,10 +105,10 @@ protected:
// data
//
AudioBiquad _kernel;
float _sampleRate;
float _frequency;
float _gain;
float _slope;
float32_t _sampleRate;
float32_t _frequency;
float32_t _gain;
float32_t _slope;
//
// helpers
@ -131,7 +131,7 @@ public:
//
// public interface
//
void setParameters(const float sampleRate, const float frequency, const float gain, const float slope) {
void setParameters(const float32_t sampleRate, const float32_t frequency, const float32_t gain, const float32_t slope) {
_sampleRate = std::max(sampleRate, 1.0f);
_frequency = std::max(frequency, 2.0f);
@ -141,11 +141,11 @@ public:
updateKernel();
}
void getParameters(float& sampleRate, float& frequency, float& gain, float& slope) {
void getParameters(float32_t& sampleRate, float32_t& frequency, float32_t& gain, float32_t& slope) {
sampleRate = _sampleRate; frequency = _frequency; gain = _gain; slope = _slope;
}
void render(const float* in, float* out, const int frames) {
void render(const float32_t* in, float32_t* out, const uint32_t frames) {
_kernel.render(in,out,frames);
}
@ -166,14 +166,14 @@ public:
//
void updateKernel() {
const float a = _gain;
const float aAdd1 = a + 1.0f;
const float aSub1 = a - 1.0f;
const float omega = TWO_PI * _frequency / _sampleRate;
const float aAdd1TimesCosOmega = aAdd1 * cosf(omega);
const float aSub1TimesCosOmega = aSub1 * cosf(omega);
const float alpha = 0.5f * sinf(omega) / _slope;
const float zeta = 2.0f * sqrtf(a) * alpha;
const float32_t a = _gain;
const float32_t aAdd1 = a + 1.0f;
const float32_t aSub1 = a - 1.0f;
const float32_t omega = TWO_PI * _frequency / _sampleRate;
const float32_t aAdd1TimesCosOmega = aAdd1 * cosf(omega);
const float32_t aSub1TimesCosOmega = aSub1 * cosf(omega);
const float32_t alpha = 0.5f * sinf(omega) / _slope;
const float32_t zeta = 2.0f * sqrtf(a) * alpha;
/*
b0 = A*( (A+1) - (A-1)*cos(w0) + 2*sqrt(A)*alpha )
b1 = 2*A*( (A-1) - (A+1)*cos(w0) )
@ -182,14 +182,14 @@ public:
a1 = -2*( (A-1) + (A+1)*cos(w0) )
a2 = (A+1) + (A-1)*cos(w0) - 2*sqrt(A)*alpha
*/
const float b0 = +1.0f * (aAdd1 - aSub1TimesCosOmega + zeta) * a;
const float b1 = +2.0f * (aSub1 - aAdd1TimesCosOmega + ZERO) * a;
const float b2 = +1.0f * (aAdd1 - aSub1TimesCosOmega - zeta) * a;
const float a0 = +1.0f * (aAdd1 + aSub1TimesCosOmega + zeta);
const float a1 = -2.0f * (aSub1 + aAdd1TimesCosOmega + ZERO);
const float a2 = +1.0f * (aAdd1 + aSub1TimesCosOmega - zeta);
const float32_t b0 = +1.0f * (aAdd1 - aSub1TimesCosOmega + zeta) * a;
const float32_t b1 = +2.0f * (aSub1 - aAdd1TimesCosOmega + ZERO) * a;
const float32_t b2 = +1.0f * (aAdd1 - aSub1TimesCosOmega - zeta) * a;
const float32_t a0 = +1.0f * (aAdd1 + aSub1TimesCosOmega + zeta);
const float32_t a1 = -2.0f * (aSub1 + aAdd1TimesCosOmega + ZERO);
const float32_t a2 = +1.0f * (aAdd1 + aSub1TimesCosOmega - zeta);
const float normA0 = 1.0f / a0;
const float32_t normA0 = 1.0f / a0;
_kernel.setParameters(b0 * normA0, b1 * normA0 , b2 * normA0, a1 * normA0, a2 * normA0);
}
@ -207,14 +207,14 @@ public:
//
void updateKernel() {
const float a = _gain;
const float aAdd1 = a + 1.0f;
const float aSub1 = a - 1.0f;
const float omega = TWO_PI * _frequency / _sampleRate;
const float aAdd1TimesCosOmega = aAdd1 * cosf(omega);
const float aSub1TimesCosOmega = aSub1 * cosf(omega);
const float alpha = 0.5f * sinf(omega) / _slope;
const float zeta = 2.0f * sqrtf(a) * alpha;
const float32_t a = _gain;
const float32_t aAdd1 = a + 1.0f;
const float32_t aSub1 = a - 1.0f;
const float32_t omega = TWO_PI * _frequency / _sampleRate;
const float32_t aAdd1TimesCosOmega = aAdd1 * cosf(omega);
const float32_t aSub1TimesCosOmega = aSub1 * cosf(omega);
const float32_t alpha = 0.5f * sinf(omega) / _slope;
const float32_t zeta = 2.0f * sqrtf(a) * alpha;
/*
b0 = A*( (A+1) + (A-1)*cos(w0) + 2*sqrt(A)*alpha )
b1 = -2*A*( (A-1) + (A+1)*cos(w0) )
@ -223,14 +223,14 @@ public:
a1 = 2*( (A-1) - (A+1)*cos(w0) )
a2 = (A+1) - (A-1)*cos(w0) - 2*sqrt(A)*alpha
*/
const float b0 = +1.0f * (aAdd1 + aSub1TimesCosOmega + zeta) * a;
const float b1 = -2.0f * (aSub1 + aAdd1TimesCosOmega + ZERO) * a;
const float b2 = +1.0f * (aAdd1 + aSub1TimesCosOmega - zeta) * a;
const float a0 = +1.0f * (aAdd1 - aSub1TimesCosOmega + zeta);
const float a1 = +2.0f * (aSub1 - aAdd1TimesCosOmega + ZERO);
const float a2 = +1.0f * (aAdd1 - aSub1TimesCosOmega - zeta);
const float32_t b0 = +1.0f * (aAdd1 + aSub1TimesCosOmega + zeta) * a;
const float32_t b1 = -2.0f * (aSub1 + aAdd1TimesCosOmega + ZERO) * a;
const float32_t b2 = +1.0f * (aAdd1 + aSub1TimesCosOmega - zeta) * a;
const float32_t a0 = +1.0f * (aAdd1 - aSub1TimesCosOmega + zeta);
const float32_t a1 = +2.0f * (aSub1 - aAdd1TimesCosOmega + ZERO);
const float32_t a2 = +1.0f * (aAdd1 - aSub1TimesCosOmega - zeta);
const float normA0 = 1.0f / a0;
const float32_t normA0 = 1.0f / a0;
_kernel.setParameters(b0 * normA0, b1 * normA0 , b2 * normA0, a1 * normA0, a2 * normA0);
}
@ -248,9 +248,9 @@ public:
//
void updateKernel() {
const float omega = TWO_PI * _frequency / _sampleRate;
const float cosOmega = cosf(omega);
const float alpha = 0.5f * sinf(omega) / _slope;
const float32_t omega = TWO_PI * _frequency / _sampleRate;
const float32_t cosOmega = cosf(omega);
const float32_t alpha = 0.5f * sinf(omega) / _slope;
/*
b0 = 1 - alpha
b1 = -2*cos(w0)
@ -259,14 +259,14 @@ public:
a1 = -2*cos(w0)
a2 = 1 - alpha
*/
const float b0 = +1.0f - alpha;
const float b1 = -2.0f * cosOmega;
const float b2 = +1.0f + alpha;
const float a0 = +1.0f + alpha;
const float a1 = -2.0f * cosOmega;
const float a2 = +1.0f - alpha;
const float32_t b0 = +1.0f - alpha;
const float32_t b1 = -2.0f * cosOmega;
const float32_t b2 = +1.0f + alpha;
const float32_t a0 = +1.0f + alpha;
const float32_t a1 = -2.0f * cosOmega;
const float32_t a2 = +1.0f - alpha;
const float normA0 = 1.0f / a0;
const float32_t normA0 = 1.0f / a0;
_kernel.setParameters(b0 * normA0, b1 * normA0 , b2 * normA0, a1 * normA0, a2 * normA0);
}
@ -284,12 +284,12 @@ public:
//
void updateKernel() {
const float a = _gain;
const float omega = TWO_PI * _frequency / _sampleRate;
const float cosOmega = cosf(omega);
const float alpha = 0.5f * sinf(omega) / _slope;
const float alphaMulA = alpha * a;
const float alphaDivA = alpha / a;
const float32_t a = _gain;
const float32_t omega = TWO_PI * _frequency / _sampleRate;
const float32_t cosOmega = cosf(omega);
const float32_t alpha = 0.5f * sinf(omega) / _slope;
const float32_t alphaMulA = alpha * a;
const float32_t alphaDivA = alpha / a;
/*
b0 = 1 + alpha*A
b1 = -2*cos(w0)
@ -298,14 +298,14 @@ public:
a1 = -2*cos(w0)
a2 = 1 - alpha/A
*/
const float b0 = +1.0f + alphaMulA;
const float b1 = -2.0f * cosOmega;
const float b2 = +1.0f - alphaMulA;
const float a0 = +1.0f + alphaDivA;
const float a1 = -2.0f * cosOmega;
const float a2 = +1.0f - alphaDivA;
const float32_t b0 = +1.0f + alphaMulA;
const float32_t b1 = -2.0f * cosOmega;
const float32_t b2 = +1.0f - alphaMulA;
const float32_t a0 = +1.0f + alphaDivA;
const float32_t a1 = -2.0f * cosOmega;
const float32_t a2 = +1.0f - alphaDivA;
const float normA0 = 1.0f / a0;
const float32_t normA0 = 1.0f / a0;
_kernel.setParameters(b0 * normA0, b1 * normA0 , b2 * normA0, a1 * normA0, a2 * normA0);
}
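
For readers mapping the RBJ-cookbook comments above onto AudioBiquad's member naming (where _a* is feedforward and _b* is feedback, the reverse of the cookbook's convention), the per-sample update implied by each updateKernel()/setParameters() pair is the usual direct-form I recurrence. It is stated here as a worked equation rather than quoted code, since the render() loop body is truncated in this view:

    y[n] = _a0*x[n] + _a1*x[n-1] + _a2*x[n-2] - _b1*y[n-1] - _b2*y[n-2]

with x[n-1], x[n-2], y[n-1], y[n-2] carried in _xm1, _xm2, _ym1 and _ym2, and every coefficient pre-divided by the cookbook's a0 via normA0.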

@ -15,24 +15,24 @@
//
// Helper/convenience class that implements a bank of Filter objects
//
template< typename T, const int N, const int C >
template< typename T, const uint32_t N, const uint32_t C >
class AudioFilterBank {
//
// types
//
struct FilterParameter {
float _p1;
float _p2;
float _p3;
float32_t _p1;
float32_t _p2;
float32_t _p3;
};
//
// private static data
//
static const int _filterCount = N;
static const int _channelCount = C;
static const int _profileCount = 4;
static const uint32_t _filterCount = N;
static const uint32_t _channelCount = C;
static const uint32_t _profileCount = 4;
static FilterParameter _profiles[ _profileCount ][ _filterCount ];
@ -40,9 +40,9 @@ class AudioFilterBank {
// private data
//
T _filters[ _filterCount ][ _channelCount ];
float* _buffer[ _channelCount ];
float _sampleRate;
uint16_t _frameCount;
float32_t* _buffer[ _channelCount ];
float32_t _sampleRate;
uint32_t _frameCount;
public:
@ -64,11 +64,11 @@ public:
//
// public interface
//
void initialize(const float sampleRate, const int frameCount = 0) {
void initialize(const float32_t sampleRate, const uint32_t frameCount = 0) {
finalize();
for (int i = 0; i < _channelCount; ++i) {
_buffer[i] = (float*)malloc(frameCount * sizeof(float));
for (uint32_t i = 0; i < _channelCount; ++i) {
_buffer[i] = (float32_t*)malloc(frameCount * sizeof(float32_t));
}
_sampleRate = sampleRate;
@ -79,7 +79,7 @@ public:
}
void finalize() {
for (int i = 0; i < _channelCount; ++i) {
for (uint32_t i = 0; i < _channelCount; ++i) {
if (_buffer[i]) {
free (_buffer[i]);
_buffer[i] = NULL;
@ -90,52 +90,53 @@ public:
void loadProfile(int profileIndex) {
if (profileIndex >= 0 && profileIndex < _profileCount) {
for (int i = 0; i < _filterCount; ++i) {
for (uint32_t i = 0; i < _filterCount; ++i) {
FilterParameter p = _profiles[profileIndex][i];
for (int j = 0; j < _channelCount; ++j) {
for (uint32_t j = 0; j < _channelCount; ++j) {
_filters[i][j].setParameters(_sampleRate,p._p1,p._p2,p._p3);
}
}
}
}
void setParameters(int filterStage, int filterChannel, const float sampleRate, const float frequency, const float gain,
const float slope) {
void setParameters(uint32_t filterStage, uint32_t filterChannel, const float32_t sampleRate, const float32_t frequency,
const float32_t gain, const float32_t slope) {
if (filterStage >= 0 && filterStage < _filterCount && filterChannel >= 0 && filterChannel < _channelCount) {
_filters[filterStage][filterChannel].setParameters(sampleRate,frequency,gain,slope);
}
}
void getParameters(int filterStage, int filterChannel, float& sampleRate, float& frequency, float& gain, float& slope) {
void getParameters(uint32_t filterStage, uint32_t filterChannel, float32_t& sampleRate, float32_t& frequency,
float32_t& gain, float32_t& slope) {
if (filterStage >= 0 && filterStage < _filterCount && filterChannel >= 0 && filterChannel < _channelCount) {
_filters[filterStage][filterChannel].getParameters(sampleRate,frequency,gain,slope);
}
}
void render(const int16_t* in, int16_t* out, const int frameCount) {
void render(const int16_t* in, int16_t* out, const uint32_t frameCount) {
if (!_buffer || (frameCount > _frameCount))
return;
const int scale = (2 << ((8 * sizeof(int16_t)) - 1));
// de-interleave and convert int16_t to float32 (normalized to -1. ... 1.)
for (int i = 0; i < frameCount; ++i) {
for (int j = 0; j < _channelCount; ++j) {
for (uint32_t i = 0; i < frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
_buffer[j][i] = ((float)(*in++)) / scale;
}
}
// now step through each filter
for (int i = 0; i < _channelCount; ++i) {
for (int j = 0; j < _filterCount; ++j) {
for (uint32_t i = 0; i < _channelCount; ++i) {
for (uint32_t j = 0; j < _filterCount; ++j) {
_filters[j][i].render( &_buffer[i][0], &_buffer[i][0], frameCount );
}
}
// convert float32 to int16_t and interleave
for (int i = 0; i < frameCount; ++i) {
for (int j = 0; j < _channelCount; ++j) {
for (uint32_t i = 0; i < frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
*out++ = (int16_t)(_buffer[j][i] * scale);
}
}
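
As a point of reference for the int16_t render path above: scale = 2 << ((8 * sizeof(int16_t)) - 1) = 2 << 15 = 65536, so a full-scale 16-bit sample maps to roughly +/-0.5 after the divide on input, and the multiply on output restores the original magnitude; the float32_t stage therefore runs with about 6 dB of numeric headroom relative to full scale.
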
@ -144,16 +145,16 @@ public:
void render(AudioBufferFloat32& frameBuffer) {
float32_t** samples = frameBuffer.getFrameData();
for (uint16_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
for (int i = 0; i < _filterCount; ++i) {
for (uint32_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
for (uint32_t i = 0; i < _filterCount; ++i) {
_filters[i][j].render( samples[j], samples[j], frameBuffer.getFrameCount() );
}
}
}
void reset() {
for (int i = 0; i < _filterCount; ++i) {
for (int j = 0; j < _channelCount; ++j) {
for (uint32_t i = 0; i < _filterCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
_filters[i][j].reset();
}
}

@ -0,0 +1,48 @@
//
// AudioGain.cpp
// hifi
//
// Created by Craig Hansen-Sturm on 9/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <assert.h>
#include <math.h>
#include <SharedUtil.h>
#include "AudioRingBuffer.h"
#include "AudioFormat.h"
#include "AudioBuffer.h"
#include "AudioGain.h"
AudioGain::AudioGain() {
initialize();
}
AudioGain::~AudioGain() {
finalize();
}
void AudioGain::initialize() {
setParameters(1.0f,0.0f);
}
void AudioGain::finalize() {
}
void AudioGain::reset() {
initialize();
}
void AudioGain::setParameters(const float gain, const float mute) {
_gain = std::min(std::max(gain, 0.0f), 1.0f);
_mute = mute != 0.0f;
}
void AudioGain::getParameters(float& gain, float& mute) {
gain = _gain;
mute = _mute ? 1.0f : 0.0f;
}

@ -18,120 +18,100 @@ class AudioGain
bool _mute;
public:
AudioGain() {
initialize();
}
AudioGain();
~AudioGain();
~AudioGain() {
finalize();
}
void initialize();
void finalize();
void reset();
void initialize() {
setParameters(1.0f,0.0f);
}
void setParameters(const float gain, const float mute);
void getParameters(float& gain, float& mute);
void finalize() {
}
void reset() {
initialize();
}
void setParameters(const float gain, const float mute) {
_gain = std::min(std::max(gain, 0.0f), 1.0f);
_mute = mute != 0.0f;
}
void getParameters(float& gain, float& mute) {
gain = _gain;
mute = _mute ? 1.0f : 0.0f;
}
void render(AudioBufferFloat32& frameBuffer) {
if (_mute) {
frameBuffer.zeroFrames();
return;
}
float32_t** samples = frameBuffer.getFrameData();
bool frameAlignment16 = (frameBuffer.getFrameCount() & 0x0F) == 0;
if (frameAlignment16) {
if (frameBuffer.getChannelCount() == 1) {
for (uint16_t i = 0; i < frameBuffer.getFrameCount(); i += 16) {
samples[0][i + 0] *= _gain;
samples[0][i + 1] *= _gain;
samples[0][i + 2] *= _gain;
samples[0][i + 3] *= _gain;
samples[0][i + 4] *= _gain;
samples[0][i + 5] *= _gain;
samples[0][i + 6] *= _gain;
samples[0][i + 7] *= _gain;
samples[0][i + 8] *= _gain;
samples[0][i + 9] *= _gain;
samples[0][i + 10] *= _gain;
samples[0][i + 11] *= _gain;
samples[0][i + 12] *= _gain;
samples[0][i + 13] *= _gain;
samples[0][i + 14] *= _gain;
samples[0][i + 15] *= _gain;
}
}
else if (frameBuffer.getChannelCount() == 2) {
for (uint16_t i = 0; i < frameBuffer.getFrameCount(); i += 16) {
samples[0][i + 0] *= _gain;
samples[0][i + 1] *= _gain;
samples[0][i + 2] *= _gain;
samples[0][i + 3] *= _gain;
samples[0][i + 4] *= _gain;
samples[0][i + 5] *= _gain;
samples[0][i + 6] *= _gain;
samples[0][i + 7] *= _gain;
samples[0][i + 8] *= _gain;
samples[0][i + 9] *= _gain;
samples[0][i + 10] *= _gain;
samples[0][i + 11] *= _gain;
samples[0][i + 12] *= _gain;
samples[0][i + 13] *= _gain;
samples[0][i + 14] *= _gain;
samples[0][i + 15] *= _gain;
samples[1][i + 0] *= _gain;
samples[1][i + 1] *= _gain;
samples[1][i + 2] *= _gain;
samples[1][i + 3] *= _gain;
samples[1][i + 4] *= _gain;
samples[1][i + 5] *= _gain;
samples[1][i + 6] *= _gain;
samples[1][i + 7] *= _gain;
samples[1][i + 8] *= _gain;
samples[1][i + 9] *= _gain;
samples[1][i + 10] *= _gain;
samples[1][i + 11] *= _gain;
samples[1][i + 12] *= _gain;
samples[1][i + 13] *= _gain;
samples[1][i + 14] *= _gain;
samples[1][i + 15] *= _gain;
}
}
else {
assert("unsupported channel format");
}
}
else {
for (uint16_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
for (uint16_t i = 0; i < frameBuffer.getFrameCount(); i += 1) {
samples[j][i] *= _gain;
}
}
}
}
void render(AudioBufferFloat32& frameBuffer);
};
inline void AudioGain::render(AudioBufferFloat32& frameBuffer) {
if (_mute) {
frameBuffer.zeroFrames();
return;
}
float32_t** samples = frameBuffer.getFrameData();
bool frameAlignment16 = (frameBuffer.getFrameCount() & 0x0F) == 0;
if (frameAlignment16) {
if (frameBuffer.getChannelCount() == 1) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 16) {
samples[0][i + 0] *= _gain;
samples[0][i + 1] *= _gain;
samples[0][i + 2] *= _gain;
samples[0][i + 3] *= _gain;
samples[0][i + 4] *= _gain;
samples[0][i + 5] *= _gain;
samples[0][i + 6] *= _gain;
samples[0][i + 7] *= _gain;
samples[0][i + 8] *= _gain;
samples[0][i + 9] *= _gain;
samples[0][i + 10] *= _gain;
samples[0][i + 11] *= _gain;
samples[0][i + 12] *= _gain;
samples[0][i + 13] *= _gain;
samples[0][i + 14] *= _gain;
samples[0][i + 15] *= _gain;
}
} else if (frameBuffer.getChannelCount() == 2) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 16) {
samples[0][i + 0] *= _gain;
samples[0][i + 1] *= _gain;
samples[0][i + 2] *= _gain;
samples[0][i + 3] *= _gain;
samples[0][i + 4] *= _gain;
samples[0][i + 5] *= _gain;
samples[0][i + 6] *= _gain;
samples[0][i + 7] *= _gain;
samples[0][i + 8] *= _gain;
samples[0][i + 9] *= _gain;
samples[0][i + 10] *= _gain;
samples[0][i + 11] *= _gain;
samples[0][i + 12] *= _gain;
samples[0][i + 13] *= _gain;
samples[0][i + 14] *= _gain;
samples[0][i + 15] *= _gain;
samples[1][i + 0] *= _gain;
samples[1][i + 1] *= _gain;
samples[1][i + 2] *= _gain;
samples[1][i + 3] *= _gain;
samples[1][i + 4] *= _gain;
samples[1][i + 5] *= _gain;
samples[1][i + 6] *= _gain;
samples[1][i + 7] *= _gain;
samples[1][i + 8] *= _gain;
samples[1][i + 9] *= _gain;
samples[1][i + 10] *= _gain;
samples[1][i + 11] *= _gain;
samples[1][i + 12] *= _gain;
samples[1][i + 13] *= _gain;
samples[1][i + 14] *= _gain;
samples[1][i + 15] *= _gain;
}
} else {
assert("unsupported channel format");
}
} else {
for (uint32_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 1) {
samples[j][i] *= _gain;
}
}
}
}
#endif // AudioGain_h

@ -1,8 +1,8 @@
//
// AudioSourceTone.cpp
// AudioPan.cpp
// hifi
//
// Created by Craig Hansen-Sturm on 8/10/14.
// Created by Craig Hansen-Sturm on 9/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
@ -21,3 +21,32 @@ float32_t AudioPan::ONE_MINUS_EPSILON = 1.0f - EPSILON;
float32_t AudioPan::ZERO_PLUS_EPSILON = 0.0f + EPSILON;
float32_t AudioPan::ONE_HALF_MINUS_EPSILON = 0.5f - EPSILON;
float32_t AudioPan::ONE_HALF_PLUS_EPSILON = 0.5f + EPSILON;
AudioPan::AudioPan() {
initialize();
}
AudioPan::~AudioPan() {
finalize();
}
void AudioPan::initialize() {
setParameters(0.5f);
}
void AudioPan::finalize() {
}
void AudioPan::reset() {
initialize();
}
void AudioPan::setParameters(const float32_t pan) {
// pan ranges between 0.0 and 1.0f inclusive. 0.5f is midpoint between full left and full right
_pan = std::min(std::max(pan, 0.0f), 1.0f);
updateCoefficients();
}
void AudioPan::getParameters(float32_t& pan) {
pan = _pan;
}

@ -23,119 +23,100 @@ class AudioPan
static float32_t ONE_HALF_MINUS_EPSILON;
static float32_t ONE_HALF_PLUS_EPSILON;
void updateCoefficients() {
// implement constant power sin^2 + cos^2 = 1 panning law
if (_pan >= ONE_MINUS_EPSILON) { // full right
_gainLeft = 0.0f;
_gainRight = 1.0f;
}
else if (_pan <= ZERO_PLUS_EPSILON) { // full left
_gainLeft = 1.0f;
_gainRight = 0.0f;
}
else if ((_pan >= ONE_HALF_MINUS_EPSILON) && (_pan <= ONE_HALF_PLUS_EPSILON)) { // center
_gainLeft = 1.0f / SQUARE_ROOT_OF_2;
_gainRight = 1.0f / SQUARE_ROOT_OF_2;
}
else { // intermediate cases
_gainLeft = cosf( TWO_PI * _pan );
_gainRight = sinf( TWO_PI * _pan );
}
}
void updateCoefficients();
public:
AudioPan() {
initialize();
}
AudioPan();
~AudioPan();
~AudioPan() {
finalize();
}
void initialize();
void finalize();
void reset();
void initialize() {
setParameters(0.5f);
}
void setParameters(const float32_t pan);
void getParameters(float32_t& pan);
void finalize() {
}
void reset() {
initialize();
}
void setParameters(const float32_t pan) {
// pan ranges between 0.0 and 1.0f inclusive. 0.5f is midpoint between full left and full right
_pan = std::min(std::max(pan, 0.0f), 1.0f);
updateCoefficients();
}
void getParameters(float32_t& pan) {
pan = _pan;
}
void render(AudioBufferFloat32& frameBuffer) {
if (frameBuffer.getChannelCount() != 2) {
return;
}
float32_t** samples = frameBuffer.getFrameData();
bool frameAlignment16 = (frameBuffer.getFrameCount() & 0x0F) == 0;
if (frameAlignment16) {
if (frameBuffer.getChannelCount() == 2) {
for (uint16_t i = 0; i < frameBuffer.getFrameCount(); i += 16) {
samples[0][i + 0] *= _gainLeft;
samples[0][i + 1] *= _gainLeft;
samples[0][i + 2] *= _gainLeft;
samples[0][i + 3] *= _gainLeft;
samples[0][i + 4] *= _gainLeft;
samples[0][i + 5] *= _gainLeft;
samples[0][i + 6] *= _gainLeft;
samples[0][i + 7] *= _gainLeft;
samples[0][i + 8] *= _gainLeft;
samples[0][i + 9] *= _gainLeft;
samples[0][i + 10] *= _gainLeft;
samples[0][i + 11] *= _gainLeft;
samples[0][i + 12] *= _gainLeft;
samples[0][i + 13] *= _gainLeft;
samples[0][i + 14] *= _gainLeft;
samples[0][i + 15] *= _gainLeft;
samples[1][i + 0] *= _gainRight;
samples[1][i + 1] *= _gainRight;
samples[1][i + 2] *= _gainRight;
samples[1][i + 3] *= _gainRight;
samples[1][i + 4] *= _gainRight;
samples[1][i + 5] *= _gainRight;
samples[1][i + 6] *= _gainRight;
samples[1][i + 7] *= _gainRight;
samples[1][i + 8] *= _gainRight;
samples[1][i + 9] *= _gainRight;
samples[1][i + 10] *= _gainRight;
samples[1][i + 11] *= _gainRight;
samples[1][i + 12] *= _gainRight;
samples[1][i + 13] *= _gainRight;
samples[1][i + 14] *= _gainRight;
samples[1][i + 15] *= _gainRight;
}
}
else {
assert("unsupported channel format");
}
}
else {
for (uint16_t i = 0; i < frameBuffer.getFrameCount(); i += 1) {
samples[0][i] *= _gainLeft;
samples[1][i] *= _gainRight;
}
}
}
void render(AudioBufferFloat32& frameBuffer);
};
inline void AudioPan::render(AudioBufferFloat32& frameBuffer) {
if (frameBuffer.getChannelCount() != 2) {
return;
}
float32_t** samples = frameBuffer.getFrameData();
bool frameAlignment16 = (frameBuffer.getFrameCount() & 0x0F) == 0;
if (frameAlignment16) {
if (frameBuffer.getChannelCount() == 2) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 16) {
samples[0][i + 0] *= _gainLeft;
samples[0][i + 1] *= _gainLeft;
samples[0][i + 2] *= _gainLeft;
samples[0][i + 3] *= _gainLeft;
samples[0][i + 4] *= _gainLeft;
samples[0][i + 5] *= _gainLeft;
samples[0][i + 6] *= _gainLeft;
samples[0][i + 7] *= _gainLeft;
samples[0][i + 8] *= _gainLeft;
samples[0][i + 9] *= _gainLeft;
samples[0][i + 10] *= _gainLeft;
samples[0][i + 11] *= _gainLeft;
samples[0][i + 12] *= _gainLeft;
samples[0][i + 13] *= _gainLeft;
samples[0][i + 14] *= _gainLeft;
samples[0][i + 15] *= _gainLeft;
samples[1][i + 0] *= _gainRight;
samples[1][i + 1] *= _gainRight;
samples[1][i + 2] *= _gainRight;
samples[1][i + 3] *= _gainRight;
samples[1][i + 4] *= _gainRight;
samples[1][i + 5] *= _gainRight;
samples[1][i + 6] *= _gainRight;
samples[1][i + 7] *= _gainRight;
samples[1][i + 8] *= _gainRight;
samples[1][i + 9] *= _gainRight;
samples[1][i + 10] *= _gainRight;
samples[1][i + 11] *= _gainRight;
samples[1][i + 12] *= _gainRight;
samples[1][i + 13] *= _gainRight;
samples[1][i + 14] *= _gainRight;
samples[1][i + 15] *= _gainRight;
}
} else {
assert("unsupported channel format");
}
} else {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 1) {
samples[0][i] *= _gainLeft;
samples[1][i] *= _gainRight;
}
}
}
inline void AudioPan::updateCoefficients() {
// implement constant power sin^2 + cos^2 = 1 panning law
if (_pan >= ONE_MINUS_EPSILON) { // full right
_gainLeft = 0.0f;
_gainRight = 1.0f;
} else if (_pan <= ZERO_PLUS_EPSILON) { // full left
_gainLeft = 1.0f;
_gainRight = 0.0f;
} else if ((_pan >= ONE_HALF_MINUS_EPSILON) && (_pan <= ONE_HALF_PLUS_EPSILON)) { // center
_gainLeft = 1.0f / SQUARE_ROOT_OF_2;
_gainRight = 1.0f / SQUARE_ROOT_OF_2;
} else { // intermediate cases
_gainLeft = cosf( TWO_PI * _pan );
_gainRight = sinf( TWO_PI * _pan );
}
}
#endif // AudioPan_h
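
The constant-power law mentioned in updateCoefficients() keeps the summed channel power flat across the stereo field: the gains are chosen so that gainLeft^2 + gainRight^2 = 1, and a centered pan gets gainLeft = gainRight = 1/sqrt(2) (about -3 dB per channel), which is exactly the 1.0f / SQUARE_ROOT_OF_2 case above. One common parameterization of that law, noted here only as a worked example and not necessarily the mapping used by the intermediate branch (which feeds TWO_PI * _pan into cosf/sinf), is theta = (pi/2) * pan with gainLeft = cos(theta) and gainRight = sin(theta), so that cos^2(theta) + sin^2(theta) = 1 at every pan position.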

@ -70,8 +70,8 @@ public:
uint32_t randomNumber;
float32_t** samples = frameBuffer.getFrameData();
for (uint16_t i = 0; i < frameBuffer.getFrameCount(); ++i) {
for (uint16_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); ++i) {
for (uint32_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
_index = (_index + 1) & _indexMask; // increment and mask index.
if (_index != 0) { // if index is zero, don't update any random values.

@ -44,7 +44,7 @@ inline void AudioSourceTone::render(AudioBufferFloat32& frameBuffer) {
float32_t** samples = frameBuffer.getFrameData();
float32_t yq;
float32_t y;
for (uint16_t i = 0; i < frameBuffer.getFrameCount(); ++i) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); ++i) {
yq = _yq1 - (_epsilon * _y1);
y = _y1 + (_epsilon * yq);
@ -53,7 +53,7 @@ inline void AudioSourceTone::render(AudioBufferFloat32& frameBuffer) {
_yq1 = yq;
_y1 = y;
for (uint16_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
for (uint32_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
samples[j][i] = _amplitude * y;
}
}
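
The two-line recurrence in this render loop is a coupled-form sine oscillator (sometimes called the magic-circle or Gordon-Smith oscillator): each sample rotates the state pair (_y1, _yq1) by a small fixed angle, so no per-sample sinf() call is needed. For that structure the standard tuning relation is epsilon = 2 * sin(pi * f0 / sampleRate); whether AudioSourceTone derives _epsilon exactly this way is an assumption here, since its parameter-update code is not part of this hunk.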

@ -24,6 +24,9 @@
#include <SharedUtil.h>
#include "AudioRingBuffer.h"
#include "AudioFormat.h"
#include "AudioBuffer.h"
#include "AudioEditBuffer.h"
#include "Sound.h"
// procedural audio version of Sound
@ -120,6 +123,7 @@ void Sound::replyFinished() {
// Process as RAW file
downSample(rawAudioByteArray);
}
trimFrames();
} else {
qDebug() << "Network reply without 'Content-Type'.";
}
@ -133,7 +137,6 @@ void Sound::replyError(QNetworkReply::NetworkError code) {
}
void Sound::downSample(const QByteArray& rawAudioByteArray) {
// assume that this was a RAW file and is now an array of samples that are
// signed, 16-bit, 48Khz, mono
@ -155,6 +158,26 @@ void Sound::downSample(const QByteArray& rawAudioByteArray) {
}
}
void Sound::trimFrames() {
const uint32_t inputFrameCount = _byteArray.size() / sizeof(int16_t);
const uint32_t trimCount = 1024; // number of leading and trailing frames to trim
if (inputFrameCount <= (2 * trimCount)) {
return;
}
int16_t* inputFrameData = (int16_t*)_byteArray.data();
AudioEditBufferFloat32 editBuffer(1, inputFrameCount);
editBuffer.copyFrames(1, inputFrameCount, inputFrameData, false /*copy in*/);
editBuffer.linearFade(0, trimCount, true);
editBuffer.linearFade(inputFrameCount - trimCount, inputFrameCount, false);
editBuffer.copyFrames(1, inputFrameCount, inputFrameData, true /*copy out*/);
}
//
// Format description from https://ccrma.stanford.edu/courses/422/projects/WaveFormat/
//

@ -33,6 +33,7 @@ private:
QByteArray _byteArray;
bool _hasDownloaded;
void trimFrames();
void downSample(const QByteArray& rawAudioByteArray);
void interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray);