Merge branch 'master' of https://github.com/highfidelity/hifi into metavoxels

This commit is contained in:
Andrzej Kapolka 2014-06-06 11:43:12 -07:00
commit f78187c256
12 changed files with 357 additions and 261 deletions

AudioMixer.cpp

@@ -174,12 +174,15 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
         }
     }
 
+    const int16_t* nextOutputStart = bufferToAdd->getNextOutput();
+
+    if (!bufferToAdd->isStereo()) {
+        // this is a mono buffer, which means it gets full attenuation and spatialization
+
         // if the bearing relative angle to source is > 0 then the delayed channel is the right one
         int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
         int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;
 
-    const int16_t* nextOutputStart = bufferToAdd->getNextOutput();
         const int16_t* bufferStart = bufferToAdd->getBuffer();
         int ringBufferSampleCapacity = bufferToAdd->getSampleCapacity();
@@ -303,6 +306,27 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
                 _clientSamples[parentIndex + delayedChannelOffset] = shortResults[3];
             }
         }
+    } else {
+        // stereo buffer - do attenuation but no sample delay for spatialization
+        for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
+            // use MMX to clamp four additions at a time
+            __m64 bufferSamples = _mm_set_pi16(_clientSamples[s], _clientSamples[s + 1],
+                                               _clientSamples[s + 2], _clientSamples[s + 3]);
+            __m64 addSamples = _mm_set_pi16(nextOutputStart[s] * attenuationCoefficient,
+                                            nextOutputStart[s + 1] * attenuationCoefficient,
+                                            nextOutputStart[s + 2] * attenuationCoefficient,
+                                            nextOutputStart[s + 3] * attenuationCoefficient);
+            __m64 mmxResult = _mm_adds_pi16(bufferSamples, addSamples);
+            int16_t* shortResults = reinterpret_cast<int16_t*>(&mmxResult);
+
+            _clientSamples[s] = shortResults[3];
+            _clientSamples[s + 1] = shortResults[2];
+            _clientSamples[s + 2] = shortResults[1];
+            _clientSamples[s + 3] = shortResults[0];
+        }
+    }
 }
 
 void AudioMixer::prepareMixForListeningNode(Node* node) {
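The stereo branch added above mixes the attenuated samples with an MMX saturating add rather than a plain integer add, so values that overflow clamp at the int16_t limits instead of wrapping. A minimal standalone sketch of that pattern, using a hypothetical helper that is not part of the commit:

    #include <mmintrin.h>
    #include <cstdint>

    // Saturating mix of four interleaved 16-bit samples into an accumulator.
    void mixFourSamplesSaturating(int16_t* mix, const int16_t* add, float attenuation) {
        // _mm_set_pi16 packs its arguments from the high word down to the low word
        __m64 mixSamples = _mm_set_pi16(mix[0], mix[1], mix[2], mix[3]);
        __m64 addSamples = _mm_set_pi16(add[0] * attenuation, add[1] * attenuation,
                                        add[2] * attenuation, add[3] * attenuation);
        __m64 result = _mm_adds_pi16(mixSamples, addSamples); // clamps to [-32768, 32767]
        int16_t* shorts = reinterpret_cast<int16_t*>(&result);

        // read back in reverse order because of the high-to-low packing above
        mix[0] = shorts[3];
        mix[1] = shorts[2];
        mix[2] = shorts[1];
        mix[3] = shorts[0];

        _mm_empty(); // clear the MMX state before any following floating-point work
    }

The high-to-low packing is also why the committed loop assigns shortResults[3] through shortResults[0] back to _clientSamples[s] through _clientSamples[s + 3].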

AudioMixerClientData.cpp

@@ -51,9 +51,21 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
     // grab the AvatarAudioRingBuffer from the vector (or create it if it doesn't exist)
     AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
 
+    // read the first byte after the header to see if this is a stereo or mono buffer
+    quint8 channelFlag = packet.at(numBytesForPacketHeader(packet));
+    bool isStereo = channelFlag == 1;
+
+    if (avatarRingBuffer && avatarRingBuffer->isStereo() != isStereo) {
+        // there's a mismatch in the buffer channels for the incoming and current buffer
+        // so delete our current buffer and create a new one
+        _ringBuffers.removeOne(avatarRingBuffer);
+        avatarRingBuffer->deleteLater();
+        avatarRingBuffer = NULL;
+    }
+
     if (!avatarRingBuffer) {
         // we don't have an AvatarAudioRingBuffer yet, so add it
-        avatarRingBuffer = new AvatarAudioRingBuffer();
+        avatarRingBuffer = new AvatarAudioRingBuffer(isStereo);
         _ringBuffers.push_back(avatarRingBuffer);
     }
@@ -106,7 +118,8 @@ void AudioMixerClientData::pushBuffersAfterFrameSend() {
         PositionalAudioRingBuffer* audioBuffer = _ringBuffers[i];
 
         if (audioBuffer->willBeAddedToMix()) {
-            audioBuffer->shiftReadPosition(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
+            audioBuffer->shiftReadPosition(audioBuffer->isStereo()
+                                           ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
             audioBuffer->setWillBeAddedToMix(false);
         } else if (audioBuffer->getType() == PositionalAudioRingBuffer::Injector
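The channel flag parsed here is the byte the interface client writes immediately after the packet header, as the Audio.cpp hunks later in this commit show, so both ends agree on where the positional data and samples start. The sender side, for reference:

    // From Audio::handleAudioInput() later in this commit
    char* currentPacketPtr = audioDataPacket + populatePacketHeader(audioDataPacket, packetType);
    *currentPacketPtr++ = isStereo;   // quint8 channel flag: 1 = stereo, 0 = mono
    // position, orientation and the samples follow, so on the mixer side
    // packet.at(numBytesForPacketHeader(packet)) reads exactly this byte.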

AudioMixerClientData.h

@@ -24,14 +24,14 @@ public:
     AudioMixerClientData();
     ~AudioMixerClientData();
 
-    const std::vector<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
+    const QList<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
     AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const;
 
     int parseData(const QByteArray& packet);
     void checkBuffersBeforeFrameSend(int jitterBufferLengthSamples);
     void pushBuffersAfterFrameSend();
 private:
-    std::vector<PositionalAudioRingBuffer*> _ringBuffers;
+    QList<PositionalAudioRingBuffer*> _ringBuffers;
 };
 
 #endif // hifi_AudioMixerClientData_h

AvatarAudioRingBuffer.cpp

@@ -13,8 +13,8 @@
 #include "AvatarAudioRingBuffer.h"
 
-AvatarAudioRingBuffer::AvatarAudioRingBuffer() :
-    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone) {
+AvatarAudioRingBuffer::AvatarAudioRingBuffer(bool isStereo) :
+    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone, isStereo) {
 
 }

AvatarAudioRingBuffer.h

@@ -18,7 +18,7 @@
 class AvatarAudioRingBuffer : public PositionalAudioRingBuffer {
 public:
-    AvatarAudioRingBuffer();
+    AvatarAudioRingBuffer(bool isStereo = false);
 
     int parseData(const QByteArray& packet);
 private:

Audio.cpp

@@ -68,6 +68,7 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
     _proceduralOutputDevice(NULL),
     _inputRingBuffer(0),
     _ringBuffer(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL),
+    _isStereoInput(false),
     _averagedLatency(0.0),
     _measuredJitter(0),
     _jitterBufferSamples(initialJitterBufferSamples),
@@ -289,7 +290,7 @@ void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
     if (sourceToDestinationFactor >= 2) {
         // we need to downsample from 48 to 24
         // for now this only supports a mono output - this would be the case for audio input
+        if (destinationAudioFormat.channelCount() == 1) {
             for (unsigned int i = sourceAudioFormat.channelCount(); i < numSourceSamples; i += 2 * sourceAudioFormat.channelCount()) {
                 if (i + (sourceAudioFormat.channelCount()) >= numSourceSamples) {
                     destinationSamples[(i - sourceAudioFormat.channelCount()) / (int) sourceToDestinationFactor] =
@@ -302,7 +303,14 @@ void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
                         + (sourceSamples[i + sourceAudioFormat.channelCount()] / 4);
                 }
             }
+        } else {
+            // this is a 48 to 24 resampling but both source and destination are two channels
+            // squish two samples into one in each channel
+            for (int i = 0; i < numSourceSamples; i += 4) {
+                destinationSamples[i / 2] = (sourceSamples[i] / 2) + (sourceSamples[i + 2] / 2);
+                destinationSamples[(i / 2) + 1] = (sourceSamples[i + 1] / 2) + (sourceSamples[i + 3] / 2);
+            }
+        }
     } else {
         if (sourceAudioFormat.sampleRate() == destinationAudioFormat.sampleRate()) {
             // mono to stereo, same sample rate
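The new else branch covers 48 kHz to 24 kHz resampling when both source and destination are stereo: the samples are interleaved as [L0 R0 L1 R1 ...], so each output frame averages two consecutive input frames per channel, which is why the loop advances four samples at a time. A self-contained sketch of the same idea, with a hypothetical helper name:

    #include <cstdint>

    // Halve the sample rate of an interleaved stereo buffer by averaging frame pairs.
    void downsampleInterleavedStereoByTwo(const int16_t* src, int16_t* dst, int numSourceSamples) {
        for (int i = 0; i + 3 < numSourceSamples; i += 4) {
            dst[i / 2]       = (src[i] / 2)     + (src[i + 2] / 2); // left:  L0/2 + L1/2
            dst[(i / 2) + 1] = (src[i + 1] / 2) + (src[i + 3] / 2); // right: R0/2 + R1/2
        }
    }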
@@ -405,12 +413,12 @@ bool Audio::switchOutputToAudioDevice(const QString& outputDeviceName) {
 }
 
 void Audio::handleAudioInput() {
-    static char monoAudioDataPacket[MAX_PACKET_SIZE];
+    static char audioDataPacket[MAX_PACKET_SIZE];
 
     static int numBytesPacketHeader = numBytesForPacketHeaderGivenPacketType(PacketTypeMicrophoneAudioNoEcho);
-    static int leadingBytes = numBytesPacketHeader + sizeof(glm::vec3) + sizeof(glm::quat);
-    static int16_t* monoAudioSamples = (int16_t*) (monoAudioDataPacket + leadingBytes);
+    static int leadingBytes = numBytesPacketHeader + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
+    static int16_t* networkAudioSamples = (int16_t*) (audioDataPacket + leadingBytes);
 
     float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio(_numInputCallbackBytes);
@@ -453,17 +461,21 @@ void Audio::handleAudioInput() {
         int16_t* inputAudioSamples = new int16_t[inputSamplesRequired];
         _inputRingBuffer.readSamples(inputAudioSamples, inputSamplesRequired);
 
+        int numNetworkBytes = _isStereoInput ? NETWORK_BUFFER_LENGTH_BYTES_STEREO : NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
+        int numNetworkSamples = _isStereoInput ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
+
         // zero out the monoAudioSamples array and the locally injected audio
-        memset(monoAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
+        memset(networkAudioSamples, 0, numNetworkBytes);
 
         if (!_muted) {
             // we aren't muted, downsample the input audio
-            linearResampling((int16_t*) inputAudioSamples,
-                             monoAudioSamples,
-                             inputSamplesRequired,
-                             NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
+            linearResampling((int16_t*) inputAudioSamples, networkAudioSamples,
+                             inputSamplesRequired, numNetworkSamples,
                              _inputFormat, _desiredInputFormat);
 
+            // only impose the noise gate and perform tone injection if we sending mono audio
+            if (!_isStereoInput) {
             //
             // Impose Noise Gate
             //
@@ -504,9 +516,9 @@ void Audio::handleAudioInput() {
             }
 
             for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
-                measuredDcOffset += monoAudioSamples[i];
-                monoAudioSamples[i] -= (int16_t) _dcOffset;
-                thisSample = fabsf(monoAudioSamples[i]);
+                measuredDcOffset += networkAudioSamples[i];
+                networkAudioSamples[i] -= (int16_t) _dcOffset;
+                thisSample = fabsf(networkAudioSamples[i]);
                 if (thisSample >= (32767.0f * CLIPPING_THRESHOLD)) {
                     _timeSinceLastClip = 0.0f;
                 }
@@ -531,8 +543,8 @@ void Audio::handleAudioInput() {
             if (_toneInjectionEnabled) {
                 loudness = 0.0f;
                 for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
-                    monoAudioSamples[i] = QUARTER_VOLUME * sinf(TONE_FREQ * (float)(i + _proceduralEffectSample));
-                    loudness += fabsf(monoAudioSamples[i]);
+                    networkAudioSamples[i] = QUARTER_VOLUME * sinf(TONE_FREQ * (float)(i + _proceduralEffectSample));
+                    loudness += fabsf(networkAudioSamples[i]);
                 }
             }
             _lastInputLoudness = fabs(loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
@@ -569,10 +581,19 @@ void Audio::handleAudioInput() {
                 }
             }
             if (!_noiseGateOpen) {
-                memset(monoAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
+                memset(networkAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
                 _lastInputLoudness = 0;
             }
             }
+            } else {
+                float loudness = 0.0f;
+
+                for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; i++) {
+                    loudness += fabsf(networkAudioSamples[i]);
+                }
+
+                _lastInputLoudness = fabs(loudness / NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
+            }
         } else {
             // our input loudness is 0, since we're muted
             _lastInputLoudness = 0;
@@ -580,19 +601,19 @@ void Audio::handleAudioInput() {
         // at this point we have clean monoAudioSamples, which match our target output...
         // this is what we should send to our interested listeners
-        if (_processSpatialAudio && !_muted && _audioOutput) {
-            QByteArray monoInputData((char*)monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
+        if (_processSpatialAudio && !_muted && !_isStereoInput && _audioOutput) {
+            QByteArray monoInputData((char*)networkAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
             emit processLocalAudio(_spatialAudioStart, monoInputData, _desiredInputFormat);
         }
 
-        if (_proceduralAudioOutput) {
-            processProceduralAudio(monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
+        if (!_isStereoInput && _proceduralAudioOutput) {
+            processProceduralAudio(networkAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
         }
 
-        if (_scopeEnabled && !_scopeEnabledPause) {
+        if (!_isStereoInput && _scopeEnabled && !_scopeEnabledPause) {
             unsigned int numMonoAudioChannels = 1;
             unsigned int monoAudioChannel = 0;
-            addBufferToScope(_scopeInput, _scopeInputOffset, monoAudioSamples, monoAudioChannel, numMonoAudioChannels);
+            addBufferToScope(_scopeInput, _scopeInputOffset, networkAudioSamples, monoAudioChannel, numMonoAudioChannels);
             _scopeInputOffset += NETWORK_SAMPLES_PER_FRAME;
             _scopeInputOffset %= _samplesPerScope;
         }
@@ -604,9 +625,7 @@ void Audio::handleAudioInput() {
         MyAvatar* interfaceAvatar = Application::getInstance()->getAvatar();
         glm::vec3 headPosition = interfaceAvatar->getHead()->getPosition();
         glm::quat headOrientation = interfaceAvatar->getHead()->getFinalOrientationInWorldFrame();
+        quint8 isStereo = _isStereoInput ? 1 : 0;
 
-        // we need the amount of bytes in the buffer + 1 for type
-        // + 12 for 3 floats for position + float for bearing + 1 attenuation byte
         int numAudioBytes = 0;
@@ -615,11 +634,12 @@ void Audio::handleAudioInput() {
             packetType = PacketTypeSilentAudioFrame;
 
             // we need to indicate how many silent samples this is to the audio mixer
-            monoAudioSamples[0] = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
+            audioDataPacket[0] = _isStereoInput
+                ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO
+                : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
             numAudioBytes = sizeof(int16_t);
         } else {
-            numAudioBytes = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
+            numAudioBytes = _isStereoInput ? NETWORK_BUFFER_LENGTH_BYTES_STEREO : NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
 
             if (Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)) {
                 packetType = PacketTypeMicrophoneAudioWithEcho;
@@ -628,7 +648,10 @@ void Audio::handleAudioInput() {
             }
         }
 
-        char* currentPacketPtr = monoAudioDataPacket + populatePacketHeader(monoAudioDataPacket, packetType);
+        char* currentPacketPtr = audioDataPacket + populatePacketHeader(audioDataPacket, packetType);
+
+        // set the mono/stereo byte
+        *currentPacketPtr++ = isStereo;
 
         // memcpy the three float positions
         memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
@@ -638,7 +661,7 @@ void Audio::handleAudioInput() {
         memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
         currentPacketPtr += sizeof(headOrientation);
 
-        nodeList->writeDatagram(monoAudioDataPacket, numAudioBytes + leadingBytes, audioMixer);
+        nodeList->writeDatagram(audioDataPacket, numAudioBytes + leadingBytes, audioMixer);
 
         Application::getInstance()->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO)
             .updateValue(numAudioBytes + leadingBytes);
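Taken together, the new leadingBytes calculation and the packet assembly above imply the following layout for microphone audio packets after this change (a sketch inferred from the code, not an authoritative wire spec):

    [ packet header ]
    [ quint8 isStereo flag                1 byte  ]
    [ glm::vec3 head position            12 bytes ]
    [ glm::quat head orientation         16 bytes ]
    [ int16_t audio samples   numAudioBytes bytes ]

numAudioBytes is NETWORK_BUFFER_LENGTH_BYTES_STEREO for stereo capture and NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL for mono; for PacketTypeSilentAudioFrame the sample area instead carries a single int16_t count of silent samples.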
@@ -761,6 +784,24 @@ void Audio::toggleAudioNoiseReduction() {
     _noiseGateEnabled = !_noiseGateEnabled;
 }
 
+void Audio::toggleStereoInput() {
+    int oldChannelCount = _desiredInputFormat.channelCount();
+    QAction* stereoAudioOption = Menu::getInstance()->getActionForOption(MenuOption::StereoAudio);
+
+    if (stereoAudioOption->isChecked()) {
+        _desiredInputFormat.setChannelCount(2);
+        _isStereoInput = true;
+    } else {
+        _desiredInputFormat.setChannelCount(1);
+        _isStereoInput = false;
+    }
+
+    if (oldChannelCount != _desiredInputFormat.channelCount()) {
+        // change in channel count for desired input format, restart the input device
+        switchInputToAudioDevice(_inputAudioDeviceName);
+    }
+}
+
 void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
     _ringBuffer.parseData(audioByteArray);
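toggleStereoInput() is exposed as a slot and driven by the checkable Stereo Audio item that Menu.cpp adds below; it only restarts capture via switchInputToAudioDevice() when the desired channel count actually changes. An equivalent direct wiring, shown purely for illustration with hypothetical variable names (the real hookup goes through addCheckableActionToQMenuAndActionHash):

    QAction* stereoAction = new QAction("Stereo Audio", audioDebugMenu); // checkable debug-menu entry
    stereoAction->setCheckable(true);
    QObject::connect(stereoAction, SIGNAL(triggered()), audio, SLOT(toggleStereoInput()));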
@@ -1301,6 +1342,8 @@ bool Audio::switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo) {
     if (adjustedFormatForAudioDevice(inputDeviceInfo, _desiredInputFormat, _inputFormat)) {
         qDebug() << "The format to be used for audio input is" << _inputFormat;
 
+        // if the user wants stereo but this device can't provide then bail
+        if (!_isStereoInput || _inputFormat.channelCount() == 2) {
             _audioInput = new QAudioInput(inputDeviceInfo, _inputFormat, this);
             _numInputCallbackBytes = calculateNumberOfInputCallbackBytes(_inputFormat);
             _audioInput->setBufferSize(_numInputCallbackBytes);
@@ -1314,6 +1357,7 @@ bool Audio::switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo) {
             supportedFormat = true;
         }
     }
+    }
     return supportedFormat;
 }

Audio.h

@@ -85,6 +85,7 @@ public slots:
     void toggleScope();
     void toggleScopePause();
     void toggleAudioSpatialProcessing();
+    void toggleStereoInput();
     void selectAudioScopeFiveFrames();
     void selectAudioScopeTwentyFrames();
     void selectAudioScopeFiftyFrames();
@@ -127,6 +128,7 @@ private:
     QIODevice* _proceduralOutputDevice;
     AudioRingBuffer _inputRingBuffer;
     AudioRingBuffer _ringBuffer;
+    bool _isStereoInput;
     QString _inputAudioDeviceName;
     QString _outputAudioDeviceName;

Menu.cpp

@@ -432,6 +432,8 @@ Menu::Menu() :
                                            SLOT(toggleAudioNoiseReduction()));
         addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoServerAudio);
         addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoLocalAudio);
+        addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::StereoAudio, 0, false,
+                                               appInstance->getAudio(), SLOT(toggleStereoInput()));
         addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::MuteAudio,
                                                Qt::CTRL | Qt::Key_M,
                                                false,

Menu.h

@@ -402,6 +402,7 @@ namespace MenuOption {
     const QString StandOnNearbyFloors = "Stand on nearby floors";
     const QString Stars = "Stars";
     const QString Stats = "Stats";
+    const QString StereoAudio = "Stereo Audio";
     const QString StopAllScripts = "Stop All Scripts";
     const QString SuppressShortTimings = "Suppress Timings Less than 10ms";
     const QString TestPing = "Test Ping";

PositionalAudioRingBuffer.cpp

@@ -20,14 +20,15 @@
 #include "PositionalAudioRingBuffer.h"
 
-PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type) :
-    AudioRingBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL),
+PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo) :
+    AudioRingBuffer(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL),
     _type(type),
     _position(0.0f, 0.0f, 0.0f),
     _orientation(0.0f, 0.0f, 0.0f, 0.0f),
     _willBeAddedToMix(false),
     _shouldLoopbackForNode(false),
-    _shouldOutputStarveDebug(true)
+    _shouldOutputStarveDebug(true),
+    _isStereo(isStereo)
 {
 
 }
@@ -40,6 +41,9 @@ int PositionalAudioRingBuffer::parseData(const QByteArray& packet) {
     // skip the packet header (includes the source UUID)
     int readBytes = numBytesForPacketHeader(packet);
 
+    // hop over the channel flag that has already been read in AudioMixerClientData
+    readBytes += sizeof(quint8);
+    // read the positional data
     readBytes += parsePositionalData(packet.mid(readBytes));
 
     if (packetTypeForPacket(packet) == PacketTypeSilentAudioFrame) {

PositionalAudioRingBuffer.h

@@ -24,7 +24,7 @@ public:
         Injector
     };
 
-    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type);
+    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false);
     ~PositionalAudioRingBuffer();
 
     int parseData(const QByteArray& packet);
@@ -41,6 +41,8 @@ public:
     bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
 
+    bool isStereo() const { return _isStereo; }
+
     PositionalAudioRingBuffer::Type getType() const { return _type; }
     const glm::vec3& getPosition() const { return _position; }
     const glm::quat& getOrientation() const { return _orientation; }
@@ -56,6 +58,7 @@ protected:
     bool _willBeAddedToMix;
     bool _shouldLoopbackForNode;
     bool _shouldOutputStarveDebug;
+    bool _isStereo;
 
     float _nextOutputTrailingLoudness;
 };

PacketHeaders.cpp

@@ -47,6 +47,9 @@ int packArithmeticallyCodedValue(int value, char* destination) {
 
 PacketVersion versionForPacketType(PacketType type) {
     switch (type) {
+        case PacketTypeMicrophoneAudioNoEcho:
+        case PacketTypeMicrophoneAudioWithEcho:
+            return 1;
         case PacketTypeAvatarData:
             return 3;
         case PacketTypeAvatarIdentity: