Mirror of https://github.com/HifiExperiments/overte.git (synced 2025-07-22 18:23:17 +02:00)

commit e339155328 (parent 40a1517108)
replace missing PortAudio with Qt audio

4 changed files with 274 additions and 643 deletions
@@ -133,9 +133,7 @@ Application::Application(int& argc, char** argv, timeval &startup_time) :
         _lookatIndicatorScale(1.0f),
         _perfStatsOn(false),
         _chatEntryOn(false),
-#ifndef _WIN32
-        _audio(&_audioScope, STARTUP_JITTER_SAMPLES),
-#endif
+        _audio(STARTUP_JITTER_SAMPLES),
         _stopNetworkReceiveThread(false),
         _voxelProcessor(),
         _voxelEditSender(this),
@@ -162,6 +160,14 @@ Application::Application(int& argc, char** argv, timeval &startup_time) :

     NodeList::createInstance(NODE_TYPE_AGENT, listenPort);

+    // put the audio processing on a separate thread
+    QThread* audioThread = new QThread(this);
+
+    _audio.moveToThread(audioThread);
+    connect(audioThread, SIGNAL(started()), &_audio, SLOT(start()));
+
+    audioThread->start();
+
     NodeList::getInstance()->addHook(&_voxels);
     NodeList::getInstance()->addHook(this);
     NodeList::getInstance()->addDomainListener(this);
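The hunk above is Qt's standard worker-object threading pattern: the Audio object is created on the main thread, reparented to a new QThread with moveToThread(), and its start() slot runs only once the thread's event loop is up. A minimal, self-contained sketch of the same pattern — the Worker class and names here are illustrative, not part of this commit, and as with any QObject subclass the code needs moc:

    #include <QtCore/QCoreApplication>
    #include <QtCore/QThread>
    #include <QtCore/QDebug>

    class Worker : public QObject {
        Q_OBJECT
    public slots:
        void start() {
            // Runs on the worker thread: the object was moved there
            // before the thread's started() signal was emitted.
            qDebug() << "setup on" << QThread::currentThread();
        }
    };

    int main(int argc, char** argv) {
        QCoreApplication app(argc, argv);

        QThread* thread = new QThread(&app);
        Worker worker;
        worker.moveToThread(thread);

        // Cross-thread connection: Qt queues started() and invokes start()
        // inside the worker thread's event loop, exactly as the commit does
        // with Audio::start().
        QObject::connect(thread, SIGNAL(started()), &worker, SLOT(start()));
        thread->start();

        return app.exec();
    }

The same queued-connection mechanism later delivers readyRead() from the input device to Audio::handleAudioInput() on the audio thread.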
@@ -245,8 +251,6 @@ Application::~Application() {

     _sharedVoxelSystem.changeTree(new VoxelTree);

-    _audio.shutdown();
-
     VoxelNode::removeDeleteHook(&_voxels); // we don't need to do this processing on shutdown
     delete Menu::getInstance();

@@ -648,9 +652,6 @@ void Application::keyPressEvent(QKeyEvent* event) {
            case Qt::Key_Period:
                Menu::getInstance()->handleViewFrustumOffsetKeyModifier(event->key());
                break;
-           case Qt::Key_Semicolon:
-               _audio.ping();
-               break;
            case Qt::Key_Apostrophe:
                _audioScope.inputPaused = !_audioScope.inputPaused;
                break;
@@ -2425,7 +2426,6 @@ void Application::updateAudio(float deltaTime) {
 #ifndef _WIN32
     _audio.setLastAcceleration(_myAvatar.getThrust());
     _audio.setLastVelocity(_myAvatar.getVelocity());
-    _audio.eventuallyAnalyzePing();
 #endif
 }

@@ -5,14 +5,17 @@
 //  Created by Stephen Birarda on 1/22/13.
 //  Copyright (c) 2013 High Fidelity, Inc. All rights reserved.
 //
-#ifndef _WIN32
-
 #include <cstring>

-#include <iostream>
-#include <pthread.h>
 #include <sys/stat.h>

+#ifdef __APPLE__
+#include <CoreAudio/AudioHardware.h>
+#endif
+
+#include <QtMultimedia/QAudioInput>
+#include <QtMultimedia/QAudioOutput>
+
 #include <AngleUtil.h>
 #include <NodeList.h>
 #include <NodeTypes.h>
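The new includes bring in Qt 4's QtMultimedia input/output classes; the CoreAudio header is needed on OS X because the commit queries the system default device directly (see defaultAudioDeviceForMode() below). On the Qt side, a device can also be probed for a format before opening it — a minimal sketch of that negotiation, not taken from this commit, which only checks isFormatSupported() and bails out on failure:

    #include <QtMultimedia/QAudioDeviceInfo>
    #include <QtMultimedia/QAudioFormat>

    // Returns a format the device can actually open: the requested one if
    // supported, otherwise the device's closest match via nearestFormat().
    QAudioFormat negotiateFormat(const QAudioDeviceInfo& device, QAudioFormat requested) {
        if (!device.isFormatSupported(requested)) {
            requested = device.nearestFormat(requested); // may alter rate, sample size, or channels
        }
        return requested;
    }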
@@ -27,291 +30,37 @@
 #include "Menu.h"
 #include "Util.h"

-//#define SHOW_AUDIO_DEBUG
-
-static const int PHASE_DELAY_AT_90 = 20;
-static const float AMPLITUDE_RATIO_AT_90 = 0.5;
-static const int MIN_FLANGE_EFFECT_THRESHOLD = 600;
-static const int MAX_FLANGE_EFFECT_THRESHOLD = 1500;
-static const float FLANGE_BASE_RATE = 4;
-static const float MAX_FLANGE_SAMPLE_WEIGHT = 0.50;
-static const float MIN_FLANGE_INTENSITY = 0.25;
-
 static const float JITTER_BUFFER_LENGTH_MSECS = 12;
 static const short JITTER_BUFFER_SAMPLES = JITTER_BUFFER_LENGTH_MSECS *
                                            NUM_AUDIO_CHANNELS * (SAMPLE_RATE / 1000.0);

 static const float AUDIO_CALLBACK_MSECS = (float)BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE * 1000.0;

-static const int NODE_LOOPBACK_MODIFIER = 307;
-
-// Speex preprocessor and echo canceller adaption
-static const int AEC_N_CHANNELS_MIC = 1;  // Number of microphone channels
-static const int AEC_N_CHANNELS_PLAY = 2; // Number of speaker channels
-static const int AEC_FILTER_LENGTH = BUFFER_LENGTH_SAMPLES_PER_CHANNEL * 20; // Width of the filter
-static const int AEC_BUFFERED_FRAMES = 6; // Maximum number of frames to buffer
-static const int AEC_BUFFERED_SAMPLES_PER_CHANNEL = BUFFER_LENGTH_SAMPLES_PER_CHANNEL * AEC_BUFFERED_FRAMES;
-static const int AEC_BUFFERED_SAMPLES = AEC_BUFFERED_SAMPLES_PER_CHANNEL * AEC_N_CHANNELS_PLAY;
-static const int AEC_TMP_BUFFER_SIZE = (AEC_N_CHANNELS_MIC +  // Temporary space for processing a
-                                        AEC_N_CHANNELS_PLAY) * BUFFER_LENGTH_SAMPLES_PER_CHANNEL; // single frame
-
-// Ping test configuration
-static const float PING_PITCH = 16.f;             // Ping wavelength, # samples / radian
-static const float PING_VOLUME = 32000.f;         // Ping peak amplitude
-static const int PING_MIN_AMPLI = 225;            // Minimum amplitude
-static const int PING_MAX_PERIOD_DIFFERENCE = 15; // Maximum # samples from expected period
-static const int PING_PERIOD = int(Radians::twicePi() * PING_PITCH); // Sine period based on the given pitch
-static const int PING_HALF_PERIOD = int(Radians::pi() * PING_PITCH); // Distance between extrema
-static const int PING_FRAMES_TO_RECORD = AEC_BUFFERED_FRAMES;        // Frames to record for analysis
-static const int PING_SAMPLES_TO_ANALYZE = AEC_BUFFERED_SAMPLES_PER_CHANNEL; // Samples to analyze (reusing AEC buffer)
-static const int PING_BUFFER_OFFSET = BUFFER_LENGTH_SAMPLES_PER_CHANNEL - PING_PERIOD * 2.0f; // Signal start
-
 // Mute icon configration
 static const int ICON_SIZE = 24;
 static const int ICON_LEFT = 20;
 static const int BOTTOM_PADDING = 110;

-inline void Audio::performIO(int16_t* inputLeft, int16_t* outputLeft, int16_t* outputRight) {
-
-    NodeList* nodeList = NodeList::getInstance();
-    Application* interface = Application::getInstance();
-    Avatar* interfaceAvatar = interface->getAvatar();
-
-    memset(outputLeft, 0, PACKET_LENGTH_BYTES_PER_CHANNEL);
-    memset(outputRight, 0, PACKET_LENGTH_BYTES_PER_CHANNEL);
-
-    // If Mute button is pressed, clear the input buffer
-    if (_muted) {
-        memset(inputLeft, 0, PACKET_LENGTH_BYTES_PER_CHANNEL);
-    }
-
-    // If local loopback enabled, copy input to output
-    if (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio)) {
-        memcpy(outputLeft, inputLeft, PACKET_LENGTH_BYTES_PER_CHANNEL);
-        memcpy(outputRight, inputLeft, PACKET_LENGTH_BYTES_PER_CHANNEL);
-    }
-
-    // Add Procedural effects to input samples
-    addProceduralSounds(inputLeft, outputLeft, outputRight, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
-
-    if (nodeList && inputLeft) {
-
-        // Measure the loudness of the signal from the microphone and store in audio object
-        float loudness = 0;
-        for (int i = 0; i < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
-            loudness += abs(inputLeft[i]);
-        }
-
-        loudness /= BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
-        _lastInputLoudness = loudness;
-
-        // add input (@microphone) data to the scope
-        _scope->addSamples(0, inputLeft, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
-
-        Node* audioMixer = nodeList->soloNodeOfType(NODE_TYPE_AUDIO_MIXER);
-
-        if (audioMixer) {
-            if (audioMixer->getActiveSocket()) {
-                glm::vec3 headPosition = interfaceAvatar->getHeadJointPosition();
-                glm::quat headOrientation = interfaceAvatar->getHead().getOrientation();
-
-                int numBytesPacketHeader = numBytesForPacketHeader((unsigned char*) &PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO);
-                int leadingBytes = numBytesPacketHeader + sizeof(headPosition) + sizeof(headOrientation);
-
-                // we need the amount of bytes in the buffer + 1 for type
-                // + 12 for 3 floats for position + float for bearing + 1 attenuation byte
-                unsigned char dataPacket[MAX_PACKET_SIZE];
-
-                PACKET_TYPE packetType = Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)
-                    ? PACKET_TYPE_MICROPHONE_AUDIO_WITH_ECHO
-                    : PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO;
-
-                unsigned char* currentPacketPtr = dataPacket + populateTypeAndVersion(dataPacket, packetType);
-
-                // pack Source Data
-                QByteArray rfcUUID = NodeList::getInstance()->getOwnerUUID().toRfc4122();
-                memcpy(currentPacketPtr, rfcUUID.constData(), rfcUUID.size());
-                currentPacketPtr += rfcUUID.size();
-                leadingBytes += rfcUUID.size();
-
-                // memcpy the three float positions
-                memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
-                currentPacketPtr += (sizeof(headPosition));
-
-                // memcpy our orientation
-                memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
-                currentPacketPtr += sizeof(headOrientation);
-
-                // copy the audio data to the last BUFFER_LENGTH_BYTES bytes of the data packet
-                memcpy(currentPacketPtr, inputLeft, BUFFER_LENGTH_BYTES_PER_CHANNEL);
-
-                nodeList->getNodeSocket()->send(audioMixer->getActiveSocket(),
-                                                dataPacket,
-                                                BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes);
-
-                interface->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO).updateValue(BUFFER_LENGTH_BYTES_PER_CHANNEL
-                                                                                                + leadingBytes);
-            } else {
-                nodeList->pingPublicAndLocalSocketsForInactiveNode(audioMixer);
-            }
-        }
-    }
-
-    AudioRingBuffer* ringBuffer = &_ringBuffer;
-
-    // if there is anything in the ring buffer, decide what to do:
-
-    if (ringBuffer->getEndOfLastWrite()) {
-        if (ringBuffer->isStarved() && ringBuffer->diffLastWriteNextOutput() <
-            (PACKET_LENGTH_SAMPLES + _jitterBufferSamples * (ringBuffer->isStereo() ? 2 : 1))) {
-            //
-            // If not enough audio has arrived to start playback, keep waiting
-            //
-#ifdef SHOW_AUDIO_DEBUG
-            qDebug("%i,%i,%i,%i\n",
-                   _packetsReceivedThisPlayback,
-                   ringBuffer->diffLastWriteNextOutput(),
-                   PACKET_LENGTH_SAMPLES,
-                   _jitterBufferSamples);
-#endif
-        } else if (!ringBuffer->isStarved() && ringBuffer->diffLastWriteNextOutput() == 0) {
-            //
-            // If we have started and now have run out of audio to send to the audio device,
-            // this means we've starved and should restart.
-            //
-            ringBuffer->setIsStarved(true);
-
-            _numStarves++;
-            _packetsReceivedThisPlayback = 0;
-            _wasStarved = 10; // Frames for which to render the indication that the system was starved.
-#ifdef SHOW_AUDIO_DEBUG
-            qDebug("Starved, remaining samples = %d\n",
-                   ringBuffer->diffLastWriteNextOutput());
-#endif
-
-        } else {
-            //
-            // We are either already playing back, or we have enough audio to start playing back.
-            //
-            if (ringBuffer->isStarved()) {
-                ringBuffer->setIsStarved(false);
-                ringBuffer->setHasStarted(true);
-#ifdef SHOW_AUDIO_DEBUG
-                qDebug("starting playback %0.1f msecs delayed, jitter = %d, pkts recvd: %d \n",
-                       (usecTimestampNow() - usecTimestamp(&_firstPacketReceivedTime))/1000.0,
-                       _jitterBufferSamples,
-                       _packetsReceivedThisPlayback);
-#endif
-            }
-
-            //
-            // play whatever we have in the audio buffer
-            //
-            // if we haven't fired off the flange effect, check if we should
-            // TODO: lastMeasuredHeadYaw is now relative to body - check if this still works.
-
-            int lastYawMeasured = fabsf(interfaceAvatar->getHeadYawRate());
-
-            if (!_samplesLeftForFlange && lastYawMeasured > MIN_FLANGE_EFFECT_THRESHOLD) {
-                // we should flange for one second
-                if ((_lastYawMeasuredMaximum = std::max(_lastYawMeasuredMaximum, lastYawMeasured)) != lastYawMeasured) {
-                    _lastYawMeasuredMaximum = std::min(_lastYawMeasuredMaximum, MIN_FLANGE_EFFECT_THRESHOLD);
-
-                    _samplesLeftForFlange = SAMPLE_RATE;
-
-                    _flangeIntensity = MIN_FLANGE_INTENSITY +
-                        ((_lastYawMeasuredMaximum - MIN_FLANGE_EFFECT_THRESHOLD) /
-                         (float)(MAX_FLANGE_EFFECT_THRESHOLD - MIN_FLANGE_EFFECT_THRESHOLD)) *
-                        (1 - MIN_FLANGE_INTENSITY);
-
-                    _flangeRate = FLANGE_BASE_RATE * _flangeIntensity;
-                    _flangeWeight = MAX_FLANGE_SAMPLE_WEIGHT * _flangeIntensity;
-                }
-            }
-
-            for (int s = 0; s < PACKET_LENGTH_SAMPLES_PER_CHANNEL; s++) {
-
-                int leftSample = ringBuffer->getNextOutput()[s];
-                int rightSample = ringBuffer->getNextOutput()[s + PACKET_LENGTH_SAMPLES_PER_CHANNEL];
-
-                if (_samplesLeftForFlange > 0) {
-                    float exponent = (SAMPLE_RATE - _samplesLeftForFlange - (SAMPLE_RATE / _flangeRate)) /
-                        (SAMPLE_RATE / _flangeRate);
-                    int sampleFlangeDelay = (SAMPLE_RATE / (1000 * _flangeIntensity)) * powf(2, exponent);
-
-                    if (_samplesLeftForFlange != SAMPLE_RATE || s >= (SAMPLE_RATE / 2000)) {
-                        // we have a delayed sample to add to this sample
-
-                        int16_t *flangeFrame = ringBuffer->getNextOutput();
-                        int flangeIndex = s - sampleFlangeDelay;
-
-                        if (flangeIndex < 0) {
-                            // we need to grab the flange sample from earlier in the buffer
-                            flangeFrame = ringBuffer->getNextOutput() != ringBuffer->getBuffer()
-                                ? ringBuffer->getNextOutput() - PACKET_LENGTH_SAMPLES
-                                : ringBuffer->getNextOutput() + RING_BUFFER_LENGTH_SAMPLES - PACKET_LENGTH_SAMPLES;
-
-                            flangeIndex = PACKET_LENGTH_SAMPLES_PER_CHANNEL + (s - sampleFlangeDelay);
-                        }
-
-                        int16_t leftFlangeSample = flangeFrame[flangeIndex];
-                        int16_t rightFlangeSample = flangeFrame[flangeIndex + PACKET_LENGTH_SAMPLES_PER_CHANNEL];
-
-                        leftSample = (1 - _flangeWeight) * leftSample + (_flangeWeight * leftFlangeSample);
-                        rightSample = (1 - _flangeWeight) * rightSample + (_flangeWeight * rightFlangeSample);
-
-                        _samplesLeftForFlange--;
-
-                        if (_samplesLeftForFlange == 0) {
-                            _lastYawMeasuredMaximum = 0;
-                        }
-                    }
-                }
-#ifndef TEST_AUDIO_LOOPBACK
-                outputLeft[s] += leftSample;
-                outputRight[s] += rightSample;
-#else
-                outputLeft[s] += inputLeft[s];
-                outputRight[s] += inputLeft[s];
-#endif
-            }
-            ringBuffer->setNextOutput(ringBuffer->getNextOutput() + PACKET_LENGTH_SAMPLES);
-
-            if (ringBuffer->getNextOutput() == ringBuffer->getBuffer() + RING_BUFFER_LENGTH_SAMPLES) {
-                ringBuffer->setNextOutput(ringBuffer->getBuffer());
-            }
-        }
-    }
-
-    eventuallySendRecvPing(inputLeft, outputLeft, outputRight);
-
-    // add output (@speakers) data just written to the scope
-    _scope->addSamples(1, outputLeft, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
-    _scope->addSamples(2, outputRight, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
-
-    gettimeofday(&_lastCallbackTime, NULL);
-}
-
-// inputBuffer  A pointer to an internal portaudio data buffer containing data read by portaudio.
-// outputBuffer A pointer to an internal portaudio data buffer to be read by the configured output device.
-// frames       Number of frames that portaudio requests to be read/written.
-// timeInfo     Portaudio time info. Currently unused.
-// statusFlags  Portaudio status flags. Currently unused.
-// userData     Pointer to supplied user data (in this case, a pointer to the parent Audio object
-int Audio::audioCallback (const void* inputBuffer,
-                          void* outputBuffer,
-                          unsigned long frames,
-                          const PaStreamCallbackTimeInfo *timeInfo,
-                          PaStreamCallbackFlags statusFlags,
-                          void* userData) {
-
-    int16_t* inputLeft = static_cast<int16_t*const*>(inputBuffer)[0];
-    int16_t* outputLeft = static_cast<int16_t**>(outputBuffer)[0];
-    int16_t* outputRight = static_cast<int16_t**>(outputBuffer)[1];
-
-    static_cast<Audio*>(userData)->performIO(inputLeft, outputLeft, outputRight);
-    return paContinue;
+Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
+    QObject(parent),
+    _inputDevice(NULL),
+    _ringBuffer(true),
+    _averagedLatency(0.0),
+    _measuredJitter(0),
+    _jitterBufferSamples(initialJitterBufferSamples),
+    _lastInputLoudness(0),
+    _lastVelocity(0),
+    _lastAcceleration(0),
+    _totalPacketsReceived(0),
+    _collisionSoundMagnitude(0.0f),
+    _collisionSoundFrequency(0.0f),
+    _collisionSoundNoise(0.0f),
+    _collisionSoundDuration(0.0f),
+    _proceduralEffectSample(0),
+    _heartbeatMagnitude(0.0f),
+    _muted(false)
+{
 }

 void Audio::init(QGLWidget *parent) {
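The deleted performIO()/audioCallback() pair was PortAudio's model: the library owns the audio thread and invokes a C callback with raw channel pointers. The Qt replacement inverts this — QAudioInput::start() hands back a QIODevice, and its readyRead() signal drives processing from the event loop. A stripped-down sketch of that input path (class and names are illustrative, error handling omitted, moc required):

    #include <QtCore/QObject>
    #include <QtCore/QIODevice>
    #include <QtCore/QByteArray>
    #include <QtMultimedia/QAudioInput>
    #include <QtMultimedia/QAudioDeviceInfo>
    #include <QtMultimedia/QAudioFormat>

    class Capture : public QObject {
        Q_OBJECT
    public:
        void begin(const QAudioDeviceInfo& device, const QAudioFormat& format) {
            _input = new QAudioInput(device, format, this);
            _device = _input->start(); // pull mode: Qt hands us a QIODevice
            connect(_device, SIGNAL(readyRead()), this, SLOT(onReadyRead()));
        }

    private slots:
        void onReadyRead() {
            // Called from the event loop whenever samples have arrived --
            // the moral equivalent of the old audioCallback().
            QByteArray chunk = _device->readAll();
            // ... downsample, packetize, and mix `chunk` here ...
        }

    private:
        QAudioInput* _input;
        QIODevice* _device;
    };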
@@ -320,118 +69,230 @@ void Audio::init(QGLWidget *parent) {
     _muteTextureId = parent->bindTexture(QImage("./resources/images/mute.svg"));
 }

-static void outputPortAudioError(PaError error) {
-    if (error != paNoError) {
-        qDebug("-- portaudio termination error --\n");
-        qDebug("PortAudio error (%d): %s\n", error, Pa_GetErrorText(error));
-    }
-}
-
 void Audio::reset() {
-    _packetsReceivedThisPlayback = 0;
     _ringBuffer.reset();
 }

-Audio::Audio(Oscilloscope* scope, int16_t initialJitterBufferSamples) :
-    _stream(NULL),
-    _ringBuffer(true),
-    _scope(scope),
-    _averagedLatency(0.0),
-    _measuredJitter(0),
-    _jitterBufferSamples(initialJitterBufferSamples),
-    _wasStarved(0),
-    _numStarves(0),
-    _lastInputLoudness(0),
-    _lastVelocity(0),
-    _lastAcceleration(0),
-    _totalPacketsReceived(0),
-    _firstPacketReceivedTime(),
-    _packetsReceivedThisPlayback(0),
-    _echoSamplesLeft(NULL),
-    _isSendingEchoPing(false),
-    _pingAnalysisPending(false),
-    _pingFramesToRecord(0),
-    _samplesLeftForFlange(0),
-    _lastYawMeasuredMaximum(0),
-    _flangeIntensity(0.0f),
-    _flangeRate(0.0f),
-    _flangeWeight(0.0f),
-    _collisionSoundMagnitude(0.0f),
-    _collisionSoundFrequency(0.0f),
-    _collisionSoundNoise(0.0f),
-    _collisionSoundDuration(0.0f),
-    _proceduralEffectSample(0),
-    _heartbeatMagnitude(0.0f),
-    _muted(false),
-    _localEcho(false)
-{
-    outputPortAudioError(Pa_Initialize());
-
-    // NOTE: Portaudio documentation is unclear as to whether it is safe to specify the
-    // number of frames per buffer explicitly versus setting this value to zero.
-    // Possible source of latency that we need to investigate further.
-    //
-    unsigned long FRAMES_PER_BUFFER = BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
-
-    // Manually initialize the portaudio stream to ask for minimum latency
-    PaStreamParameters inputParameters, outputParameters;
-
-    inputParameters.device = Pa_GetDefaultInputDevice();
-    outputParameters.device = Pa_GetDefaultOutputDevice();
-
-    if (inputParameters.device == -1 || outputParameters.device == -1) {
-        qDebug("Audio: Missing device.\n");
-        outputPortAudioError(Pa_Terminate());
-        return;
-    }
-
-    inputParameters.channelCount = 1; // Stereo input
-    inputParameters.sampleFormat = (paInt16 | paNonInterleaved);
-    inputParameters.suggestedLatency = Pa_GetDeviceInfo(inputParameters.device)->defaultLowInputLatency;
-    inputParameters.hostApiSpecificStreamInfo = NULL;
-
-    outputParameters.channelCount = 2; // Stereo output
-    outputParameters.sampleFormat = (paInt16 | paNonInterleaved);
-    outputParameters.suggestedLatency = Pa_GetDeviceInfo(outputParameters.device)->defaultLowOutputLatency;
-    outputParameters.hostApiSpecificStreamInfo = NULL;
-
-    outputPortAudioError(Pa_OpenStream(&_stream,
-                                       &inputParameters,
-                                       &outputParameters,
-                                       SAMPLE_RATE,
-                                       FRAMES_PER_BUFFER,
-                                       paNoFlag,
-                                       audioCallback,
-                                       (void*) this));
-
-    if (! _stream) {
+QAudioDeviceInfo defaultAudioDeviceForMode(QAudio::Mode mode) {
+#ifdef __APPLE__
+    if (QAudioDeviceInfo::availableDevices(mode).size() > 1) {
+        AudioDeviceID defaultDeviceID = 0;
+        uint32_t propertySize = sizeof(AudioDeviceID);
+        AudioObjectPropertyAddress propertyAddress = {
+            kAudioHardwarePropertyDefaultInputDevice,
+            kAudioObjectPropertyScopeGlobal,
+            kAudioObjectPropertyElementMaster
+        };
+
+        if (mode == QAudio::AudioOutput) {
+            propertyAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+        }
+
+        OSStatus getPropertyError = AudioObjectGetPropertyData(kAudioObjectSystemObject,
+                                                               &propertyAddress,
+                                                               0,
+                                                               NULL,
+                                                               &propertySize,
+                                                               &defaultDeviceID);
+
+        if (!getPropertyError && propertySize) {
+            CFStringRef deviceName = NULL;
+            propertySize = sizeof(deviceName);
+            propertyAddress.mSelector = kAudioDevicePropertyDeviceNameCFString;
+            getPropertyError = AudioObjectGetPropertyData(defaultDeviceID, &propertyAddress, 0,
+                                                          NULL, &propertySize, &deviceName);
+
+            if (!getPropertyError && propertySize) {
+                // find a device in the list that matches the name we have and return it
+                foreach(QAudioDeviceInfo audioDevice, QAudioDeviceInfo::availableDevices(mode)) {
+                    if (audioDevice.deviceName() == CFStringGetCStringPtr(deviceName, kCFStringEncodingMacRoman)) {
+                        return audioDevice;
+                    }
+                }
+            }
+        }
+    }
+#endif
+
+    // fallback for failed lookup is the default device
+    return (mode == QAudio::AudioInput) ? QAudioDeviceInfo::defaultInputDevice() : QAudioDeviceInfo::defaultOutputDevice();
+}
+
+const int QT_SAMPLE_RATE = 44100;
+const int SAMPLE_RATE_RATIO = QT_SAMPLE_RATE / SAMPLE_RATE;
+
+void Audio::start() {
+
+    QAudioFormat audioFormat;
+    // set up the desired audio format
+    audioFormat.setSampleRate(QT_SAMPLE_RATE);
+    audioFormat.setSampleSize(16);
+    audioFormat.setCodec("audio/pcm");
+    audioFormat.setSampleType(QAudioFormat::SignedInt);
+    audioFormat.setByteOrder(QAudioFormat::LittleEndian);
+    audioFormat.setChannelCount(2);
+
+    qDebug() << "The format for audio I/O is" << audioFormat << "\n";
+
+    QAudioDeviceInfo inputAudioDevice = defaultAudioDeviceForMode(QAudio::AudioInput);
+
+    qDebug() << "Audio input device is" << inputAudioDevice.deviceName() << "\n";
+    if (!inputAudioDevice.isFormatSupported(audioFormat)) {
+        qDebug() << "The desired audio input format is not supported by this device. Not starting audio input.\n";
         return;
     }

-    _echoSamplesLeft = new int16_t[AEC_BUFFERED_SAMPLES + AEC_TMP_BUFFER_SIZE];
-    memset(_echoSamplesLeft, 0, AEC_BUFFERED_SAMPLES * sizeof(int16_t));
-
-    // start the stream now that sources are good to go
-    outputPortAudioError(Pa_StartStream(_stream));
-
-    // Uncomment these lines to see the system-reported latency
-    //qDebug("Default low input, output latency (secs): %0.4f, %0.4f\n",
-    //       Pa_GetDeviceInfo(Pa_GetDefaultInputDevice())->defaultLowInputLatency,
-    //       Pa_GetDeviceInfo(Pa_GetDefaultOutputDevice())->defaultLowOutputLatency);
-
-    const PaStreamInfo* streamInfo = Pa_GetStreamInfo(_stream);
-    qDebug("Started audio with reported latency msecs In/Out: %.0f, %.0f\n", streamInfo->inputLatency * 1000.f,
-           streamInfo->outputLatency * 1000.f);
-
+    _audioInput = new QAudioInput(inputAudioDevice, audioFormat, this);
+    _audioInput->setBufferSize(BUFFER_LENGTH_BYTES_STEREO * SAMPLE_RATE_RATIO);
+    _inputDevice = _audioInput->start();
+
+    connect(_inputDevice, SIGNAL(readyRead()), SLOT(handleAudioInput()));
+
+    QAudioDeviceInfo outputDeviceInfo = defaultAudioDeviceForMode(QAudio::AudioOutput);
+
+    qDebug() << outputDeviceInfo.supportedSampleRates() << "\n";
+
+    qDebug() << "Audio output device is" << outputDeviceInfo.deviceName() << "\n";
+
+    if (!outputDeviceInfo.isFormatSupported(audioFormat)) {
+        qDebug() << "The desired audio output format is not supported by this device.\n";
+        return;
+    }
+
+    _audioOutput = new QAudioOutput(outputDeviceInfo, audioFormat, this);
+    _audioOutput->setBufferSize(BUFFER_LENGTH_BYTES_STEREO * SAMPLE_RATE_RATIO);
+    _outputDevice = _audioOutput->start();
+
     gettimeofday(&_lastReceiveTime, NULL);
 }

-void Audio::shutdown() {
-    if (_stream) {
-        outputPortAudioError(Pa_CloseStream(_stream));
-        outputPortAudioError(Pa_Terminate());
-    }
-    delete[] _echoSamplesLeft;
-}
+void Audio::handleAudioInput() {
+    static int16_t stereoInputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL * 2 * SAMPLE_RATE_RATIO];
+    static int16_t stereoOutputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL * 2 * SAMPLE_RATE_RATIO];
+    static char monoAudioDataPacket[MAX_PACKET_SIZE];
+
+    // read out the current samples from the _inputDevice
+    _inputDevice->read((char*) stereoInputBuffer, BUFFER_LENGTH_BYTES_STEREO * SAMPLE_RATE_RATIO);
+
+    NodeList* nodeList = NodeList::getInstance();
+    Node* audioMixer = nodeList->soloNodeOfType(NODE_TYPE_AUDIO_MIXER);
+
+    if (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio) && !_muted) {
+        // if local loopback enabled, copy input to output
+        memcpy(stereoOutputBuffer, stereoInputBuffer, sizeof(stereoOutputBuffer));
+    } else {
+        // zero out the stereoOutputBuffer
+        memset(stereoOutputBuffer, 0, sizeof(stereoOutputBuffer));
+    }
+
+    if (audioMixer) {
+        if (audioMixer->getActiveSocket()) {
+            Avatar* interfaceAvatar = Application::getInstance()->getAvatar();
+
+            glm::vec3 headPosition = interfaceAvatar->getHeadJointPosition();
+            glm::quat headOrientation = interfaceAvatar->getHead().getOrientation();
+
+            int numBytesPacketHeader = numBytesForPacketHeader((unsigned char*) &PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO);
+            int leadingBytes = numBytesPacketHeader + sizeof(headPosition) + sizeof(headOrientation);
+
+            // we need the amount of bytes in the buffer + 1 for type
+            // + 12 for 3 floats for position + float for bearing + 1 attenuation byte
+
+            PACKET_TYPE packetType = Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)
+                ? PACKET_TYPE_MICROPHONE_AUDIO_WITH_ECHO
+                : PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO;
+
+            char* currentPacketPtr = monoAudioDataPacket + populateTypeAndVersion((unsigned char*) monoAudioDataPacket, packetType);
+
+            // pack Source Data
+            QByteArray rfcUUID = NodeList::getInstance()->getOwnerUUID().toRfc4122();
+            memcpy(currentPacketPtr, rfcUUID.constData(), rfcUUID.size());
+            currentPacketPtr += rfcUUID.size();
+            leadingBytes += rfcUUID.size();
+
+            // memcpy the three float positions
+            memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
+            currentPacketPtr += (sizeof(headPosition));
+
+            // memcpy our orientation
+            memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
+            currentPacketPtr += sizeof(headOrientation);
+
+            if (!_muted) {
+                // we aren't muted, average each set of four samples together to set up the mono input buffers
+                for (int i = 2; i < BUFFER_LENGTH_SAMPLES_PER_CHANNEL * 2 * SAMPLE_RATE_RATIO; i += 4) {
+                    int16_t averagedSample = (stereoInputBuffer[i - 2] / 4) + (stereoInputBuffer[i] / 2)
+                        + (stereoInputBuffer[i + 2] / 4);
+                    // copy the averaged sample to our array
+                    memcpy(currentPacketPtr + (((i - 2) / 4) * sizeof(int16_t)), &averagedSample, sizeof(int16_t));
+                }
+            } else {
+                // zero out the audio part of the array
+                memset(currentPacketPtr, 0, BUFFER_LENGTH_BYTES_PER_CHANNEL);
+            }
+
+            // Add procedural effects to input samples
+            addProceduralSounds((int16_t*) currentPacketPtr, stereoOutputBuffer, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
+
+            nodeList->getNodeSocket()->send(audioMixer->getActiveSocket(),
+                                            monoAudioDataPacket,
+                                            BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes);
+        } else {
+            nodeList->pingPublicAndLocalSocketsForInactiveNode(audioMixer);
+        }
+    }
+
+    AudioRingBuffer* ringBuffer = &_ringBuffer;
+
+    // if there is anything in the ring buffer, decide what to do
+
+    if (ringBuffer->getEndOfLastWrite()) {
+        if (ringBuffer->isStarved() && ringBuffer->diffLastWriteNextOutput() <
+            (PACKET_LENGTH_SAMPLES + _jitterBufferSamples * (ringBuffer->isStereo() ? 2 : 1))) {
+            // If not enough audio has arrived to start playback, keep waiting
+        } else if (!ringBuffer->isStarved() && ringBuffer->diffLastWriteNextOutput() == 0) {
+            // If we have started and now have run out of audio to send to the audio device,
+            // this means we've starved and should restart.
+            ringBuffer->setIsStarved(true);
+
+        } else {
+            // We are either already playing back, or we have enough audio to start playing back.
+            if (ringBuffer->isStarved()) {
+                ringBuffer->setIsStarved(false);
+                ringBuffer->setHasStarted(true);
+            }
+
+            // play whatever we have in the audio buffer
+            for (int s = 0; s < PACKET_LENGTH_SAMPLES_PER_CHANNEL; s++) {
+                int16_t leftSample = ringBuffer->getNextOutput()[s];
+                int16_t rightSample = ringBuffer->getNextOutput()[s + PACKET_LENGTH_SAMPLES_PER_CHANNEL];
+
+                stereoOutputBuffer[(s * 4)] += leftSample;
+                stereoOutputBuffer[(s * 4) + 2] += leftSample;
+
+                stereoOutputBuffer[(s * 4) + 1] += rightSample;
+                stereoOutputBuffer[(s * 4) + 3] += rightSample;
+            }
+
+            ringBuffer->setNextOutput(ringBuffer->getNextOutput() + PACKET_LENGTH_SAMPLES);
+
+            if (ringBuffer->getNextOutput() == ringBuffer->getBuffer() + RING_BUFFER_LENGTH_SAMPLES) {
+                ringBuffer->setNextOutput(ringBuffer->getBuffer());
+            }
+        }
+    }
+
+    // copy the audio data to the output device
+    _outputDevice->write((char*) stereoOutputBuffer, sizeof(stereoOutputBuffer));
+    _outputDevice->write((char*) stereoOutputBuffer, sizeof(stereoOutputBuffer));
+
+    // add output (@speakers) data just written to the scope
+    // _scope->addSamples(1, outputLeft, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
+    // _scope->addSamples(2, outputRight, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
+
+    gettimeofday(&_lastCallbackTime, NULL);
 }

 void Audio::addReceivedAudioToBuffer(unsigned char* receivedData, int receivedBytes) {
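The averaging loop in handleAudioInput() is where the sample-rate mismatch is resolved: the device runs at 44100 Hz stereo while the network path wants 22050 Hz mono, so each output sample is a weighted average of three neighbouring left-channel samples. A standalone sketch of the same arithmetic, with an added bounds guard the commit's in-place loop does not have (function name is illustrative):

    #include <cstdint>
    #include <cstddef>

    // Interleaved stereo at 44100 Hz: [L0 R0 L1 R1 L2 R2 ...]. A stride of 4
    // int16 values spans two stereo frames, i.e. one output sample at 22050 Hz.
    // The 1/4 + 1/2 + 1/4 weighting of neighbouring left-channel samples acts
    // as a cheap low-pass filter applied before decimation.
    void stereo44ToMono22(const int16_t* stereoIn, size_t stereoSamples, int16_t* monoOut) {
        for (size_t i = 2; i + 2 < stereoSamples; i += 4) {
            monoOut[(i - 2) / 4] = (int16_t) ((stereoIn[i - 2] / 4)
                                            + (stereoIn[i] / 2)
                                            + (stereoIn[i + 2] / 4));
        }
    }

Note that i starts at 2 and advances by 4, so i - 2, i, and i + 2 are always even indices — left-channel samples only; the right channel is discarded.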
@@ -463,14 +324,6 @@ void Audio::addReceivedAudioToBuffer(unsigned char* receivedData, int receivedBytes) {
         }
     }

-    if (_ringBuffer.isStarved()) {
-        _packetsReceivedThisPlayback++;
-    }
-
-    if (_packetsReceivedThisPlayback == 1) {
-        gettimeofday(&_firstPacketReceivedTime, NULL);
-    }
-
     if (_ringBuffer.diffLastWriteNextOutput() + PACKET_LENGTH_SAMPLES >
         PACKET_LENGTH_SAMPLES + (ceilf((float) (_jitterBufferSamples * 2) / PACKET_LENGTH_SAMPLES) * PACKET_LENGTH_SAMPLES)) {
         // this packet would give us more than the required amount for play out
@@ -483,8 +336,6 @@ void Audio::addReceivedAudioToBuffer(unsigned char* receivedData, int receivedBytes) {
         }
     }

-    //printf("Got audio packet %d\n", _packetsReceivedThisPlayback);
-
     _ringBuffer.parseData((unsigned char*) receivedData, receivedBytes);

     Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO)
@@ -502,7 +353,7 @@ bool Audio::mousePressEvent(int x, int y) {
 }

 void Audio::render(int screenWidth, int screenHeight) {
-    if (_stream) {
+    if (false) {
         glLineWidth(2.0);
         glBegin(GL_LINES);
         glColor3f(1,1,1);
@@ -598,40 +449,8 @@ void Audio::render(int screenWidth, int screenHeight) {
         renderToolIcon(screenHeight);
     }
 }

-//
-//  Very Simple LowPass filter which works by averaging a bunch of samples with a moving window
-//
-//#define lowpass 1
-void Audio::lowPassFilter(int16_t* inputBuffer) {
-    static int16_t outputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL];
-    for (int i = 2; i < BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 2; i++) {
-#ifdef lowpass
-        outputBuffer[i] = (int16_t)(0.125f * (float)inputBuffer[i - 2] +
-                                    0.25f * (float)inputBuffer[i - 1] +
-                                    0.25f * (float)inputBuffer[i] +
-                                    0.25f * (float)inputBuffer[i + 1] +
-                                    0.125f * (float)inputBuffer[i + 2] );
-#else
-        outputBuffer[i] = (int16_t)(0.125f * -(float)inputBuffer[i - 2] +
-                                    0.25f * -(float)inputBuffer[i - 1] +
-                                    1.75f * (float)inputBuffer[i] +
-                                    0.25f * -(float)inputBuffer[i + 1] +
-                                    0.125f * -(float)inputBuffer[i + 2] );
-
-#endif
-    }
-    outputBuffer[0] = inputBuffer[0];
-    outputBuffer[1] = inputBuffer[1];
-    outputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 2] = inputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 2];
-    outputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 1] = inputBuffer[BUFFER_LENGTH_SAMPLES_PER_CHANNEL - 1];
-    memcpy(inputBuffer, outputBuffer, BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
-}
-
 // Take a pointer to the acquired microphone input samples and add procedural sounds
-void Audio::addProceduralSounds(int16_t* inputBuffer,
-                                int16_t* outputLeft,
-                                int16_t* outputRight,
-                                int numSamples) {
+void Audio::addProceduralSounds(int16_t* inputBuffer, int16_t* stereoOutput, int numSamples) {
     const float MAX_AUDIBLE_VELOCITY = 6.0;
     const float MIN_AUDIBLE_VELOCITY = 0.1;
     const int VOLUME_BASELINE = 400;
@@ -642,13 +461,12 @@ void Audio::addProceduralSounds(int16_t* inputBuffer,

     float sample;

-    //
     // Travelling noise
-    //
     // Add a noise-modulated sinewave with volume that tapers off with speed increasing
     if ((speed > MIN_AUDIBLE_VELOCITY) && (speed < MAX_AUDIBLE_VELOCITY)) {
         for (int i = 0; i < numSamples; i++) {
-            inputBuffer[i] += (int16_t)(sinf((float) (_proceduralEffectSample + i) / SOUND_PITCH ) * volume * (1.f + randFloat() * 0.25f) * speed);
+            inputBuffer[i] += (int16_t)(sinf((float) (_proceduralEffectSample + i) / SOUND_PITCH )
+                                        * volume * (1.f + randFloat() * 0.25f) * speed);
         }
     }
     const float COLLISION_SOUND_CUTOFF_LEVEL = 0.01f;
@@ -665,20 +483,22 @@
                      sinf(t * _collisionSoundFrequency / DOWN_TWO_OCTAVES) +
                      sinf(t * _collisionSoundFrequency / DOWN_FOUR_OCTAVES * UP_MAJOR_FIFTH);
             sample *= _collisionSoundMagnitude * COLLISION_SOUND_MAX_VOLUME;


-            inputBuffer[i] += (int) sample;
-            outputLeft[i] += (int) sample;
-            outputRight[i] += (int) sample;
+            int16_t collisionSample = (int16_t) sample;
+
+            inputBuffer[i] += collisionSample;
+
+            for (int j = (i * 4); j < (i * 4) + 4; j++) {
+                stereoOutput[j] = collisionSample;
+            }
+
             _collisionSoundMagnitude *= _collisionSoundDuration;
         }
     }
     _proceduralEffectSample += numSamples;
 }

-//
-// Starts a collision sound.  magnitude is 0-1, with 1 the loudest possible sound.
-//
+// Starts a collision sound.  magnitude is 0-1, with 1 the loudest possible sound.
 void Audio::startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen) {
     _collisionSoundMagnitude = magnitude;
     _collisionSoundFrequency = frequency;
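The new inner loop is the inverse of the input-side downsampling: because the procedural sounds are generated at the 22050 Hz network rate but stereoOutput is a 44100 Hz interleaved stereo buffer, each mono sample has to fill four consecutive int16 slots. A standalone sketch of that mapping (function name is illustrative):

    #include <cstdint>
    #include <cstddef>

    // One mono sample at 22050 Hz expands to four interleaved int16 values at
    // 44100 Hz stereo: two consecutive (left, right) frames carrying the value.
    void mono22ToStereo44(const int16_t* monoIn, size_t monoSamples, int16_t* stereoOut) {
        for (size_t i = 0; i < monoSamples; i++) {
            for (size_t j = i * 4; j < (i * 4) + 4; j++) {
                stereoOut[j] = monoIn[i]; // duplicate into L and R of both frames
            }
        }
    }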
@@ -686,163 +506,6 @@ void Audio::startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen) {
     _collisionSoundDuration = duration;
     _collisionFlashesScreen = flashScreen;
 }
-// -----------------------------------------------------------
-// Accoustic ping (audio system round trip time determination)
-// -----------------------------------------------------------
-
-void Audio::ping() {
-
-    _pingFramesToRecord = PING_FRAMES_TO_RECORD;
-    _isSendingEchoPing = true;
-    _scope->setDownsampleRatio(8);
-    _scope->inputPaused = false;
-}
-
-inline void Audio::eventuallySendRecvPing(int16_t* inputLeft, int16_t* outputLeft, int16_t* outputRight) {
-
-    if (_isSendingEchoPing) {
-
-        // Overwrite output with ping signal.
-        //
-        // Using a signed variant of sinc because it's speaker-reproducible
-        // with a unique, characteristic point in time (its center), aligned
-        // to the right of the output buffer.
-        //
-        // |
-        // | |
-        // ...--- t --------+-+-+-+-+------->
-        // | | :
-        // | :
-        // buffer :<- start of next buffer
-        // : : :
-        // :---: sine period
-        // :-: half sine period
-        //
-        memset(outputLeft, 0, PING_BUFFER_OFFSET * sizeof(int16_t));
-        outputLeft += PING_BUFFER_OFFSET;
-        memset(outputRight, 0, PING_BUFFER_OFFSET * sizeof(int16_t));
-        outputRight += PING_BUFFER_OFFSET;
-        for (int s = -PING_PERIOD; s < PING_PERIOD; ++s) {
-            float t = float(s) / PING_PITCH;
-            *outputLeft++ = *outputRight++ = int16_t(PING_VOLUME *
-                    sinf(t) / fmaxf(1.0f, pow((abs(t)-1.5f) / 1.5f, 1.2f)));
-        }
-
-        // As of the next frame, we'll be recoding PING_FRAMES_TO_RECORD from
-        // the mic (pointless to start now as we can't record unsent audio).
-        _isSendingEchoPing = false;
-        qDebug("Send audio ping\n");
-
-    } else if (_pingFramesToRecord > 0) {
-
-        // Store input samples
-        int offset = BUFFER_LENGTH_SAMPLES_PER_CHANNEL * (
-                PING_FRAMES_TO_RECORD - _pingFramesToRecord);
-        memcpy(_echoSamplesLeft + offset,
-               inputLeft, BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
-
-        --_pingFramesToRecord;
-
-        if (_pingFramesToRecord == 0) {
-            _pingAnalysisPending = true;
-            qDebug("Received ping echo\n");
-        }
-    }
-}
-
-static int findExtremum(int16_t const* samples, int length, int sign) {
-
-    int x0 = -1;
-    int y0 = -PING_VOLUME;
-    for (int x = 0; x < length; ++samples, ++x) {
-        int y = *samples * sign;
-        if (y > y0) {
-            x0 = x;
-            y0 = y;
-        }
-    }
-    return x0;
-}
-
-inline void Audio::analyzePing() {
-
-    // Determine extrema
-    int botAt = findExtremum(_echoSamplesLeft, PING_SAMPLES_TO_ANALYZE, -1);
-    if (botAt == -1) {
-        qDebug("Audio Ping: Minimum not found.\n");
-        return;
-    }
-    int topAt = findExtremum(_echoSamplesLeft, PING_SAMPLES_TO_ANALYZE, 1);
-    if (topAt == -1) {
-        qDebug("Audio Ping: Maximum not found.\n");
-        return;
-    }
-
-    // Determine peak amplitude - warn if low
-    int ampli = (_echoSamplesLeft[topAt] - _echoSamplesLeft[botAt]) / 2;
-    if (ampli < PING_MIN_AMPLI) {
-        qDebug("Audio Ping unreliable - low amplitude %d.\n", ampli);
-    }
-
-    // Determine period - warn if doesn't look like our signal
-    int halfPeriod = abs(topAt - botAt);
-    if (abs(halfPeriod-PING_HALF_PERIOD) > PING_MAX_PERIOD_DIFFERENCE) {
-        qDebug("Audio Ping unreliable - peak distance %d vs. %d\n", halfPeriod, PING_HALF_PERIOD);
-    }
-
-    // Ping is sent:
-    //
-    // ---[ record ]--[ play ]--- audio in space/time --->
-    // : : :
-    // : : ping: ->X<-
-    // : : :
-    // : : |+| (buffer end - signal center = t1-t0)
-    // : |<----------+
-    // : : : :
-    // : ->X<- (corresponding input buffer position t0)
-    // : : : :
-    // : : : :
-    // : : : :
-    // Next frame (we're recording from now on):
-    // : : :
-    // : - - --[ record ]--[ play ]------------------>
-    // : : : :
-    // : : |<-- (start of recording t1)
-    // : : :
-    // : : :
-    // At some frame, the signal is picked up:
-    // : : : :
-    // : : : :
-    // : : : V
-    // : : : - - --[ record ]--[ play ]---------->
-    // : V : :
-    // : |<--------->|
-    // |+|<------->| period + measured samples
-    //
-    // If we could pick up the signal at t0 we'd have zero round trip
-    // time - in this case we had recorded the output buffer instantly
-    // in its entirety (we can't - but there's the proper reference
-    // point). We know the number of samples from t1 and, knowing that
-    // data is streaming continuously, we know that t1-t0 is the distance
-    // of the characterisic point from the end of the buffer.
-
-    int delay = (botAt + topAt) / 2 + PING_PERIOD;
-
-    qDebug("\n| Audio Ping results:\n+----- ---- --- - - - - -\n\n"
-           "Delay = %d samples (%d ms)\nPeak amplitude = %d\n\n",
-           delay, (delay * 1000) / int(SAMPLE_RATE), ampli);
-}
-
-bool Audio::eventuallyAnalyzePing() {
-
-    if (! _pingAnalysisPending) {
-        return false;
-    }
-    _scope->inputPaused = true;
-    analyzePing();
-    _pingAnalysisPending = false;
-    return true;
-}
-
 void Audio::renderToolIcon(int screenHeight) {

@@ -888,5 +551,3 @@ void Audio::renderToolIcon(int screenHeight) {

     glDisable(GL_TEXTURE_2D);
 }
-
-#endif
@@ -14,9 +14,7 @@

 #include "InterfaceConfig.h"

-#include <QObject>
+#include <QtCore/QObject>

-#include <portaudio.h>
-
 #include <AudioRingBuffer.h>
 #include <StdDev.h>
@@ -32,13 +30,15 @@ static const int PACKET_LENGTH_BYTES_PER_CHANNEL = PACKET_LENGTH_BYTES / 2;
 static const int PACKET_LENGTH_SAMPLES = PACKET_LENGTH_BYTES / sizeof(int16_t);
 static const int PACKET_LENGTH_SAMPLES_PER_CHANNEL = PACKET_LENGTH_SAMPLES / 2;

+class QAudioInput;
+class QAudioOutput;
+class QIODevice;
+
 class Audio : public QObject {
     Q_OBJECT
 public:
-    // initializes audio I/O
-    Audio(Oscilloscope* scope, int16_t initialJitterBufferSamples);
-
-    void shutdown();
+    // setup for audio I/O
+    Audio(int16_t initialJitterBufferSamples, QObject* parent = 0);

     void reset();
     void render(int screenWidth, int screenHeight);
@@ -61,19 +61,18 @@

     bool getCollisionFlashesScreen() { return _collisionFlashesScreen; }

-    void ping();
-
     void init(QGLWidget *parent = 0);
     bool mousePressEvent(int x, int y);

-    // Call periodically to eventually perform round trip time analysis,
-    // in which case 'true' is returned - otherwise the return value is 'false'.
-    // The results of the analysis are written to the log.
-    bool eventuallyAnalyzePing();
+public slots:
+    void start();
+    void handleAudioInput();

 private:
-    PaStream* _stream;
+    QAudioInput* _audioInput;
+    QIODevice* _inputDevice;
+    QAudioOutput* _audioOutput;
+    QIODevice* _outputDevice;
     AudioRingBuffer _ringBuffer;
     Oscilloscope* _scope;
     StDev _stdev;
@@ -88,19 +87,6 @@
     glm::vec3 _lastVelocity;
     glm::vec3 _lastAcceleration;
     int _totalPacketsReceived;
-    timeval _firstPacketReceivedTime;
-    int _packetsReceivedThisPlayback;
-    // Ping analysis
-    int16_t* _echoSamplesLeft;
-    volatile bool _isSendingEchoPing;
-    volatile bool _pingAnalysisPending;
-    int _pingFramesToRecord;
-    // Flange effect
-    int _samplesLeftForFlange;
-    int _lastYawMeasuredMaximum;
-    float _flangeIntensity;
-    float _flangeRate;
-    float _flangeWeight;
     float _collisionSoundMagnitude;
     float _collisionSoundFrequency;
     float _collisionSoundNoise;
@@ -118,24 +104,8 @@
     // Audio callback in class context.
     inline void performIO(int16_t* inputLeft, int16_t* outputLeft, int16_t* outputRight);

-    // When requested, sends/receives a signal for round trip time determination.
-    // Called from 'performIO'.
-    inline void eventuallySendRecvPing(int16_t* inputLeft, int16_t* outputLeft, int16_t* outputRight);
-
-    // Determines round trip time of the audio system. Called from 'eventuallyAnalyzePing'.
-    inline void analyzePing();
-
     // Add sounds that we want the user to not hear themselves, by adding on top of mic input signal
-    void addProceduralSounds(int16_t* inputBuffer, int16_t* outputLeft, int16_t* outputRight, int numSamples);
+    void addProceduralSounds(int16_t* inputBuffer, int16_t* stereoOutput, int numSamples);

-    // Audio callback called by portaudio. Calls 'performIO'.
-    static int audioCallback(const void *inputBuffer,
-                             void *outputBuffer,
-                             unsigned long framesPerBuffer,
-                             const PaStreamCallbackTimeInfo *timeInfo,
-                             PaStreamCallbackFlags statusFlags,
-                             void *userData);
-
     void renderToolIcon(int screenHeight);
 };
@@ -16,7 +16,7 @@

 #include "NodeData.h"

-const float SAMPLE_RATE = 22050.0;
+const int SAMPLE_RATE = 22050;

 const int BUFFER_LENGTH_BYTES_STEREO = 1024;
 const int BUFFER_LENGTH_BYTES_PER_CHANNEL = 512;
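Changing SAMPLE_RATE from float to int matters for the new Qt code: SAMPLE_RATE_RATIO in Audio.cpp is computed as QT_SAMPLE_RATE / SAMPLE_RATE and is used to size static buffers, so it must be an exact integral 2 rather than a floating-point quotient. A compile-time check could make that assumption explicit — illustrative only, the commit contains no such assert:

    const int QT_SAMPLE_RATE = 44100;
    const int SAMPLE_RATE = 22050;

    // Exact integer division: buffers sized with SAMPLE_RATE_RATIO rely on
    // the device rate being an integral multiple of the network rate.
    const int SAMPLE_RATE_RATIO = QT_SAMPLE_RATE / SAMPLE_RATE;

    // C++03-style compile-time assertion (array of negative size fails to compile).
    typedef char AssertRatioIsTwo[(SAMPLE_RATE_RATIO == 2) ? 1 : -1];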