Mirror of https://github.com/JulianGro/overte.git (synced 2025-08-13 10:47:12 +02:00)

Commit 92d1f7bdcb: removed clang formatting issues :"
Parent: aa6cf466d9
2 changed files with 204 additions and 147 deletions
@@ -82,7 +82,8 @@ static const int STARVE_DETECTION_PERIOD = 10 * 1000; // 10 Seconds
 
 Setting::Handle<bool> dynamicJitterBufferEnabled("dynamicJitterBuffersEnabled",
                                                  InboundAudioStream::DEFAULT_DYNAMIC_JITTER_BUFFER_ENABLED);
-Setting::Handle<int> staticJitterBufferFrames("staticJitterBufferFrames", InboundAudioStream::DEFAULT_STATIC_JITTER_FRAMES);
+Setting::Handle<int> staticJitterBufferFrames("staticJitterBufferFrames",
+                                              InboundAudioStream::DEFAULT_STATIC_JITTER_FRAMES);
 
 // protect the Qt internal device list
 using Mutex = std::mutex;
@@ -156,6 +157,7 @@ QList<HifiAudioDeviceInfo> AudioClient::getAudioDevices(QAudio::Mode mode) const
+
 static void channelUpmix(int16_t* source, int16_t* dest, int numSamples, int numExtraChannels) {
     for (int i = 0; i < numSamples/2; i++) {
 
         // read 2 samples
         int16_t left = *source++;
         int16_t right = *source++;
@@ -171,6 +173,7 @@ static void channelUpmix(int16_t* source, int16_t* dest, int numSamples, int num
+
 static void channelDownmix(int16_t* source, int16_t* dest, int numSamples) {
     for (int i = 0; i < numSamples / 2; i++) {
 
         // read 2 samples
         int16_t left = *source++;
         int16_t right = *source++;
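
Note: the two hunks above only show the loop heads and the paired sample reads, so here is a minimal self-contained sketch of what an upmix/downmix pair of this shape typically does. The zero-fill of the extra channels and the average-of-two downmix are assumptions inferred from the signatures, not lines copied from the file.

    #include <cstdint>

    // Upmix sketch: copy each stereo pair, then pad numExtraChannels with silence.
    static void channelUpmixSketch(const int16_t* source, int16_t* dest, int numSamples, int numExtraChannels) {
        for (int i = 0; i < numSamples / 2; i++) {
            // read 2 samples
            int16_t left = *source++;
            int16_t right = *source++;

            // write 2 + numExtraChannels samples
            *dest++ = left;
            *dest++ = right;
            for (int n = 0; n < numExtraChannels; n++) {
                *dest++ = 0;
            }
        }
    }

    // Downmix sketch: average each stereo pair into one mono sample.
    static void channelDownmixSketch(const int16_t* source, int16_t* dest, int numSamples) {
        for (int i = 0; i < numSamples / 2; i++) {
            int16_t left = *source++;
            int16_t right = *source++;
            // left + right promotes to int, so the sum cannot overflow int16_t
            *dest++ = (int16_t)((left + right) / 2);
        }
    }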
@@ -181,6 +184,7 @@ static void channelDownmix(int16_t* source, int16_t* dest, int numSamples) {
 }
 
+
 static bool detectClipping(int16_t* samples, int numSamples, int numChannels) {
 
     const int32_t CLIPPING_THRESHOLD = 32392; // -0.1 dBFS
     const int CLIPPING_DETECTION = 3; // consecutive samples over threshold
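
Note: CLIPPING_THRESHOLD is easy to sanity-check. -0.1 dBFS against a 16-bit full scale of 32768 is 32768 * 10^(-0.1/20) ≈ 32392.9, and truncating gives the constant in the hunk:

    #include <cmath>
    #include <cstdio>

    int main() {
        const double fullScale = 32768.0;  // 16-bit signed full scale
        const double dbfs = -0.1;          // headroom below full scale
        const double threshold = fullScale * std::pow(10.0, dbfs / 20.0);
        std::printf("%d\n", (int)threshold);  // prints 32392, matching CLIPPING_THRESHOLD
        return 0;
    }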
@@ -223,6 +227,7 @@ static bool detectClipping(int16_t* samples, int numSamples, int numChannels) {
 }
 
+
 static float computeLoudness(int16_t* samples, int numSamples) {
 
     float scale = numSamples ? 1.0f / numSamples : 0.0f;
     int32_t loudness = 0;
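
Note: a loudness helper of this shape (a 1/numSamples scale plus an int32_t accumulator) is normally a mean of absolute sample values; the 32-bit accumulator avoids overflow even for large frames. A sketch under that assumption:

    #include <cstdint>
    #include <cstdlib>

    // Sketch: mean absolute amplitude of a frame of 16-bit samples.
    static float computeLoudnessSketch(const int16_t* samples, int numSamples) {
        float scale = numSamples ? 1.0f / numSamples : 0.0f;

        int32_t loudness = 0;
        for (int i = 0; i < numSamples; i++) {
            loudness += std::abs((int32_t)samples[i]);
        }
        return (float)loudness * scale;
    }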
@@ -234,6 +239,7 @@ static float computeLoudness(int16_t* samples, int numSamples) {
 
+
 template <int NUM_CHANNELS>
 static void applyGainSmoothing(float* buffer, int numFrames, float gain0, float gain1) {
     // fast path for unity gain
     if (gain0 == 1.0f && gain1 == 1.0f) {
         return;
@@ -248,6 +254,7 @@ static void applyGainSmoothing(float* buffer, int numFrames, float gain0, float
     float tStep = 1.0f / numFrames;
 
     for (int i = 0; i < numFrames; i++) {
+
         // evaluate poly over t=[0,1)
         float gain = (c3 * t + c2) * t * t + c0;
         t += tStep;
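
Note: gain = (c3 * t + c2) * t * t + c0 is a factored cubic in t over [0,1). One coefficient choice consistent with a click-free ramp (a smoothstep with zero slope at both endpoints) is c0 = gain0, c2 = 3(gain1 - gain0), c3 = -2(gain1 - gain0); the hunk does not show where the coefficients are set, so treat those values as an assumption. A single-channel sketch:

    // Sketch only: g(0) = gain0, g(1) = gain1, g'(0) = g'(1) = 0.
    static void applyGainRampSketch(float* buffer, int numFrames, float gain0, float gain1) {
        float c0 = gain0;
        float c2 = 3.0f * (gain1 - gain0);
        float c3 = -2.0f * (gain1 - gain0);

        float t = 0.0f;
        float tStep = 1.0f / numFrames;
        for (int i = 0; i < numFrames; i++) {
            // evaluate poly over t=[0,1), same factored form as the diff
            float gain = (c3 * t + c2) * t * t + c0;
            buffer[i] *= gain;
            t += tStep;
        }
    }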
@@ -264,22 +271,52 @@ static inline float convertToFloat(int16_t sample) {
 }
 
 AudioClient::AudioClient() :
-    AbstractAudioInterface(), _gate(this), _audioInput(NULL), _dummyAudioInput(NULL), _desiredInputFormat(), _inputFormat(),
-    _numInputCallbackBytes(0), _audioOutput(NULL), _desiredOutputFormat(), _outputFormat(), _outputFrameSize(0),
-    _numOutputCallbackBytes(0), _loopbackAudioOutput(NULL), _loopbackOutputDevice(NULL), _inputRingBuffer(0),
-    _localInjectorsStream(0, 1), _receivedAudioStream(RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES), _isStereoInput(false),
-    _outputStarveDetectionStartTimeMsec(0), _outputStarveDetectionCount(0),
+    AbstractAudioInterface(),
+    _gate(this),
+    _audioInput(NULL),
+    _dummyAudioInput(NULL),
+    _desiredInputFormat(),
+    _inputFormat(),
+    _numInputCallbackBytes(0),
+    _audioOutput(NULL),
+    _desiredOutputFormat(),
+    _outputFormat(),
+    _outputFrameSize(0),
+    _numOutputCallbackBytes(0),
+    _loopbackAudioOutput(NULL),
+    _loopbackOutputDevice(NULL),
+    _inputRingBuffer(0),
+    _localInjectorsStream(0, 1),
+    _receivedAudioStream(RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES),
+    _isStereoInput(false),
+    _outputStarveDetectionStartTimeMsec(0),
+    _outputStarveDetectionCount(0),
     _outputBufferSizeFrames("audioOutputBufferFrames", DEFAULT_BUFFER_FRAMES),
     _sessionOutputBufferSizeFrames(_outputBufferSizeFrames.get()),
     _outputStarveDetectionEnabled("audioOutputStarveDetectionEnabled", DEFAULT_STARVE_DETECTION_ENABLED),
-    _lastRawInputLoudness(0.0f), _lastSmoothedRawInputLoudness(0.0f), _lastInputLoudness(0.0f), _timeSinceLastClip(-1.0f),
-    _muted(false), _shouldEchoLocally(false), _shouldEchoToServer(false), _isNoiseGateEnabled(true), _isAECEnabled(true),
-    _reverb(false), _reverbOptions(&_scriptReverbOptions), _inputToNetworkResampler(NULL), _networkToOutputResampler(NULL),
-    _localToOutputResampler(NULL), _loopbackResampler(NULL), _audioLimiter(AudioConstants::SAMPLE_RATE, OUTPUT_CHANNEL_COUNT),
-    _outgoingAvatarAudioSequenceNumber(0), _audioOutputIODevice(_localInjectorsStream, _receivedAudioStream, this),
-    _stats(&_receivedAudioStream), _positionGetter(DEFAULT_POSITION_GETTER),
+    _lastRawInputLoudness(0.0f),
+    _lastSmoothedRawInputLoudness(0.0f),
+    _lastInputLoudness(0.0f),
+    _timeSinceLastClip(-1.0f),
+    _muted(false),
+    _shouldEchoLocally(false),
+    _shouldEchoToServer(false),
+    _isNoiseGateEnabled(true),
+    _isAECEnabled(true),
+    _reverb(false),
+    _reverbOptions(&_scriptReverbOptions),
+    _inputToNetworkResampler(NULL),
+    _networkToOutputResampler(NULL),
+    _localToOutputResampler(NULL),
+    _loopbackResampler(NULL),
+    _audioLimiter(AudioConstants::SAMPLE_RATE, OUTPUT_CHANNEL_COUNT),
+    _outgoingAvatarAudioSequenceNumber(0),
+    _audioOutputIODevice(_localInjectorsStream, _receivedAudioStream, this),
+    _stats(&_receivedAudioStream),
+    _positionGetter(DEFAULT_POSITION_GETTER),
 #if defined(Q_OS_ANDROID)
-    _checkInputTimer(this), _isHeadsetPluggedIn(false),
+    _checkInputTimer(this),
+    _isHeadsetPluggedIn(false),
 #endif
     _orientationGetter(DEFAULT_ORIENTATION_GETTER) {
 
@@ -290,20 +327,16 @@ AudioClient::AudioClient() :
     {
         Setting::Handle<int>::Deprecated("maxFramesOverDesired", InboundAudioStream::MAX_FRAMES_OVER_DESIRED);
         Setting::Handle<int>::Deprecated("windowStarveThreshold", InboundAudioStream::WINDOW_STARVE_THRESHOLD);
-        Setting::Handle<int>::Deprecated("windowSecondsForDesiredCalcOnTooManyStarves",
-                                         InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES);
-        Setting::Handle<int>::Deprecated("windowSecondsForDesiredReduction",
-                                         InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_REDUCTION);
+        Setting::Handle<int>::Deprecated("windowSecondsForDesiredCalcOnTooManyStarves", InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES);
+        Setting::Handle<int>::Deprecated("windowSecondsForDesiredReduction", InboundAudioStream::WINDOW_SECONDS_FOR_DESIRED_REDUCTION);
         Setting::Handle<bool>::Deprecated("useStDevForJitterCalc", InboundAudioStream::USE_STDEV_FOR_JITTER);
         Setting::Handle<bool>::Deprecated("repetitionWithFade", InboundAudioStream::REPETITION_WITH_FADE);
     }
 
-    connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples, this, &AudioClient::processReceivedSamples,
-            Qt::DirectConnection);
+    connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples,
+            this, &AudioClient::processReceivedSamples, Qt::DirectConnection);
     connect(this, &AudioClient::changeDevice, this, [=](const HifiAudioDeviceInfo& outputDeviceInfo) {
-        qCDebug(audioclient)
-            << "got AudioClient::changeDevice signal, about to call switchOutputToAudioDevice() outputDeviceInfo: ["
-            << outputDeviceInfo.deviceName() << "]";
+        qCDebug(audioclient)<< "got AudioClient::changeDevice signal, about to call switchOutputToAudioDevice() outputDeviceInfo: ["<< outputDeviceInfo.deviceName() << "]";
         switchOutputToAudioDevice(outputDeviceInfo.getDevice());
     });
 
@@ -345,8 +378,9 @@ AudioClient::AudioClient() :
 
     // start a thread to detect peak value changes
     _checkPeakValuesTimer = new QTimer(this);
-    connect(_checkPeakValuesTimer, &QTimer::timeout, this,
-            [this] { QtConcurrent::run(QThreadPool::globalInstance(), [this] { checkPeakValues(); }); });
+    connect(_checkPeakValuesTimer, &QTimer::timeout, this, [this] {
+        QtConcurrent::run(QThreadPool::globalInstance(), [this] { checkPeakValues(); });
+    });
     const unsigned long PEAK_VALUES_CHECK_INTERVAL_MSECS = 50;
     _checkPeakValuesTimer->start(PEAK_VALUES_CHECK_INTERVAL_MSECS);
 
@@ -367,7 +401,9 @@ AudioClient::AudioClient() :
     packetReceiver.registerListener(PacketType::SelectedAudioFormat, this, "handleSelectedAudioFormat");
 
     auto& domainHandler = nodeList->getDomainHandler();
-    connect(&domainHandler, &DomainHandler::disconnectedFromDomain, this, [this] { _solo.reset(); });
+    connect(&domainHandler, &DomainHandler::disconnectedFromDomain, this, [this] {
+        _solo.reset();
+    });
     connect(nodeList.data(), &NodeList::nodeActivated, this, [this](SharedNodePointer node) {
         if (node->getType() == NodeType::AudioMixer) {
             _solo.resend();
@@ -376,6 +412,7 @@ AudioClient::AudioClient() :
 }
 
+
 AudioClient::~AudioClient() {
     stop();
 
     if (_codec && _encoder) {
@@ -392,11 +429,11 @@ void AudioClient::customDeleter() {
 }
 
 void AudioClient::handleMismatchAudioFormat(SharedNodePointer node, const QString& currentCodec, const QString& recievedCodec) {
-    qCDebug(audioclient) << __FUNCTION__ << "sendingNode:" << *node << "currentCodec:" << currentCodec
-                         << "recievedCodec:" << recievedCodec;
+    qCDebug(audioclient) << __FUNCTION__ << "sendingNode:" << *node << "currentCodec:" << currentCodec << "recievedCodec:" << recievedCodec;
     selectAudioFormat(recievedCodec);
 }
 
+
 void AudioClient::reset() {
     _receivedAudioStream.reset();
     _stats.reset();
@@ -474,8 +511,7 @@ QString AudioClient::getWinDeviceName(wchar_t* guid) {
     HRESULT hr = S_OK;
     CoInitialize(nullptr);
     IMMDeviceEnumerator* pMMDeviceEnumerator = nullptr;
-    CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator),
-                     (void**)&pMMDeviceEnumerator);
+    CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator), (void**)&pMMDeviceEnumerator);
    IMMDevice* pEndpoint;
    hr = pMMDeviceEnumerator->GetDevice(guid, &pEndpoint);
    if (hr == E_NOTFOUND) {
@@ -499,22 +535,30 @@ QAudioDeviceInfo defaultAudioDeviceForMode(QAudio::Mode mode) {
     if (getAvailableDevices(mode).size() > 1) {
         AudioDeviceID defaultDeviceID = 0;
         uint32_t propertySize = sizeof(AudioDeviceID);
-        AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDefaultInputDevice,
-                                                       kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
+        AudioObjectPropertyAddress propertyAddress = {
+            kAudioHardwarePropertyDefaultInputDevice,
+            kAudioObjectPropertyScopeGlobal,
+            kAudioObjectPropertyElementMaster
+        };
 
         if (mode == QAudio::AudioOutput) {
             propertyAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
         }
 
-        OSStatus getPropertyError =
-            AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &propertySize, &defaultDeviceID);
+        OSStatus getPropertyError = AudioObjectGetPropertyData(kAudioObjectSystemObject,
+                                                               &propertyAddress,
+                                                               0,
+                                                               NULL,
+                                                               &propertySize,
+                                                               &defaultDeviceID);
 
         if (!getPropertyError && propertySize) {
             CFStringRef deviceName = NULL;
             propertySize = sizeof(deviceName);
             propertyAddress.mSelector = kAudioDevicePropertyDeviceNameCFString;
-            getPropertyError =
-                AudioObjectGetPropertyData(defaultDeviceID, &propertyAddress, 0, NULL, &propertySize, &deviceName);
+            getPropertyError = AudioObjectGetPropertyData(defaultDeviceID, &propertyAddress, 0,
+                                                          NULL, &propertySize, &deviceName);
 
             if (!getPropertyError && propertySize) {
                 // find a device in the list that matches the name we have and return it
@@ -552,11 +596,9 @@ QAudioDeviceInfo defaultAudioDeviceForMode(QAudio::Mode mode) {
     HRESULT hr = S_OK;
     CoInitialize(NULL);
     IMMDeviceEnumerator* pMMDeviceEnumerator = NULL;
-    CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator),
-                     (void**)&pMMDeviceEnumerator);
+    CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator), (void**)&pMMDeviceEnumerator);
     IMMDevice* pEndpoint;
-    hr = pMMDeviceEnumerator->GetDefaultAudioEndpoint(mode == QAudio::AudioOutput ? eRender : eCapture, eMultimedia,
-                                                      &pEndpoint);
+    hr = pMMDeviceEnumerator->GetDefaultAudioEndpoint(mode == QAudio::AudioOutput ? eRender : eCapture, eMultimedia, &pEndpoint);
     if (hr == E_NOTFOUND) {
         printf("Audio Error: device not found\n");
         deviceName = QString("NONE");
@@ -570,8 +612,8 @@ QAudioDeviceInfo defaultAudioDeviceForMode(QAudio::Mode mode) {
         CoUninitialize();
     }
 
-    qCDebug(audioclient) << "defaultAudioDeviceForMode mode: " << (mode == QAudio::AudioOutput ? "Output" : "Input") << " ["
-                         << deviceName << "] [" << getNamedAudioDeviceForMode(mode, deviceName).deviceName() << "]";
+    qCDebug(audioclient) << "defaultAudioDeviceForMode mode: " << (mode == QAudio::AudioOutput ? "Output" : "Input")
+                         << " [" << deviceName << "] [" << getNamedAudioDeviceForMode(mode, deviceName).deviceName() << "]";
 
     return getNamedAudioDeviceForMode(mode, deviceName);
 #endif
@@ -599,8 +641,10 @@ bool AudioClient::getNamedAudioDeviceForModeExists(QAudio::Mode mode, const QStr
     return (getNamedAudioDeviceForMode(mode, deviceName).deviceName() == deviceName);
 }
 
+
 // attempt to use the native sample rate and channel count
 bool nativeFormatForAudioDevice(const QAudioDeviceInfo& audioDevice, QAudioFormat& audioFormat) {
+
     audioFormat = audioDevice.preferredFormat();
 
     // converting to/from this rate must produce an integral number of samples
@@ -621,6 +665,7 @@ bool nativeFormatForAudioDevice(const QAudioDeviceInfo& audioDevice, QAudioForma
     // attempt the native sample rate, with channels forced to 2
     audioFormat.setChannelCount(2);
     if (!audioDevice.isFormatSupported(audioFormat)) {
+
         // attempt the native sample rate, with channels forced to 1
         audioFormat.setChannelCount(1);
         if (!audioDevice.isFormatSupported(audioFormat)) {
@@ -634,6 +679,7 @@ bool nativeFormatForAudioDevice(const QAudioDeviceInfo& audioDevice, QAudioForma
 bool adjustedFormatForAudioDevice(const QAudioDeviceInfo& audioDevice,
                                   const QAudioFormat& desiredAudioFormat,
                                   QAudioFormat& adjustedAudioFormat) {
+
     qCDebug(audioclient) << "The desired format for audio I/O is" << desiredAudioFormat;
 
 #if defined(Q_OS_WIN)
@@ -666,6 +712,7 @@ bool adjustedFormatForAudioDevice(const QAudioDeviceInfo& audioDevice,
 
     for (int channelCount : (desiredAudioFormat.channelCount() == 1 ? inputChannels : outputChannels)) {
         for (int sampleRate : sampleRates) {
+
             adjustedAudioFormat.setChannelCount(channelCount);
             adjustedAudioFormat.setSampleRate(sampleRate);
 
@@ -678,11 +725,8 @@ bool adjustedFormatForAudioDevice(const QAudioDeviceInfo& audioDevice,
     return false; // a supported format could not be found
 }
 
-bool sampleChannelConversion(const int16_t* sourceSamples,
-                             int16_t* destinationSamples,
-                             int numSourceSamples,
-                             const int sourceChannelCount,
-                             const int destinationChannelCount) {
+bool sampleChannelConversion(const int16_t* sourceSamples, int16_t* destinationSamples, int numSourceSamples,
+                             const int sourceChannelCount, const int destinationChannelCount) {
     if (sourceChannelCount == 2 && destinationChannelCount == 1) {
         // loop through the stereo input audio samples and average every two samples
         for (int i = 0; i < numSourceSamples; i += 2) {
@@ -691,6 +735,7 @@ bool sampleChannelConversion(const int16_t* sourceSamples,
 
         return true;
     } else if (sourceChannelCount == 1 && destinationChannelCount == 2) {
+
         // loop through the mono input audio and repeat each sample twice
         for (int i = 0; i < numSourceSamples; ++i) {
             destinationSamples[i * 2] = destinationSamples[(i * 2) + 1] = sourceSamples[i];
@@ -703,29 +748,28 @@ bool sampleChannelConversion(const int16_t* sourceSamples,
 }
 
 int possibleResampling(AudioSRC* resampler,
-                       const int16_t* sourceSamples,
-                       int16_t* destinationSamples,
-                       int numSourceSamples,
-                       int maxDestinationSamples,
-                       const int sourceChannelCount,
-                       const int destinationChannelCount) {
+                       const int16_t* sourceSamples, int16_t* destinationSamples,
+                       int numSourceSamples, int maxDestinationSamples,
+                       const int sourceChannelCount, const int destinationChannelCount) {
     int numSourceFrames = numSourceSamples / sourceChannelCount;
     int numDestinationFrames = 0;
 
     if (numSourceSamples > 0) {
         if (!resampler) {
-            if (!sampleChannelConversion(sourceSamples, destinationSamples, numSourceSamples, sourceChannelCount,
-                                         destinationChannelCount)) {
+            if (!sampleChannelConversion(sourceSamples, destinationSamples, numSourceSamples,
+                                         sourceChannelCount, destinationChannelCount)) {
                 // no conversion, we can copy the samples directly across
                 memcpy(destinationSamples, sourceSamples, numSourceSamples * AudioConstants::SAMPLE_SIZE);
             }
             numDestinationFrames = numSourceFrames;
         } else {
             if (sourceChannelCount != destinationChannelCount) {
 
                 int16_t* channelConversionSamples = new int16_t[numSourceFrames * destinationChannelCount];
 
-                sampleChannelConversion(sourceSamples, channelConversionSamples, numSourceSamples, sourceChannelCount,
-                                        destinationChannelCount);
+                sampleChannelConversion(sourceSamples, channelConversionSamples, numSourceSamples,
                                        sourceChannelCount, destinationChannelCount);
 
                 numDestinationFrames = resampler->render(channelConversionSamples, destinationSamples, numSourceFrames);
 
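
Note: possibleResampling() is the one entry point that covers all four cases (plain copy, channel conversion only, resampling only, or both). A hypothetical call for a 44.1 kHz stereo capture feeding a 24 kHz mono stream; the rates and buffer sizes are illustrative, not values taken from this diff, and stack-constructing AudioSRC at these rates is assumed to be supported:

    #include <cstdint>

    void resamplingExample() {
        // 20 ms of 44.1 kHz stereo in -> 20 ms of 24 kHz mono out (882 * 24000 / 44100 = 480 frames)
        AudioSRC resampler(44100, 24000, 1);  // same constructor shape as used in this file

        int16_t source[882 * 2] = {};    // interleaved stereo input
        int16_t destination[480] = {};   // mono output

        int numFrames = possibleResampling(&resampler,
                                           source, destination,
                                           882 * 2, 480,
                                           2, 1);  // stereo in, mono out
        (void)numFrames;  // would be 480 here
    }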
@@ -745,6 +789,7 @@ int possibleResampling(AudioSRC* resampler,
 }
 
+
 void AudioClient::start() {
     // set up the desired audio format
     _desiredInputFormat.setSampleRate(AudioConstants::SAMPLE_RATE);
     _desiredInputFormat.setSampleSize(16);
@@ -833,6 +878,7 @@ void AudioClient::handleAudioDataPacket(QSharedPointer<ReceivedMessage> message)
     nodeList->flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::ReceiveFirstAudioPacket);
 
     if (_audioOutput) {
+
         if (!_hasReceivedFirstPacket) {
             _hasReceivedFirstPacket = true;
 
@@ -849,8 +895,8 @@ void AudioClient::handleAudioDataPacket(QSharedPointer<ReceivedMessage> message)
     }
 }
 
-AudioClient::Gate::Gate(AudioClient* audioClient) : _audioClient(audioClient) {
-}
+AudioClient::Gate::Gate(AudioClient* audioClient) :
+    _audioClient(audioClient) {}
 
 void AudioClient::Gate::setIsSimulatingJitter(bool enable) {
     std::lock_guard<std::mutex> lock(_mutex);
@@ -903,6 +949,7 @@ void AudioClient::Gate::flush() {
     _index = 0;
 }
 
+
 void AudioClient::handleNoisyMutePacket(QSharedPointer<ReceivedMessage> message) {
     if (!_muted) {
         setMuted(true);
@@ -948,6 +995,7 @@ void AudioClient::handleSelectedAudioFormat(QSharedPointer<ReceivedMessage> mess
 }
 
+
 void AudioClient::selectAudioFormat(const QString& selectedCodecName) {
     _selectedCodecName = selectedCodecName;
 
     qCDebug(audioclient) << "Selected Codec:" << _selectedCodecName << "isStereoInput:" << _isStereoInput;
@@ -965,12 +1013,12 @@ void AudioClient::selectAudioFormat(const QString& selectedCodecName) {
         if (_selectedCodecName == plugin->getName()) {
             _codec = plugin;
             _receivedAudioStream.setupCodec(plugin, _selectedCodecName, AudioConstants::STEREO);
-            _encoder = plugin->createEncoder(AudioConstants::SAMPLE_RATE,
-                                             _isStereoInput ? AudioConstants::STEREO : AudioConstants::MONO);
+            _encoder = plugin->createEncoder(AudioConstants::SAMPLE_RATE, _isStereoInput ? AudioConstants::STEREO : AudioConstants::MONO);
             qCDebug(audioclient) << "Selected Codec Plugin:" << _codec.get();
             break;
         }
     }
 }
 
+
 bool AudioClient::switchAudioDevice(QAudio::Mode mode, const QAudioDeviceInfo& deviceInfo) {
@@ -1035,6 +1083,7 @@ void AudioClient::configureReverb() {
 void AudioClient::updateReverbOptions() {
     bool reverbChanged = false;
     if (_receivedAudioStream.hasReverb()) {
+
         if (_zoneReverbOptions.getReverbTime() != _receivedAudioStream.getRevebTime()) {
             _zoneReverbOptions.setReverbTime(_receivedAudioStream.getRevebTime());
             reverbChanged = true;
@@ -1147,6 +1196,7 @@ void AudioClient::configureWebrtc() {
+
 // rebuffer into 10ms chunks
 void AudioClient::processWebrtcFarEnd(const int16_t* samples, int numFrames, int numChannels, int sampleRate) {
 
     const webrtc::StreamConfig streamConfig = webrtc::StreamConfig(sampleRate, numChannels);
     const int numChunk = (int)streamConfig.num_frames();
 
@@ -1160,6 +1210,7 @@ void AudioClient::processWebrtcFarEnd(const int16_t* samples, int numFrames, int
     }
 
     while (numFrames > 0) {
+
         // number of frames to fill
         int numFill = std::min(numFrames, numChunk - _numFifoFarEnd);
 
@@ -1170,6 +1221,7 @@ void AudioClient::processWebrtcFarEnd(const int16_t* samples, int numFrames, int
         _numFifoFarEnd += numFill;
 
         if (_numFifoFarEnd == numChunk) {
+
             // convert audio format
             float buffer[WEBRTC_CHANNELS_MAX][WEBRTC_FRAMES_MAX];
             float* const buffers[WEBRTC_CHANNELS_MAX] = { buffer[0], buffer[1] };
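
Note: these processWebrtcFarEnd() hunks implement a classic rebuffering FIFO: device callbacks arrive with arbitrary frame counts, while the echo canceller wants exact 10 ms chunks (streamConfig.num_frames()). A stripped-down sketch of the accumulation pattern; the names are illustrative stand-ins, not the class's real members:

    #include <algorithm>
    #include <cstdint>
    #include <cstring>

    static int16_t fifo[480 * 2];  // one 10 ms chunk, stereo, 48 kHz (illustrative sizes)
    static int numFifo = 0;        // frames currently buffered

    static void pushFarEnd(const int16_t* samples, int numFrames, int numChannels, int numChunk) {
        while (numFrames > 0) {
            // number of frames to fill
            int numFill = std::min(numFrames, numChunk - numFifo);
            std::memcpy(&fifo[numFifo * numChannels], samples, numFill * numChannels * sizeof(int16_t));
            samples += numFill * numChannels;
            numFrames -= numFill;
            numFifo += numFill;

            if (numFifo == numChunk) {
                // a full 10 ms chunk is ready; hand it to the echo canceller here
                numFifo = 0;
            }
        }
    }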
@@ -1186,6 +1238,7 @@ void AudioClient::processWebrtcFarEnd(const int16_t* samples, int numFrames, int
 }
 
+
 void AudioClient::processWebrtcNearEnd(int16_t* samples, int numFrames, int numChannels, int sampleRate) {
     const webrtc::StreamConfig streamConfig = webrtc::StreamConfig(sampleRate, numChannels);
     const int numChunk = (int)streamConfig.num_frames();
 
@@ -1244,8 +1297,7 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
 
     // if required, create loopback resampler
     if (_inputFormat.sampleRate() != _outputFormat.sampleRate() && !_loopbackResampler) {
-        qCDebug(audioclient) << "Resampling from" << _inputFormat.sampleRate() << "to" << _outputFormat.sampleRate()
-                             << "for audio loopback.";
+        qCDebug(audioclient) << "Resampling from" << _inputFormat.sampleRate() << "to" << _outputFormat.sampleRate() << "for audio loopback.";
         _loopbackResampler = new AudioSRC(_inputFormat.sampleRate(), _outputFormat.sampleRate(), OUTPUT_CHANNEL_COUNT);
     }
 
@@ -1261,8 +1313,10 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
     int16_t* inputSamples = reinterpret_cast<int16_t*>(inputByteArray.data());
     int16_t* loopbackSamples = reinterpret_cast<int16_t*>(loopBackByteArray.data());
 
-    int numLoopbackSamples = possibleResampling(_loopbackResampler, inputSamples, loopbackSamples, numInputSamples,
-                                                maxLoopbackSamples, _inputFormat.channelCount(), OUTPUT_CHANNEL_COUNT);
+    int numLoopbackSamples = possibleResampling(_loopbackResampler,
+                                                inputSamples, loopbackSamples,
+                                                numInputSamples, maxLoopbackSamples,
+                                                _inputFormat.channelCount(), OUTPUT_CHANNEL_COUNT);
 
     loopBackByteArray.resize(numLoopbackSamples * AudioConstants::SAMPLE_SIZE);
 
@@ -1275,9 +1329,11 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
     // if required, upmix or downmix to deviceChannelCount
     int deviceChannelCount = _outputFormat.channelCount();
     if (deviceChannelCount == OUTPUT_CHANNEL_COUNT) {
+
         _loopbackOutputDevice->write(loopBackByteArray);
+
     } else {
 
         static QByteArray deviceByteArray;
 
         int numDeviceSamples = (numLoopbackSamples * deviceChannelCount) / OUTPUT_CHANNEL_COUNT;
@@ -1297,6 +1353,7 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
 
+
 void AudioClient::handleAudioInput(QByteArray& audioBuffer) {
     if (!_audioPaused) {
 
         bool audioGateOpen = false;
         if (!_muted) {
@@ -1350,7 +1407,8 @@ void AudioClient::handleAudioInput(QByteArray& audioBuffer) {
         }
 
         emitAudioPacket(encodedBuffer.data(), encodedBuffer.size(), _outgoingAvatarAudioSequenceNumber, _isStereoInput,
-                        audioTransform, avatarBoundingBoxCorner, avatarBoundingBoxScale, packetType, _selectedCodecName);
+                        audioTransform, avatarBoundingBoxCorner, avatarBoundingBoxScale,
+                        packetType, _selectedCodecName);
         _stats.sentPacket();
     }
 }
@@ -1365,10 +1423,9 @@ void AudioClient::handleMicAudioInput() {
 #endif
 
     // input samples required to produce exactly NETWORK_FRAME_SAMPLES of output
-    const int inputSamplesRequired =
-        (_inputToNetworkResampler ? _inputToNetworkResampler->getMinInput(AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL)
-                                  : AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL) *
-        _inputFormat.channelCount();
+    const int inputSamplesRequired = (_inputToNetworkResampler ?
+        _inputToNetworkResampler->getMinInput(AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL) :
+        AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL) * _inputFormat.channelCount();
 
     const auto inputAudioSamples = std::unique_ptr<int16_t[]>(new int16_t[inputSamplesRequired]);
     QByteArray inputByteArray = _inputDevice->readAll();
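
Note: the reshaped expression sizes each read so that exactly one network frame comes out of the resampler. With illustrative numbers (a 240-sample network frame and a 48 kHz stereo input; the real constants live in AudioConstants, and a polyphase resampler's getMinInput() may add a few frames of filter history):

    constexpr int networkFramesPerChannel = 240;  // e.g. 10 ms at 24 kHz (illustrative)
    constexpr int inputRate = 48000;
    constexpr int networkRate = 24000;
    constexpr int inputChannels = 2;

    // ignoring resampler filter history, the minimum input is just the rate ratio:
    constexpr int minInputFrames = networkFramesPerChannel * inputRate / networkRate;  // 480
    constexpr int inputSamplesRequired = minInputFrames * inputChannels;               // 960 interleaved samples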
@@ -1380,14 +1437,17 @@ void AudioClient::handleMicAudioInput() {
     float audioInputMsecsRead = inputByteArray.size() / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
     _stats.updateInputMsRead(audioInputMsecsRead);
 
-    const int numNetworkBytes =
-        _isStereoInput ? AudioConstants::NETWORK_FRAME_BYTES_STEREO : AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
-    const int numNetworkSamples =
-        _isStereoInput ? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO : AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;
+    const int numNetworkBytes = _isStereoInput
+        ? AudioConstants::NETWORK_FRAME_BYTES_STEREO
+        : AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
+    const int numNetworkSamples = _isStereoInput
+        ? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO
+        : AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;
 
     static int16_t networkAudioSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
 
     while (_inputRingBuffer.samplesAvailable() >= inputSamplesRequired) {
+
         _inputRingBuffer.readSamples(inputAudioSamples.get(), inputSamplesRequired);
 
         // detect clipping on the raw input
@@ -1417,8 +1477,10 @@ void AudioClient::handleMicAudioInput() {
         emit inputLoudnessChanged(_lastSmoothedRawInputLoudness, isClipping);
 
         if (!_muted) {
-            possibleResampling(_inputToNetworkResampler, inputAudioSamples.get(), networkAudioSamples, inputSamplesRequired,
-                               numNetworkSamples, _inputFormat.channelCount(), _desiredInputFormat.channelCount());
+            possibleResampling(_inputToNetworkResampler,
+                               inputAudioSamples.get(), networkAudioSamples,
+                               inputSamplesRequired, numNetworkSamples,
+                               _inputFormat.channelCount(), _desiredInputFormat.channelCount());
         }
         int bytesInInputRingBuffer = _inputRingBuffer.samplesAvailable() * AudioConstants::SAMPLE_SIZE;
         float msecsInInputRingBuffer = bytesInInputRingBuffer / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
@@ -1430,8 +1492,9 @@ void AudioClient::handleMicAudioInput() {
 }
 
 void AudioClient::handleDummyAudioInput() {
-    const int numNetworkBytes =
-        _isStereoInput ? AudioConstants::NETWORK_FRAME_BYTES_STEREO : AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
+    const int numNetworkBytes = _isStereoInput
+        ? AudioConstants::NETWORK_FRAME_BYTES_STEREO
+        : AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
 
     QByteArray audioBuffer(numNetworkBytes, 0); // silent
     handleAudioInput(audioBuffer);
@@ -1464,7 +1527,8 @@ void AudioClient::prepareLocalAudioInjectors(std::unique_ptr<Lock> localAudioLoc
     int bufferCapacity = _localInjectorsStream.getSampleCapacity();
     int maxOutputSamples = AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL * AudioConstants::STEREO;
     if (_localToOutputResampler) {
-        maxOutputSamples = _localToOutputResampler->getMaxOutput(AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL) *
+        maxOutputSamples =
+            _localToOutputResampler->getMaxOutput(AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL) *
             AudioConstants::STEREO;
     }
 
@@ -1497,7 +1561,8 @@ void AudioClient::prepareLocalAudioInjectors(std::unique_ptr<Lock> localAudioLoc
     } else {
         // write to local injectors' ring buffer
         samples = AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
-        _localInjectorsStream.writeSamples(_localMixBuffer, AudioConstants::NETWORK_FRAME_SAMPLES_STEREO);
+        _localInjectorsStream.writeSamples(_localMixBuffer,
+            AudioConstants::NETWORK_FRAME_SAMPLES_STEREO);
     }
 
     _localSamplesAvailable.fetch_add(samples, std::memory_order_release);
@@ -1522,23 +1587,26 @@ bool AudioClient::mixLocalAudioInjectors(float* mixBuffer) {
         // the lock guarantees that injectorBuffer, if found, is invariant
         auto injectorBuffer = injector->getLocalBuffer();
         if (injectorBuffer) {
+
             auto options = injector->getOptions();
 
             static const int HRTF_DATASET_INDEX = 1;
 
-            int numChannels = options.ambisonic ? AudioConstants::AMBISONIC
-                                                : (options.stereo ? AudioConstants::STEREO : AudioConstants::MONO);
+            int numChannels = options.ambisonic ? AudioConstants::AMBISONIC : (options.stereo ? AudioConstants::STEREO : AudioConstants::MONO);
             size_t bytesToRead = numChannels * AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
 
             // get one frame from the injector
             memset(_localScratchBuffer, 0, bytesToRead);
             if (0 < injectorBuffer->readData((char*)_localScratchBuffer, bytesToRead)) {
+
                 bool isSystemSound = !options.positionSet && !options.ambisonic;
 
                 float gain = options.volume * (isSystemSound ? _systemInjectorGain : _localInjectorGain);
 
                 if (options.ambisonic) {
+
                     if (options.positionSet) {
+
                         // distance attenuation
                         glm::vec3 relativePosition = options.position - _positionGetter();
                         float distance = glm::max(glm::length(relativePosition), EPSILON);
@@ -1558,10 +1626,12 @@ bool AudioClient::mixLocalAudioInjectors(float* mixBuffer) {
                     float qz = relativeOrientation.y;
 
                     // spatialize into mixBuffer
-                    injector->getLocalFOA().render(_localScratchBuffer, mixBuffer, HRTF_DATASET_INDEX, qw, qx, qy, qz, gain,
-                                                   AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
+                    injector->getLocalFOA().render(_localScratchBuffer, mixBuffer, HRTF_DATASET_INDEX,
+                        qw, qx, qy, qz, gain, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
                 } else if (options.stereo) {
+
                     if (options.positionSet) {
+
                         // distance attenuation
                         glm::vec3 relativePosition = options.position - _positionGetter();
                         float distance = glm::max(glm::length(relativePosition), EPSILON);
|
@ -1574,6 +1644,7 @@ bool AudioClient::mixLocalAudioInjectors(float* mixBuffer) {
|
||||||
} else { // injector is mono
|
} else { // injector is mono
|
||||||
|
|
||||||
if (options.positionSet) {
|
if (options.positionSet) {
|
||||||
|
|
||||||
// distance attenuation
|
// distance attenuation
|
||||||
glm::vec3 relativePosition = options.position - _positionGetter();
|
glm::vec3 relativePosition = options.position - _positionGetter();
|
||||||
float distance = glm::max(glm::length(relativePosition), EPSILON);
|
float distance = glm::max(glm::length(relativePosition), EPSILON);
|
||||||
|
@ -1582,9 +1653,10 @@ bool AudioClient::mixLocalAudioInjectors(float* mixBuffer) {
|
||||||
float azimuth = azimuthForSource(relativePosition);
|
float azimuth = azimuthForSource(relativePosition);
|
||||||
|
|
||||||
// spatialize into mixBuffer
|
// spatialize into mixBuffer
|
||||||
injector->getLocalHRTF().render(_localScratchBuffer, mixBuffer, HRTF_DATASET_INDEX, azimuth, distance,
|
injector->getLocalHRTF().render(_localScratchBuffer, mixBuffer, HRTF_DATASET_INDEX,
|
||||||
gain, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
|
azimuth, distance, gain, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
// direct mix into mixBuffer
|
// direct mix into mixBuffer
|
||||||
injector->getLocalHRTF().mixMono(_localScratchBuffer, mixBuffer, gain,
|
injector->getLocalHRTF().mixMono(_localScratchBuffer, mixBuffer, gain,
|
||||||
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
|
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
|
||||||
|
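
Note: the ambisonic, stereo, and mono branches in this function share one preamble: compute relativePosition, clamp the length to EPSILON, and fold distance attenuation into gain before spatializing. The rolloff curve itself is not in this diff; a sketch assuming a plain inverse-distance law with a unit-gain near field:

    #include <algorithm>

    // Assumed 1/d rolloff; the codebase's actual attenuation curve may differ.
    static float gainForSourceSketch(float distance, float volume) {
        const float NEAR_FIELD = 1.0f;  // metres inside which gain stays flat
        return volume / std::max(distance, NEAR_FIELD);
    }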
@@ -1592,12 +1664,14 @@ bool AudioClient::mixLocalAudioInjectors(float* mixBuffer) {
                     }
 
                 } else {
+
                     //qCDebug(audioclient) << "injector has no more data, marking finished for removal";
                     injector->finishLocalInjection();
                     injectorsToRemove.append(injector);
                 }
 
             } else {
+
                 //qCDebug(audioclient) << "injector has no local buffer, marking as finished for removal";
                 injector->finishLocalInjection();
                 injectorsToRemove.append(injector);
@@ -1616,6 +1690,7 @@ bool AudioClient::mixLocalAudioInjectors(float* mixBuffer) {
 }
 
+
 void AudioClient::processReceivedSamples(const QByteArray& decodedBuffer, QByteArray& outputBuffer) {
     const int16_t* decodedSamples = reinterpret_cast<const int16_t*>(decodedBuffer.data());
     assert(decodedBuffer.size() == AudioConstants::NETWORK_FRAME_BYTES_STEREO);
 
@@ -1719,8 +1794,7 @@ bool AudioClient::setIsStereoInput(bool isStereoInput) {
         if (_encoder) {
             _codec->releaseEncoder(_encoder);
         }
-        _encoder = _codec->createEncoder(AudioConstants::SAMPLE_RATE,
-                                         _isStereoInput ? AudioConstants::STEREO : AudioConstants::MONO);
+        _encoder = _codec->createEncoder(AudioConstants::SAMPLE_RATE, _isStereoInput ? AudioConstants::STEREO : AudioConstants::MONO);
     }
     qCDebug(audioclient) << "Reset Codec:" << _selectedCodecName << "isStereoInput:" << _isStereoInput;
 
@@ -1760,6 +1834,7 @@ int AudioClient::getNumLocalInjectors() {
     Lock lock(_injectorsMutex);
     return _activeLocalAudioInjectors.size();
 }
+
 void AudioClient::outputFormatChanged() {
     _outputFrameSize = (AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL * OUTPUT_CHANNEL_COUNT * _outputFormat.sampleRate()) /
                        _desiredOutputFormat.sampleRate();
|
@ -1833,15 +1908,15 @@ bool AudioClient::switchInputToAudioDevice(const QAudioDeviceInfo inputDeviceInf
|
||||||
|
|
||||||
// we've got the best we can get for input
|
// we've got the best we can get for input
|
||||||
// if required, setup a resampler for this input to our desired network format
|
// if required, setup a resampler for this input to our desired network format
|
||||||
if (_inputFormat != _desiredInputFormat && _inputFormat.sampleRate() != _desiredInputFormat.sampleRate()) {
|
if (_inputFormat != _desiredInputFormat
|
||||||
|
&& _inputFormat.sampleRate() != _desiredInputFormat.sampleRate()) {
|
||||||
qCDebug(audioclient) << "Attemping to create a resampler for input format to network format.";
|
qCDebug(audioclient) << "Attemping to create a resampler for input format to network format.";
|
||||||
|
|
||||||
assert(_inputFormat.sampleSize() == 16);
|
assert(_inputFormat.sampleSize() == 16);
|
||||||
assert(_desiredInputFormat.sampleSize() == 16);
|
assert(_desiredInputFormat.sampleSize() == 16);
|
||||||
int channelCount = (_inputFormat.channelCount() == 2 && _desiredInputFormat.channelCount() == 2) ? 2 : 1;
|
int channelCount = (_inputFormat.channelCount() == 2 && _desiredInputFormat.channelCount() == 2) ? 2 : 1;
|
||||||
|
|
||||||
_inputToNetworkResampler =
|
_inputToNetworkResampler = new AudioSRC(_inputFormat.sampleRate(), _desiredInputFormat.sampleRate(), channelCount);
|
||||||
new AudioSRC(_inputFormat.sampleRate(), _desiredInputFormat.sampleRate(), channelCount);
|
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
qCDebug(audioclient) << "No resampling required for audio input to match desired network format.";
|
qCDebug(audioclient) << "No resampling required for audio input to match desired network format.";
|
||||||
|
@@ -1983,8 +2058,8 @@ void AudioClient::outputNotify() {
                 int newOutputBufferSizeFrames = setOutputBufferSize(oldOutputBufferSizeFrames + 1, false);
 
                 if (newOutputBufferSizeFrames > oldOutputBufferSizeFrames) {
-                    qCDebug(audioclient, "Starve threshold surpassed (%d starves in %d ms)", _outputStarveDetectionCount,
-                            dt);
+                    qCDebug(audioclient,
                            "Starve threshold surpassed (%d starves in %d ms)", _outputStarveDetectionCount, dt);
                 }
 
                 _outputStarveDetectionStartTimeMsec = now;
@@ -1998,8 +2073,7 @@ void AudioClient::outputNotify() {
 bool AudioClient::switchOutputToAudioDevice(const QAudioDeviceInfo outputDeviceInfo, bool isShutdownRequest) {
     Q_ASSERT_X(QThread::currentThread() == thread(), Q_FUNC_INFO, "Function invoked on wrong thread");
 
-    qCDebug(audioclient) << "AudioClient::switchOutputToAudioDevice() outputDeviceInfo: [" << outputDeviceInfo.deviceName()
-                         << "]";
+    qCDebug(audioclient) << "AudioClient::switchOutputToAudioDevice() outputDeviceInfo: [" << outputDeviceInfo.deviceName() << "]";
     bool supportedFormat = false;
 
     // NOTE: device start() uses the Qt internal device list
@@ -2068,16 +2142,15 @@ bool AudioClient::switchOutputToAudioDevice(const QAudioDeviceInfo outputDeviceI

     // we've got the best we can get for input
     // if required, setup a resampler for this input to our desired network format
-    if (_desiredOutputFormat != _outputFormat && _desiredOutputFormat.sampleRate() != _outputFormat.sampleRate()) {
+    if (_desiredOutputFormat != _outputFormat
+        && _desiredOutputFormat.sampleRate() != _outputFormat.sampleRate()) {
         qCDebug(audioclient) << "Attemping to create a resampler for network format to output format.";

         assert(_desiredOutputFormat.sampleSize() == 16);
         assert(_outputFormat.sampleSize() == 16);

-        _networkToOutputResampler =
-            new AudioSRC(_desiredOutputFormat.sampleRate(), _outputFormat.sampleRate(), OUTPUT_CHANNEL_COUNT);
-        _localToOutputResampler =
-            new AudioSRC(_desiredOutputFormat.sampleRate(), _outputFormat.sampleRate(), OUTPUT_CHANNEL_COUNT);
+        _networkToOutputResampler = new AudioSRC(_desiredOutputFormat.sampleRate(), _outputFormat.sampleRate(), OUTPUT_CHANNEL_COUNT);
+        _localToOutputResampler = new AudioSRC(_desiredOutputFormat.sampleRate(), _outputFormat.sampleRate(), OUTPUT_CHANNEL_COUNT);

     } else {
         qCDebug(audioclient) << "No resampling required for network output to match actual output format.";
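Note: AudioSRC is Overte's polyphase sample-rate converter; the hunk above only changes how its construction is wrapped. As a stand-in to show what the conversion accomplishes, here is a naive linear-interpolation resampler for one int16 channel (not the actual AudioSRC algorithm, which uses a proper filter bank):

#include <cstddef>
#include <cstdint>
#include <vector>

// Naive linear-interpolation rate converter for a single channel.
// e.g. resampleLinear(frame, 24000, 48000) doubles the sample count.
std::vector<int16_t> resampleLinear(const std::vector<int16_t>& in, int inRate, int outRate) {
    if (in.size() < 2 || inRate == outRate) {
        return in;
    }
    std::size_t outCount = in.size() * outRate / inRate;
    std::vector<int16_t> out(outCount);
    double step = static_cast<double>(inRate) / outRate;
    for (std::size_t i = 0; i < outCount; ++i) {
        double pos = i * step;
        std::size_t i0 = static_cast<std::size_t>(pos);
        std::size_t i1 = (i0 + 1 < in.size()) ? i0 + 1 : i0;
        double frac = pos - i0;
        // interpolate between the two neighboring input samples
        out[i] = static_cast<int16_t>(in[i0] + (in[i1] - in[i0]) * frac);
    }
    return out;
}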
@@ -2089,9 +2162,7 @@ bool AudioClient::switchOutputToAudioDevice(const QAudioDeviceInfo outputDeviceI
         _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);

         int deviceChannelCount = _outputFormat.channelCount();
-        int frameSize =
-            (AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL * deviceChannelCount * _outputFormat.sampleRate()) /
-            _desiredOutputFormat.sampleRate();
+        int frameSize = (AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL * deviceChannelCount * _outputFormat.sampleRate()) / _desiredOutputFormat.sampleRate();
         int requestedSize = _sessionOutputBufferSizeFrames * frameSize * AudioConstants::SAMPLE_SIZE;
         _audioOutput->setBufferSize(requestedSize);

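Note: the frameSize expression scales one network frame to the device's channel count and sample rate. Assuming Overte's usual audio constants (240 samples per 10 ms network frame at 24 kHz, 2-byte samples; verify against AudioConstants.h), a 48 kHz stereo device works out like this:

#include <cstdio>

int main() {
    // Assumed values from AudioConstants (check the header before relying on them):
    const int NETWORK_FRAME_SAMPLES_PER_CHANNEL = 240;  // 10 ms at 24 kHz
    const int NETWORK_SAMPLE_RATE = 24000;
    const int SAMPLE_SIZE = 2;                          // bytes per int16_t sample

    // Example device: 48 kHz stereo, with a 3-frame session buffer.
    int deviceChannelCount = 2;
    int deviceSampleRate = 48000;
    int sessionOutputBufferSizeFrames = 3;

    int frameSize = (NETWORK_FRAME_SAMPLES_PER_CHANNEL * deviceChannelCount * deviceSampleRate) / NETWORK_SAMPLE_RATE;
    int requestedSize = sessionOutputBufferSizeFrames * frameSize * SAMPLE_SIZE;
    std::printf("frameSize = %d samples, requestedSize = %d bytes\n", frameSize, requestedSize);
    // frameSize = 960 samples, requestedSize = 5760 bytes
}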
@@ -2112,9 +2183,7 @@ bool AudioClient::switchOutputToAudioDevice(const QAudioDeviceInfo outputDeviceI
         _outputScratchBuffer = new int16_t[_outputPeriod];

         // size local output mix buffer based on resampled network frame size
-        int networkPeriod = _localToOutputResampler
-            ? _localToOutputResampler->getMaxOutput(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO)
-            : AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
+        int networkPeriod = _localToOutputResampler ? _localToOutputResampler->getMaxOutput(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO) : AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
         _localOutputMixBuffer = new float[networkPeriod];

         // local period should be at least twice the output period,
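Note: the local mix buffer is sized from the resampler's worst case for one stereo network frame. getMaxOutput() is AudioSRC's own bound; a hypothetical stand-in that captures the idea (round up, then pad a sample of headroom) might look like this, assuming NETWORK_FRAME_SAMPLES_STEREO == 480:

// Hypothetical stand-in for AudioSRC::getMaxOutput(); the real bound comes
// from the library, not from this arithmetic.
int maxResampledOutput(int inputSamples, int inputRate, int outputRate) {
    long long exact = static_cast<long long>(inputSamples) * outputRate;
    int rounded = static_cast<int>((exact + inputRate - 1) / inputRate);  // ceiling division
    return rounded + 1;  // one extra sample of headroom
}
// e.g. maxResampledOutput(480, 24000, 48000) == 961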
@@ -2157,8 +2226,7 @@ int AudioClient::setOutputBufferSize(int numFrames, bool persist) {
     qCDebug(audioclient) << __FUNCTION__ << "numFrames:" << numFrames << "persist:" << persist;

     numFrames = std::min(std::max(numFrames, MIN_BUFFER_FRAMES), MAX_BUFFER_FRAMES);
-    qCDebug(audioclient) << __FUNCTION__ << "clamped numFrames:" << numFrames
-        << "_sessionOutputBufferSizeFrames:" << _sessionOutputBufferSizeFrames;
+    qCDebug(audioclient) << __FUNCTION__ << "clamped numFrames:" << numFrames << "_sessionOutputBufferSizeFrames:" << _sessionOutputBufferSizeFrames;

     if (numFrames != _sessionOutputBufferSizeFrames) {
         qCInfo(audioclient, "Audio output buffer set to %d frames", numFrames);
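Note: the min/max pair above is the pre-C++17 clamp idiom; with C++17 it is equivalent to std::clamp(numFrames, MIN_BUFFER_FRAMES, MAX_BUFFER_FRAMES). A quick sketch with illustrative limits (Overte defines its own MIN_BUFFER_FRAMES / MAX_BUFFER_FRAMES on AudioClient):

#include <algorithm>

// Illustrative limits; the real constants live on AudioClient.
int clampFrames(int numFrames, int minFrames = 1, int maxFrames = 20) {
    return std::min(std::max(numFrames, minFrames), maxFrames);  // same result as std::clamp
}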
@@ -2189,10 +2257,10 @@ const float AudioClient::CALLBACK_ACCELERATOR_RATIO = 2.0f;
 #endif

 int AudioClient::calculateNumberOfInputCallbackBytes(const QAudioFormat& format) const {
-    int numInputCallbackBytes = (int)(((AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL * format.channelCount() *
-                                        ((float)format.sampleRate() / AudioConstants::SAMPLE_RATE)) /
-                                       CALLBACK_ACCELERATOR_RATIO) +
-                                      0.5f);
+    int numInputCallbackBytes = (int)(((AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL
+        * format.channelCount()
+        * ((float) format.sampleRate() / AudioConstants::SAMPLE_RATE))
+        / CALLBACK_ACCELERATOR_RATIO) + 0.5f);

     return numInputCallbackBytes;
 }
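Note: this sizes the platform input callback at half a network frame's worth of bytes (the accelerator ratio of 2 on this platform), scaled to the device rate. With the assumed constants below (480 bytes per channel per 10 ms frame at 24 kHz; hedged, not copied from the header), a mono 48 kHz microphone yields:

#include <cstdio>

int main() {
    // Assumed values (verify against AudioConstants.h and the platform #ifdefs):
    const float NETWORK_FRAME_BYTES_PER_CHANNEL = 480.0f;  // 240 samples * 2 bytes
    const float NETWORK_SAMPLE_RATE = 24000.0f;
    const float CALLBACK_ACCELERATOR_RATIO = 2.0f;

    int channelCount = 1;         // typical mono microphone
    float deviceRate = 48000.0f;  // example device rate

    int numInputCallbackBytes = (int)(((NETWORK_FRAME_BYTES_PER_CHANNEL * channelCount *
                                        (deviceRate / NETWORK_SAMPLE_RATE)) /
                                       CALLBACK_ACCELERATOR_RATIO) + 0.5f);
    std::printf("%d bytes per input callback\n", numInputCallbackBytes);  // prints 480
}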
@@ -2214,6 +2282,7 @@ float AudioClient::azimuthForSource(const glm::vec3& relativePosition) {

     float rotatedSourcePositionLength2 = glm::length2(rotatedSourcePosition);
     if (rotatedSourcePositionLength2 > SOURCE_DISTANCE_THRESHOLD) {
+
         // produce an oriented angle about the y-axis
         glm::vec3 direction = rotatedSourcePosition * (1.0f / fastSqrtf(rotatedSourcePositionLength2));
         float angle = fastAcosf(glm::clamp(-direction.z, -1.0f, 1.0f)); // UNIT_NEG_Z is "forward"
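Note: the azimuth code normalizes the source position in the listener's frame and measures an oriented angle about the y-axis, with -z as forward. A self-contained sketch of the same geometry using <cmath> in place of glm and the fast* helpers (the final sign step is reconstructed from the usual convention; it is not shown in this hunk):

#include <cmath>
#include <cstdio>

// Oriented azimuth about the y-axis; -z is "forward", +x is to the right.
float azimuth(float x, float z) {
    float len = std::sqrt(x * x + z * z);
    if (len == 0.0f) {
        return 0.0f;  // source at the listener; angle is undefined
    }
    float dx = x / len;
    float dz = z / len;
    float angle = std::acos(std::fmax(-1.0f, std::fmin(1.0f, -dz)));
    return (dx < 0.0f) ? -angle : angle;  // sign distinguishes left from right
}

int main() {
    std::printf("dead ahead: %.2f rad\n", azimuth(0.0f, -1.0f));  // 0.00
    std::printf("hard right: %.2f rad\n", azimuth(1.0f, 0.0f));   // 1.57
}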
@@ -2226,6 +2295,7 @@ float AudioClient::azimuthForSource(const glm::vec3& relativePosition) {
 }

 float AudioClient::gainForSource(float distance, float volume) {
+
     // attenuation = -6dB * log2(distance)
     // reference attenuation of 0dB at distance = ATTN_DISTANCE_REF
     float d = (1.0f / ATTN_DISTANCE_REF) * std::max(distance, HRTF_NEARFIELD_MIN);
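Note: "-6 dB per doubling of distance" in linear terms is simply gain = 1/d, where d is the distance in units of the reference distance (gain is 1.0 at d = 1), since 10^(-6*log2(d)/20) is approximately 2^(-log2(d)) = 1/d. A sketch of the whole function under assumed constants (ATTN_DISTANCE_REF and HRTF_NEARFIELD_MIN are Overte's; the values and the final unity clamp here are placeholders):

#include <algorithm>
#include <cstdio>

// gain = volume / d realizes attenuation = -6 dB * log2(d) in linear form.
float gainForSource(float distance, float volume) {
    const float ATTN_DISTANCE_REF = 2.0f;   // assumed: 0 dB at 2 m
    const float HRTF_NEARFIELD_MIN = 1.0f;  // assumed near-field clamp
    float d = (1.0f / ATTN_DISTANCE_REF) * std::max(distance, HRTF_NEARFIELD_MIN);
    return std::min(volume / d, 1.0f);      // placeholder clamp: never amplify
}

int main() {
    // doubling the distance halves the gain: prints 1.00 then 0.50
    std::printf("%.2f %.2f\n", gainForSource(2.0f, 1.0f), gainForSource(4.0f, 1.0f));
}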
@@ -2236,6 +2306,7 @@ float AudioClient::gainForSource(float distance, float volume) {
 }

 qint64 AudioClient::AudioOutputIODevice::readData(char* data, qint64 maxSize) {
+
     // lock-free wait for initialization to avoid races
     if (!_audio->_audioOutputInitialized.load(std::memory_order_acquire)) {
         memset(data, 0, maxSize);
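Note: readData() runs on the audio device's callback thread and must never block, so initialization is published through an atomic flag: the writer stores with release semantics after the buffers are set up, and the callback loads with acquire semantics and emits silence until the flag is visible. The pattern in isolation:

#include <atomic>
#include <cstring>

std::atomic<bool> initialized{false};

void finishInitialization() {
    // ... allocate and fill the buffers the callback will read ...
    initialized.store(true, std::memory_order_release);  // publish
}

long long readData(char* data, long long maxSize) {
    if (!initialized.load(std::memory_order_acquire)) {  // pairs with the store above
        std::memset(data, 0, static_cast<std::size_t>(maxSize));
        return maxSize;  // silence; the device keeps running without blocking
    }
    // ... normal mixing path ...
    return maxSize;
}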
@@ -2254,8 +2325,7 @@ qint64 AudioClient::AudioOutputIODevice::readData(char* data, qint64 maxSize) {
     int samplesRequested = maxSamplesRequested;
     int networkSamplesPopped;
     if ((networkSamplesPopped = _receivedAudioStream.popSamples(samplesRequested, false)) > 0) {
-        qCDebug(audiostream, "Read %d samples from buffer (%d available, %d requested)", networkSamplesPopped,
-            _receivedAudioStream.getSamplesAvailable(), samplesRequested);
+        qCDebug(audiostream, "Read %d samples from buffer (%d available, %d requested)", networkSamplesPopped, _receivedAudioStream.getSamplesAvailable(), samplesRequested);
         AudioRingBuffer::ConstIterator lastPopOutput = _receivedAudioStream.getLastPopOutput();
         lastPopOutput.readSamples(scratchBuffer, networkSamplesPopped);
         for (int i = 0; i < networkSamplesPopped; i++) {
@@ -2287,13 +2357,14 @@ qint64 AudioClient::AudioOutputIODevice::readData(char* data, qint64 maxSize) {
         samplesRequested = std::min(samplesRequested, samplesAvailable);
         if ((injectorSamplesPopped = _localInjectorsStream.appendSamples(mixBuffer, samplesRequested, append)) > 0) {
             _audio->_localSamplesAvailable.fetch_sub(injectorSamplesPopped, std::memory_order_release);
-            qCDebug(audiostream, "Read %d samples from injectors (%d available, %d requested)", injectorSamplesPopped,
-                _localInjectorsStream.samplesAvailable(), samplesRequested);
+            qCDebug(audiostream, "Read %d samples from injectors (%d available, %d requested)", injectorSamplesPopped, _localInjectorsStream.samplesAvailable(), samplesRequested);
         }
     }

     // prepare injectors for the next callback
-    QtConcurrent::run(QThreadPool::globalInstance(), [this] { _audio->prepareLocalAudioInjectors(); });
+    QtConcurrent::run(QThreadPool::globalInstance(), [this] {
+        _audio->prepareLocalAudioInjectors();
+    });

     int samplesPopped = std::max(networkSamplesPopped, injectorSamplesPopped);
     if (samplesPopped == 0) {
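Note: the shape of this mixing step: network audio and locally injected audio are popped independently into the same mix, the callback then advances by whichever stream produced more samples, and injector preparation for the next callback is handed off to the thread pool so the audio thread never does that work inline. The advance/underrun decision in isolation:

#include <algorithm>

// Advance by the larger of the two pop counts; zero means an underrun,
// and the caller outputs silence for this period.
int samplesToAdvance(int networkSamplesPopped, int injectorSamplesPopped) {
    return std::max(networkSamplesPopped, injectorSamplesPopped);
}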
@@ -2373,6 +2444,7 @@ void AudioClient::loadSettings() {
     for (auto& plugin : codecPlugins) {
         qCDebug(audioclient) << "Codec available:" << plugin->getName();
     }
+
 }

 void AudioClient::saveSettings() {
@@ -2385,9 +2457,9 @@ void AudioClient::setAvatarBoundingBoxParameters(glm::vec3 corner, glm::vec3 sca
     avatarBoundingBoxScale = scale;
 }

+
 void AudioClient::startThread() {
-    moveToNewNamedThread(
-        this, "Audio Thread", [this] { start(); }, QThread::TimeCriticalPriority);
+    moveToNewNamedThread(this, "Audio Thread", [this] { start(); }, QThread::TimeCriticalPriority);
 }

 void AudioClient::setInputVolume(float volume, bool emitSignal) {
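Note: moveToNewNamedThread() is an Overte helper (shared thread utilities); the hunk above only joins its arguments onto one line. For readers outside the codebase, a rough Qt-only sketch of what such a call does (simplified; the real helper also wires up cleanup):

#include <QObject>
#include <QString>
#include <QThread>

// Move `worker` (which must have no parent) to a new named thread and start
// it at the requested priority; work begins in response to QThread::started.
void moveToNamedThread(QObject* worker, const QString& name) {
    QThread* thread = new QThread();
    thread->setObjectName(name);
    worker->moveToThread(thread);
    QObject::connect(thread, &QThread::started, worker, [] { /* begin processing here */ });
    thread->start(QThread::TimeCriticalPriority);
}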
AudioClient.h
@@ -80,14 +80,11 @@ class QIODevice;
 class Transform;
 class NLPacket;
-
-
-
 class AudioClient : public AbstractAudioInterface, public Dependency {
     Q_OBJECT
     SINGLETON_DEPENDENCY

     using LocalInjectorsStream = AudioMixRingBuffer;

 public:
     static const int MIN_BUFFER_FRAMES;
     static const int MAX_BUFFER_FRAMES;
@@ -100,21 +97,15 @@ public:

     class AudioOutputIODevice : public QIODevice {
     public:
-        AudioOutputIODevice(LocalInjectorsStream& localInjectorsStream,
-                            MixedProcessedAudioStream& receivedAudioStream,
+        AudioOutputIODevice(LocalInjectorsStream& localInjectorsStream, MixedProcessedAudioStream& receivedAudioStream,
                             AudioClient* audio) :
-            _localInjectorsStream(localInjectorsStream),
-            _receivedAudioStream(receivedAudioStream), _audio(audio), _unfulfilledReads(0) {}
+            _localInjectorsStream(localInjectorsStream), _receivedAudioStream(receivedAudioStream),
+            _audio(audio), _unfulfilledReads(0) {}

         void start() { open(QIODevice::ReadOnly | QIODevice::Unbuffered); }
         qint64 readData(char* data, qint64 maxSize) override;
         qint64 writeData(const char* data, qint64 maxSize) override { return 0; }
-        int getRecentUnfulfilledReads() {
-            int unfulfilledReads = _unfulfilledReads;
-            _unfulfilledReads = 0;
-            return unfulfilledReads;
-        }
-
+        int getRecentUnfulfilledReads() { int unfulfilledReads = _unfulfilledReads; _unfulfilledReads = 0; return unfulfilledReads; }
     private:
         LocalInjectorsStream& _localInjectorsStream;
         MixedProcessedAudioStream& _receivedAudioStream;
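Note: getRecentUnfulfilledReads() above is a plain read-and-reset counter, collapsed back onto one line by this commit. If it ever had to be safe against concurrent bumps from another thread, the same idiom is a single atomic exchange:

#include <atomic>

std::atomic<int> unfulfilledReads{0};

// Returns the count accumulated since the last call and resets it atomically.
int getRecentUnfulfilledReads() {
    return unfulfilledReads.exchange(0, std::memory_order_relaxed);
}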
@@ -132,7 +123,6 @@ public:
     Q_INVOKABLE float getAudioInboundPPS() const { return _audioInbound.rate(); }
     Q_INVOKABLE float getSilentOutboundPPS() const { return _silentOutbound.rate(); }
     Q_INVOKABLE float getAudioOutboundPPS() const { return _audioOutbound.rate(); }
-    Q_INVOKABLE void setDefaultDevice(QList<HifiAudioDeviceInfo>& devices, QAudio::Mode mode);

     const MixedProcessedAudioStream& getReceivedAudioStream() const { return _receivedAudioStream; }
     MixedProcessedAudioStream& getReceivedAudioStream() { return _receivedAudioStream; }
@@ -479,12 +469,6 @@ private:
     QList<HifiAudioDeviceInfo> _inputDevices;
     QList<HifiAudioDeviceInfo> _outputDevices;
-
-    //QAudioDeviceInfo _inputDeviceInfo;
-    // QAudioDeviceInfo _outputDeviceInfo;
-
-    // QList<QAudioDeviceInfo> _inputDevices;
-    /// QList<QAudioDeviceInfo> _outputDevices;

     AudioFileWav _audioFileWav;

     bool _hasReceivedFirstPacket { false };
@@ -517,4 +501,5 @@ private:
     bool _isRecording { false };
 };

+
 #endif // hifi_AudioClient_h