diff --git a/cmake/modules/MacOSXBundleInfo.plist.in b/cmake/modules/MacOSXBundleInfo.plist.in
index b9558cf361..2cea158814 100644
--- a/cmake/modules/MacOSXBundleInfo.plist.in
+++ b/cmake/modules/MacOSXBundleInfo.plist.in
@@ -53,5 +53,7 @@
<key>NSHighResolutionCapable</key>
+ <key>NSMicrophoneUsageDescription</key>
+ <string></string>
diff --git a/libraries/audio-client/src/AudioClient.cpp b/libraries/audio-client/src/AudioClient.cpp
index ef0e70a31d..8797b90860 100644
--- a/libraries/audio-client/src/AudioClient.cpp
+++ b/libraries/audio-client/src/AudioClient.cpp
@@ -291,6 +291,7 @@ AudioClient::AudioClient() :
_inputToNetworkResampler(NULL),
_networkToOutputResampler(NULL),
_localToOutputResampler(NULL),
+ _loopbackResampler(NULL),
_audioLimiter(AudioConstants::SAMPLE_RATE, OUTPUT_CHANNEL_COUNT),
_outgoingAvatarAudioSequenceNumber(0),
_audioOutputIODevice(_localInjectorsStream, _receivedAudioStream, this),
@@ -656,11 +657,11 @@ bool adjustedFormatForAudioDevice(const QAudioDeviceInfo& audioDevice,
return false; // a supported format could not be found
}
-bool sampleChannelConversion(const int16_t* sourceSamples, int16_t* destinationSamples, unsigned int numSourceSamples,
+bool sampleChannelConversion(const int16_t* sourceSamples, int16_t* destinationSamples, int numSourceSamples,
const int sourceChannelCount, const int destinationChannelCount) {
if (sourceChannelCount == 2 && destinationChannelCount == 1) {
// loop through the stereo input audio samples and average every two samples
- for (uint i = 0; i < numSourceSamples; i += 2) {
+ for (int i = 0; i < numSourceSamples; i += 2) {
destinationSamples[i / 2] = (sourceSamples[i] / 2) + (sourceSamples[i + 1] / 2);
}
@@ -668,7 +669,7 @@ bool sampleChannelConversion(const int16_t* sourceSamples, int16_t* destinationS
} else if (sourceChannelCount == 1 && destinationChannelCount == 2) {
// loop through the mono input audio and repeat each sample twice
- for (uint i = 0; i < numSourceSamples; ++i) {
+ for (int i = 0; i < numSourceSamples; ++i) {
destinationSamples[i * 2] = destinationSamples[(i * 2) + 1] = sourceSamples[i];
}
@@ -678,10 +679,13 @@ bool sampleChannelConversion(const int16_t* sourceSamples, int16_t* destinationS
return false;
}
-void possibleResampling(AudioSRC* resampler,
- const int16_t* sourceSamples, int16_t* destinationSamples,
- unsigned int numSourceSamples, unsigned int numDestinationSamples,
- const int sourceChannelCount, const int destinationChannelCount) {
+int possibleResampling(AudioSRC* resampler,
+ const int16_t* sourceSamples, int16_t* destinationSamples,
+ int numSourceSamples, int maxDestinationSamples,
+ const int sourceChannelCount, const int destinationChannelCount) {
+
+ int numSourceFrames = numSourceSamples / sourceChannelCount;
+ int numDestinationFrames = 0;
if (numSourceSamples > 0) {
if (!resampler) {
@@ -690,33 +694,30 @@ void possibleResampling(AudioSRC* resampler,
// no conversion, we can copy the samples directly across
memcpy(destinationSamples, sourceSamples, numSourceSamples * AudioConstants::SAMPLE_SIZE);
}
+ numDestinationFrames = numSourceFrames;
} else {
-
if (sourceChannelCount != destinationChannelCount) {
- int numChannelCoversionSamples = (numSourceSamples * destinationChannelCount) / sourceChannelCount;
- int16_t* channelConversionSamples = new int16_t[numChannelCoversionSamples];
+ int16_t* channelConversionSamples = new int16_t[numSourceFrames * destinationChannelCount];
sampleChannelConversion(sourceSamples, channelConversionSamples, numSourceSamples,
sourceChannelCount, destinationChannelCount);
- resampler->render(channelConversionSamples, destinationSamples, numChannelCoversionSamples);
+ numDestinationFrames = resampler->render(channelConversionSamples, destinationSamples, numSourceFrames);
delete[] channelConversionSamples;
} else {
-
- unsigned int numAdjustedSourceSamples = numSourceSamples;
- unsigned int numAdjustedDestinationSamples = numDestinationSamples;
-
- if (sourceChannelCount == 2 && destinationChannelCount == 2) {
- numAdjustedSourceSamples /= 2;
- numAdjustedDestinationSamples /= 2;
- }
-
- resampler->render(sourceSamples, destinationSamples, numAdjustedSourceSamples);
+ numDestinationFrames = resampler->render(sourceSamples, destinationSamples, numSourceFrames);
}
}
}
+
+ int numDestinationSamples = numDestinationFrames * destinationChannelCount;
+ if (numDestinationSamples > maxDestinationSamples) {
+ qCWarning(audioclient) << "Resampler overflow! numDestinationSamples =" << numDestinationSamples
+ << "but maxDestinationSamples =" << maxDestinationSamples;
+ }
+ return numDestinationSamples;
}
void AudioClient::start() {
@@ -1085,13 +1086,6 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
return;
}
- // NOTE: we assume the inputFormat and the outputFormat are the same, since on any modern
- // multimedia OS they should be. If there is a device that this is not true for, we can
- // add back support to do resampling.
- if (_inputFormat.sampleRate() != _outputFormat.sampleRate()) {
- return;
- }
-
// if this person wants local loopback add that to the locally injected audio
// if there is reverb apply it to local audio and substract the origin samples
@@ -1108,21 +1102,30 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
}
}
+ // if required, create loopback resampler
+ if (_inputFormat.sampleRate() != _outputFormat.sampleRate() && !_loopbackResampler) {
+ qCDebug(audioclient) << "Resampling from" << _inputFormat.sampleRate() << "to" << _outputFormat.sampleRate() << "for audio loopback.";
+ _loopbackResampler = new AudioSRC(_inputFormat.sampleRate(), _outputFormat.sampleRate(), OUTPUT_CHANNEL_COUNT);
+ }
+
static QByteArray loopBackByteArray;
int numInputSamples = inputByteArray.size() / AudioConstants::SAMPLE_SIZE;
- int numLoopbackSamples = (numInputSamples * OUTPUT_CHANNEL_COUNT) / _inputFormat.channelCount();
+ int numInputFrames = numInputSamples / _inputFormat.channelCount();
+ int maxLoopbackFrames = _loopbackResampler ? _loopbackResampler->getMaxOutput(numInputFrames) : numInputFrames;
+ int maxLoopbackSamples = maxLoopbackFrames * OUTPUT_CHANNEL_COUNT;
- loopBackByteArray.resize(numLoopbackSamples * AudioConstants::SAMPLE_SIZE);
+ loopBackByteArray.resize(maxLoopbackSamples * AudioConstants::SAMPLE_SIZE);
int16_t* inputSamples = reinterpret_cast<int16_t*>(inputByteArray.data());
int16_t* loopbackSamples = reinterpret_cast<int16_t*>(loopBackByteArray.data());
- // upmix mono to stereo
- if (!sampleChannelConversion(inputSamples, loopbackSamples, numInputSamples, _inputFormat.channelCount(), OUTPUT_CHANNEL_COUNT)) {
- // no conversion, just copy the samples
- memcpy(loopbackSamples, inputSamples, numInputSamples * AudioConstants::SAMPLE_SIZE);
- }
+ int numLoopbackSamples = possibleResampling(_loopbackResampler,
+ inputSamples, loopbackSamples,
+ numInputSamples, maxLoopbackSamples,
+ _inputFormat.channelCount(), OUTPUT_CHANNEL_COUNT);
+
+ loopBackByteArray.resize(numLoopbackSamples * AudioConstants::SAMPLE_SIZE);
// apply stereo reverb at the source, to the loopback audio
if (!_shouldEchoLocally && hasReverb) {
@@ -1665,12 +1668,17 @@ bool AudioClient::switchInputToAudioDevice(const QAudioDeviceInfo inputDeviceInf
_dummyAudioInput = NULL;
}
+ // cleanup any resamplers
if (_inputToNetworkResampler) {
- // if we were using an input to network resampler, delete it here
delete _inputToNetworkResampler;
_inputToNetworkResampler = NULL;
}
+ if (_loopbackResampler) {
+ delete _loopbackResampler;
+ _loopbackResampler = NULL;
+ }
+
if (_audioGate) {
delete _audioGate;
_audioGate = nullptr;
@@ -1892,15 +1900,22 @@ bool AudioClient::switchOutputToAudioDevice(const QAudioDeviceInfo outputDeviceI
_outputDeviceInfo = QAudioDeviceInfo();
}
+ // cleanup any resamplers
if (_networkToOutputResampler) {
- // if we were using an input to network resampler, delete it here
delete _networkToOutputResampler;
_networkToOutputResampler = NULL;
+ }
+ if (_localToOutputResampler) {
delete _localToOutputResampler;
_localToOutputResampler = NULL;
}
+ if (_loopbackResampler) {
+ delete _loopbackResampler;
+ _loopbackResampler = NULL;
+ }
+
if (isShutdownRequest) {
qCDebug(audioclient) << "The audio output device has shut down.";
return true;
diff --git a/libraries/audio-client/src/AudioClient.h b/libraries/audio-client/src/AudioClient.h
index e209628689..decf0f7751 100644
--- a/libraries/audio-client/src/AudioClient.h
+++ b/libraries/audio-client/src/AudioClient.h
@@ -390,6 +390,7 @@ private:
AudioSRC* _inputToNetworkResampler;
AudioSRC* _networkToOutputResampler;
AudioSRC* _localToOutputResampler;
+ AudioSRC* _loopbackResampler;
// for network audio (used by network audio thread)
int16_t _networkScratchBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_AMBISONIC];
diff --git a/libraries/audio/src/avx2/AudioSRC_avx2.cpp b/libraries/audio/src/avx2/AudioSRC_avx2.cpp
index 0e31a58ce7..e5ac08746c 100644
--- a/libraries/audio/src/avx2/AudioSRC_avx2.cpp
+++ b/libraries/audio/src/avx2/AudioSRC_avx2.cpp
@@ -34,15 +34,26 @@ int AudioSRC::multirateFilter1_AVX2(const float* input0, float* output0, int inp
const float* c0 = &_polyphaseFilter[_numTaps * _phase];
__m256 acc0 = _mm256_setzero_ps();
+ __m256 acc1 = _mm256_setzero_ps();
- for (int j = 0; j < _numTaps; j += 8) {
+ int j = 0;
+ for (; j < _numTaps - 15; j += 16) { // unrolled x 2
//float coef = c0[j];
- __m256 coef0 = _mm256_loadu_ps(&c0[j]);
+ __m256 coef0 = _mm256_loadu_ps(&c0[j + 0]);
+ __m256 coef1 = _mm256_loadu_ps(&c0[j + 8]);
//acc += input[i + j] * coef;
+ acc0 = _mm256_fmadd_ps(_mm256_loadu_ps(&input0[i + j + 0]), coef0, acc0);
+ acc1 = _mm256_fmadd_ps(_mm256_loadu_ps(&input0[i + j + 8]), coef1, acc1);
+ }
+ if (j < _numTaps) {
+
+ __m256 coef0 = _mm256_loadu_ps(&c0[j]);
+
acc0 = _mm256_fmadd_ps(_mm256_loadu_ps(&input0[i + j]), coef0, acc0);
}
+ acc0 = _mm256_add_ps(acc0, acc1);
// horizontal sum
acc0 = _mm256_hadd_ps(acc0, acc0);
@@ -73,19 +84,36 @@ int AudioSRC::multirateFilter1_AVX2(const float* input0, float* output0, int inp
const float* c1 = &_polyphaseFilter[_numTaps * (phase + 1)];
__m256 acc0 = _mm256_setzero_ps();
+ __m256 acc1 = _mm256_setzero_ps();
__m256 frac = _mm256_broadcast_ss(&ftmp);
- for (int j = 0; j < _numTaps; j += 8) {
+ int j = 0;
+ for (; j < _numTaps - 15; j += 16) { // unrolled x 2
//float coef = c0[j] + frac * (c1[j] - c0[j]);
+ __m256 coef0 = _mm256_loadu_ps(&c0[j + 0]);
+ __m256 coef1 = _mm256_loadu_ps(&c1[j + 0]);
+ __m256 coef2 = _mm256_loadu_ps(&c0[j + 8]);
+ __m256 coef3 = _mm256_loadu_ps(&c1[j + 8]);
+ coef1 = _mm256_sub_ps(coef1, coef0);
+ coef3 = _mm256_sub_ps(coef3, coef2);
+ coef0 = _mm256_fmadd_ps(coef1, frac, coef0);
+ coef2 = _mm256_fmadd_ps(coef3, frac, coef2);
+
+ //acc += input[i + j] * coef;
+ acc0 = _mm256_fmadd_ps(_mm256_loadu_ps(&input0[i + j + 0]), coef0, acc0);
+ acc1 = _mm256_fmadd_ps(_mm256_loadu_ps(&input0[i + j + 8]), coef2, acc1);
+ }
+ if (j < _numTaps) {
+
__m256 coef0 = _mm256_loadu_ps(&c0[j]);
__m256 coef1 = _mm256_loadu_ps(&c1[j]);
coef1 = _mm256_sub_ps(coef1, coef0);
coef0 = _mm256_fmadd_ps(coef1, frac, coef0);
- //acc += input[i + j] * coef;
acc0 = _mm256_fmadd_ps(_mm256_loadu_ps(&input0[i + j]), coef0, acc0);
}
+ acc0 = _mm256_add_ps(acc0, acc1);
// horizontal sum
acc0 = _mm256_hadd_ps(acc0, acc0);
diff --git a/libraries/entities-renderer/src/RenderableImageEntityItem.cpp b/libraries/entities-renderer/src/RenderableImageEntityItem.cpp
index 6638bc0687..20d10c6fd4 100644
--- a/libraries/entities-renderer/src/RenderableImageEntityItem.cpp
+++ b/libraries/entities-renderer/src/RenderableImageEntityItem.cpp
@@ -163,7 +163,7 @@ void ImageEntityRenderer::doRender(RenderArgs* args) {
transform = _renderTransform;
});
- if (!_visible || !texture || !texture->isLoaded()) {
+ if (!_visible || !texture || !texture->isLoaded() || color.a == 0.0f) {
return;
}
@@ -214,4 +214,4 @@ void ImageEntityRenderer::doRender(RenderArgs* args) {
);
batch->setResourceTexture(0, nullptr);
-}
\ No newline at end of file
+}
diff --git a/libraries/entities-renderer/src/RenderableShapeEntityItem.cpp b/libraries/entities-renderer/src/RenderableShapeEntityItem.cpp
index 6a0d7b001c..fbc24cb4a5 100644
--- a/libraries/entities-renderer/src/RenderableShapeEntityItem.cpp
+++ b/libraries/entities-renderer/src/RenderableShapeEntityItem.cpp
@@ -270,6 +270,10 @@ void ShapeEntityRenderer::doRender(RenderArgs* args) {
}
});
+ if (outColor.a == 0.0f) {
+ return;
+ }
+
if (proceduralRender) {
if (render::ShapeKey(args->_globalShapeKey).isWireframe() || primitiveMode == PrimitiveMode::LINES) {
geometryCache->renderWireShape(batch, geometryShape, outColor);
diff --git a/libraries/render-utils/src/ShadingModel.slh b/libraries/render-utils/src/ShadingModel.slh
index 99aa01cc5e..5af986bd9d 100644
--- a/libraries/render-utils/src/ShadingModel.slh
+++ b/libraries/render-utils/src/ShadingModel.slh
@@ -222,7 +222,7 @@ void evalFragShadingGloss(out vec3 diffuse, out vec3 specular,
}
vec3 evalSpecularWithOpacity(vec3 specular, float opacity) {
- return specular / opacity;
+ return specular / mix(1.0, opacity, float(opacity > 0.0));
}
<@if not GETFRESNEL0@>