Merge pull request #7053 from birarda/integrate-new-hrtf

Integrate the new audio HRTF in AudioMixer
Ken Cooke 2016-02-10 07:34:50 -08:00
commit a875542142
32 changed files with 447 additions and 2276 deletions

AudioMixer.cpp

@ -82,9 +82,6 @@ AudioMixer::AudioMixer(ReceivedMessage& message) :
_performanceThrottlingRatio(0.0f),
_attenuationPerDoublingInDistance(DEFAULT_ATTENUATION_PER_DOUBLING_IN_DISTANCE),
_noiseMutingThreshold(DEFAULT_NOISE_MUTING_THRESHOLD),
_numStatFrames(0),
_sumListeners(0),
_sumMixes(0),
_lastPerSecondCallbackTime(usecTimestampNow()),
_sendAudioStreamStats(false),
_datagramsReadPerCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
@ -92,66 +89,23 @@ AudioMixer::AudioMixer(ReceivedMessage& message) :
_timeSpentPerHashMatchCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
_readPendingCallsPerSecondStats(1, READ_DATAGRAMS_STATS_WINDOW_SECONDS)
{
// constant defined in AudioMixer.h. However, we don't want to include this here
// we will soon find a better common home for these audio-related constants
// SOON
auto& packetReceiver = DependencyManager::get<NodeList>()->getPacketReceiver();
auto nodeList = DependencyManager::get<NodeList>();
auto& packetReceiver = nodeList->getPacketReceiver();
packetReceiver.registerListenerForTypes({ PacketType::MicrophoneAudioNoEcho, PacketType::MicrophoneAudioWithEcho,
PacketType::InjectAudio, PacketType::SilentAudioFrame,
PacketType::AudioStreamStats },
this, "handleNodeAudioPacket");
packetReceiver.registerListener(PacketType::MuteEnvironment, this, "handleMuteEnvironmentPacket");
connect(nodeList.data(), &NodeList::nodeKilled, this, &AudioMixer::handleNodeKilled);
}
const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
const float RADIUS_OF_HEAD = 0.076f;
int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* listenerNodeData,
const QUuid& streamUUID,
PositionalAudioStream* streamToAdd,
AvatarAudioStream* listeningNodeStream) {
// If repetition with fade is enabled:
// If streamToAdd could not provide a frame (it was starved), then we'll mix its previously-mixed frame
// This is preferable to not mixing it at all since that's equivalent to inserting silence.
// Basically, we'll repeat that last frame until it has a frame to mix. Depending on how many times
// we've repeated that frame in a row, we'll gradually fade that repeated frame into silence.
// This improves the perceived quality of the audio slightly.
bool showDebug = false; // (randFloat() < 0.05f);
float repeatedFrameFadeFactor = 1.0f;
if (!streamToAdd->lastPopSucceeded()) {
if (_streamSettings._repetitionWithFade && !streamToAdd->getLastPopOutput().isNull()) {
// repetition with fade is enabled, and we do have a valid previous frame to repeat.
// calculate its fade factor, which depends on how many times it's already been repeated.
repeatedFrameFadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd->getConsecutiveNotMixedCount() - 1);
if (repeatedFrameFadeFactor == 0.0f) {
return 0;
}
} else {
return 0;
}
}
// at this point, we know streamToAdd's last pop output is valid
// if the frame we're about to mix is silent, bail
if (streamToAdd->getLastPopOutputLoudness() == 0.0f) {
return 0;
}
float bearingRelativeAngleToSource = 0.0f;
float attenuationCoefficient = 1.0f;
int numSamplesDelay = 0;
float weakChannelAmplitudeRatio = 1.0f;
// Is the source that I am mixing my own?
bool sourceIsSelf = (streamToAdd == listeningNodeStream);
glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream->getPosition();
float AudioMixer::gainForSource(const PositionalAudioStream& streamToAdd,
const AvatarAudioStream& listeningNodeStream, const glm::vec3& relativePosition, bool isEcho) {
float gain = 1.0f;
float distanceBetween = glm::length(relativePosition);
@ -159,30 +113,13 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
distanceBetween = EPSILON;
}
if (streamToAdd->getLastPopOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
// according to mixer performance we have decided this does not get to be mixed in
// bail out
return 0;
if (streamToAdd.getType() == PositionalAudioStream::Injector) {
gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
}
++_sumMixes;
if (streamToAdd->getType() == PositionalAudioStream::Injector) {
attenuationCoefficient *= reinterpret_cast<InjectedAudioStream*>(streamToAdd)->getAttenuationRatio();
if (showDebug) {
qDebug() << "AttenuationRatio: " << reinterpret_cast<InjectedAudioStream*>(streamToAdd)->getAttenuationRatio();
}
}
if (showDebug) {
qDebug() << "distance: " << distanceBetween;
}
glm::quat inverseOrientation = glm::inverse(listeningNodeStream->getOrientation());
if (!sourceIsSelf && (streamToAdd->getType() == PositionalAudioStream::Microphone)) {
if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
// source is another avatar, apply fixed off-axis attenuation to make them quieter as they turn away from listener
glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd->getOrientation()) * relativePosition;
glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;
float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
glm::normalize(rotatedListenerPosition));
@ -191,21 +128,16 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
(OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / PI_OVER_TWO));
(OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / PI_OVER_TWO));
if (showDebug) {
qDebug() << "angleOfDelivery" << angleOfDelivery << "offAxisCoefficient: " << offAxisCoefficient;
}
// multiply the current attenuation coefficient by the calculated off axis coefficient
attenuationCoefficient *= offAxisCoefficient;
gain *= offAxisCoefficient;
}
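Reading the geometry above: angleOfDelivery is measured in the source avatar's frame between its forward axis and the listener-to-source vector, so it comes out as PI when the avatar faces the listener and 0 when it faces directly away. The attenuation is then linear in that angle (constant names from the code above):
offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION + ((1 - MAX_OFF_AXIS_ATTENUATION) / 2) * (angleOfDelivery / (PI / 2))
which evaluates to MAX_OFF_AXIS_ATTENUATION for an avatar facing directly away (angle 0) and to 1.0, i.e. no attenuation, for one facing the listener (angle PI).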
float attenuationPerDoublingInDistance = _attenuationPerDoublingInDistance;
for (int i = 0; i < _zonesSettings.length(); ++i) {
if (_audioZones[_zonesSettings[i].source].contains(streamToAdd->getPosition()) &&
_audioZones[_zonesSettings[i].listener].contains(listeningNodeStream->getPosition())) {
if (_audioZones[_zonesSettings[i].source].contains(streamToAdd.getPosition()) &&
_audioZones[_zonesSettings[i].listener].contains(listeningNodeStream.getPosition())) {
attenuationPerDoublingInDistance = _zonesSettings[i].coefficient;
break;
}
@ -213,264 +145,222 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
if (distanceBetween >= ATTENUATION_BEGINS_AT_DISTANCE) {
// calculate the distance coefficient using the distance to this node
float distanceCoefficient = 1 - (logf(distanceBetween / ATTENUATION_BEGINS_AT_DISTANCE) / logf(2.0f)
* attenuationPerDoublingInDistance);
float distanceCoefficient = 1.0f - (logf(distanceBetween / ATTENUATION_BEGINS_AT_DISTANCE) / logf(2.0f)
* attenuationPerDoublingInDistance);
if (distanceCoefficient < 0) {
distanceCoefficient = 0;
}
// multiply the current attenuation coefficient by the distance coefficient
attenuationCoefficient *= distanceCoefficient;
if (showDebug) {
qDebug() << "distanceCoefficient: " << distanceCoefficient;
}
gain *= distanceCoefficient;
}
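As a worked example of the falloff above, suppose attenuationPerDoublingInDistance were 0.5 (an illustrative value, not necessarily the configured default): distanceCoefficient = 1 - log2(d / 1m) * 0.5, so d = 1m gives 1.0, d = 2m gives 0.5, and d = 4m gives 0.0, after which the clamp keeps the source silent. Each doubling of distance past ATTENUATION_BEGINS_AT_DISTANCE removes the same fixed fraction of gain, which is exactly what the setting's name promises.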
if (!sourceIsSelf) {
// Compute sample delay for the two ears to create phase panning
glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;
// project the rotated source position vector onto the XZ plane
rotatedSourcePosition.y = 0.0f;
// produce an oriented angle about the y-axis
bearingRelativeAngleToSource = glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f),
glm::normalize(rotatedSourcePosition),
glm::vec3(0.0f, 1.0f, 0.0f));
const float PHASE_AMPLITUDE_RATIO_AT_90 = 0.5;
// figure out the number of samples of delay and the ratio of the amplitude
// in the weak channel for audio spatialization
float sinRatio = fabsf(sinf(bearingRelativeAngleToSource));
numSamplesDelay = SAMPLE_PHASE_DELAY_AT_90 * sinRatio;
weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
if (distanceBetween < RADIUS_OF_HEAD) {
// Diminish phase panning if source would be inside head
numSamplesDelay *= distanceBetween / RADIUS_OF_HEAD;
weakChannelAmplitudeRatio += (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio) * distanceBetween / RADIUS_OF_HEAD;
}
}
if (showDebug) {
qDebug() << "attenuation: " << attenuationCoefficient;
qDebug() << "bearingRelativeAngleToSource: " << bearingRelativeAngleToSource << " numSamplesDelay: " << numSamplesDelay;
}
AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd->getLastPopOutput();
if (!streamToAdd->isStereo()) {
// this is a mono stream, which means it gets full attenuation and spatialization
// we need to do several things in this process:
// 1) convert from mono to stereo by copying each input sample into the left and right output samples
// 2)
// 2) apply an attenuation AND fade to all samples (left and right)
// 3) based on the bearing relative angle to the source we will weaken and delay either the left or
// right channel of the input into the output
// 4) because one of these channels is delayed, we will need to use historical samples from
// the input stream for that delayed channel
// Mono input to stereo output (item 1 above)
int OUTPUT_SAMPLES_PER_INPUT_SAMPLE = 2;
int inputSampleCount = AudioConstants::NETWORK_FRAME_SAMPLES_STEREO / OUTPUT_SAMPLES_PER_INPUT_SAMPLE;
int maxOutputIndex = AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
// attenuation and fade applied to all samples (item 2 above)
float attenuationAndFade = attenuationCoefficient * repeatedFrameFadeFactor;
// determine which side is weak and delayed (item 3 above)
bool rightSideWeakAndDelayed = (bearingRelativeAngleToSource > 0.0f);
// since we're converting from mono to stereo, we'll use these two indices to step through
// the output samples. we'll increment each index independently in the loop
int leftDestinationIndex = 0;
int rightDestinationIndex = 1;
// One of our two channels will be delayed (determined below). We'll use this index to step
// through filling in our output with the historical samples for the delayed channel. (item 4 above)
int delayedChannelHistoricalAudioOutputIndex;
// All samples will be attenuated by at least this much
float leftSideAttenuation = attenuationAndFade;
float rightSideAttenuation = attenuationAndFade;
// The weak/delayed channel will be attenuated by this additional amount
float attenuationAndWeakChannelRatioAndFade = attenuationAndFade * weakChannelAmplitudeRatio;
// Now, based on the determination of which side is weak and delayed, set up our true starting point
// for our indexes, as well as the appropriate attenuation for each channel
if (rightSideWeakAndDelayed) {
delayedChannelHistoricalAudioOutputIndex = rightDestinationIndex;
rightSideAttenuation = attenuationAndWeakChannelRatioAndFade;
rightDestinationIndex += (numSamplesDelay * OUTPUT_SAMPLES_PER_INPUT_SAMPLE);
} else {
delayedChannelHistoricalAudioOutputIndex = leftDestinationIndex;
leftSideAttenuation = attenuationAndWeakChannelRatioAndFade;
leftDestinationIndex += (numSamplesDelay * OUTPUT_SAMPLES_PER_INPUT_SAMPLE);
}
// If there was a sample delay for this stream, we need to pull samples prior to the official start of the input
// and stick those samples at the beginning of the output. We only need to loop through this for the weak/delayed
// side, since the normal side is fully handled below. (item 4 above)
if (numSamplesDelay > 0) {
// TODO: delayStreamSourceSamples may be inside the last frame written if the ringbuffer is completely full
// maybe make AudioRingBuffer have 1 extra frame in its buffer
AudioRingBuffer::ConstIterator delayStreamSourceSamples = streamPopOutput - numSamplesDelay;
for (int i = 0; i < numSamplesDelay; i++) {
int16_t originalHistoricalSample = *delayStreamSourceSamples;
_preMixSamples[delayedChannelHistoricalAudioOutputIndex] += originalHistoricalSample
* attenuationAndWeakChannelRatioAndFade;
++delayStreamSourceSamples; // move our input pointer
delayedChannelHistoricalAudioOutputIndex += OUTPUT_SAMPLES_PER_INPUT_SAMPLE; // move our output sample
}
}
// Here's where we copy the MONO input to the STEREO output, and account for delay and weak side attenuation
for (int inputSample = 0; inputSample < inputSampleCount; inputSample++) {
int16_t originalSample = streamPopOutput[inputSample];
int16_t leftSideSample = originalSample * leftSideAttenuation;
int16_t rightSideSample = originalSample * rightSideAttenuation;
// since we might be delayed, don't write beyond our maxOutputIndex
if (leftDestinationIndex <= maxOutputIndex) {
_preMixSamples[leftDestinationIndex] += leftSideSample;
}
if (rightDestinationIndex <= maxOutputIndex) {
_preMixSamples[rightDestinationIndex] += rightSideSample;
}
leftDestinationIndex += OUTPUT_SAMPLES_PER_INPUT_SAMPLE;
rightDestinationIndex += OUTPUT_SAMPLES_PER_INPUT_SAMPLE;
}
} else {
int stereoDivider = streamToAdd->isStereo() ? 1 : 2;
float attenuationAndFade = attenuationCoefficient * repeatedFrameFadeFactor;
for (int s = 0; s < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; s++) {
_preMixSamples[s] = glm::clamp(_preMixSamples[s] + (int)(streamPopOutput[s / stereoDivider] * attenuationAndFade),
AudioConstants::MIN_SAMPLE_VALUE,
AudioConstants::MAX_SAMPLE_VALUE);
}
}
if (!sourceIsSelf && _enableFilter && !streamToAdd->ignorePenumbraFilter()) {
const float TWO_OVER_PI = 2.0f / PI;
const float ZERO_DB = 1.0f;
const float NEGATIVE_ONE_DB = 0.891f;
const float NEGATIVE_THREE_DB = 0.708f;
const float FILTER_GAIN_AT_0 = ZERO_DB; // source is in front
const float FILTER_GAIN_AT_90 = NEGATIVE_ONE_DB; // source is incident to left or right ear
const float FILTER_GAIN_AT_180 = NEGATIVE_THREE_DB; // source is behind
const float FILTER_CUTOFF_FREQUENCY_HZ = 1000.0f;
const float penumbraFilterFrequency = FILTER_CUTOFF_FREQUENCY_HZ; // constant frequency
const float penumbraFilterSlope = NEGATIVE_THREE_DB; // constant slope
float penumbraFilterGainL;
float penumbraFilterGainR;
// variable gain calculation broken down by quadrant
if (-bearingRelativeAngleToSource < -PI_OVER_TWO && -bearingRelativeAngleToSource > -PI) {
penumbraFilterGainL = TWO_OVER_PI *
(FILTER_GAIN_AT_0 - FILTER_GAIN_AT_180) * (-bearingRelativeAngleToSource + PI_OVER_TWO) + FILTER_GAIN_AT_0;
penumbraFilterGainR = TWO_OVER_PI *
(FILTER_GAIN_AT_90 - FILTER_GAIN_AT_180) * (-bearingRelativeAngleToSource + PI_OVER_TWO) + FILTER_GAIN_AT_90;
} else if (-bearingRelativeAngleToSource <= PI && -bearingRelativeAngleToSource > PI_OVER_TWO) {
penumbraFilterGainL = TWO_OVER_PI *
(FILTER_GAIN_AT_180 - FILTER_GAIN_AT_90) * (-bearingRelativeAngleToSource - PI) + FILTER_GAIN_AT_180;
penumbraFilterGainR = TWO_OVER_PI *
(FILTER_GAIN_AT_180 - FILTER_GAIN_AT_0) * (-bearingRelativeAngleToSource - PI) + FILTER_GAIN_AT_180;
} else if (-bearingRelativeAngleToSource <= PI_OVER_TWO && -bearingRelativeAngleToSource > 0) {
penumbraFilterGainL = TWO_OVER_PI *
(FILTER_GAIN_AT_90 - FILTER_GAIN_AT_0) * (-bearingRelativeAngleToSource - PI_OVER_TWO) + FILTER_GAIN_AT_90;
penumbraFilterGainR = FILTER_GAIN_AT_0;
} else {
penumbraFilterGainL = FILTER_GAIN_AT_0;
penumbraFilterGainR = TWO_OVER_PI *
(FILTER_GAIN_AT_0 - FILTER_GAIN_AT_90) * (-bearingRelativeAngleToSource) + FILTER_GAIN_AT_0;
}
if (distanceBetween < RADIUS_OF_HEAD) {
// Diminish effect if source would be inside head
penumbraFilterGainL += (1.0f - penumbraFilterGainL) * (1.0f - distanceBetween / RADIUS_OF_HEAD);
penumbraFilterGainR += (1.0f - penumbraFilterGainR) * (1.0f - distanceBetween / RADIUS_OF_HEAD);
}
bool wantDebug = false;
if (wantDebug) {
qDebug() << "gainL=" << penumbraFilterGainL
<< "gainR=" << penumbraFilterGainR
<< "angle=" << -bearingRelativeAngleToSource;
}
// Get our per listener/source data so we can get our filter
AudioFilterHSF1s& penumbraFilter = listenerNodeData->getListenerSourcePairData(streamUUID)->getPenumbraFilter();
// set the gain on both filter channels
penumbraFilter.setParameters(0, 0, AudioConstants::SAMPLE_RATE, penumbraFilterFrequency, penumbraFilterGainL, penumbraFilterSlope);
penumbraFilter.setParameters(0, 1, AudioConstants::SAMPLE_RATE, penumbraFilterFrequency, penumbraFilterGainR, penumbraFilterSlope);
penumbraFilter.render(_preMixSamples, _preMixSamples, AudioConstants::NETWORK_FRAME_SAMPLES_STEREO / 2);
}
// Actually mix the _preMixSamples into the _mixSamples here.
for (int s = 0; s < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; s++) {
_mixSamples[s] = glm::clamp(_mixSamples[s] + _preMixSamples[s], AudioConstants::MIN_SAMPLE_VALUE,
AudioConstants::MAX_SAMPLE_VALUE);
}
return 1;
return gain;
}
int AudioMixer::prepareMixForListeningNode(Node* node) {
float AudioMixer::azimuthForSource(const PositionalAudioStream& streamToAdd, const AvatarAudioStream& listeningNodeStream,
const glm::vec3& relativePosition) {
glm::quat inverseOrientation = glm::inverse(listeningNodeStream.getOrientation());
// Compute sample delay for the two ears to create phase panning
glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;
// project the rotated source position vector onto the XZ plane
rotatedSourcePosition.y = 0.0f;
static const float SOURCE_DISTANCE_THRESHOLD = 1e-30f;
if (glm::length2(rotatedSourcePosition) > SOURCE_DISTANCE_THRESHOLD) {
// produce an oriented angle about the y-axis
return glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f), glm::normalize(rotatedSourcePosition), glm::vec3(0.0f, -1.0f, 0.0f));
} else {
// there is no distance between listener and source - return no azimuth
return 0;
}
}
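A standalone sketch of the same computation can help verify the sign convention (this assumes GLM and mirrors the code above; it is not part of the diff):

#define GLM_ENABLE_EXPERIMENTAL // needed by newer GLM for the gtx headers
#include <glm/glm.hpp>
#include <glm/gtx/norm.hpp>
#include <glm/gtx/vector_angle.hpp>

// rotatedSourcePosition: the source's position in the listener's frame
float azimuthSketch(glm::vec3 rotatedSourcePosition) {
    rotatedSourcePosition.y = 0.0f; // project onto the XZ plane
    if (glm::length2(rotatedSourcePosition) < 1e-30f) {
        return 0.0f; // listener and source coincide - no direction
    }
    return glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f),  // listener forward
                              glm::normalize(rotatedSourcePosition),
                              glm::vec3(0.0f, -1.0f, 0.0f));
}

With the (0, -1, 0) reference axis, a source at (1, 0, 0) in the listener's frame (directly to the right) yields +PI/2.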
void AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData& listenerNodeData,
const PositionalAudioStream& streamToAdd,
const QUuid& sourceNodeID,
const AvatarAudioStream& listeningNodeStream) {
// to reduce artifacts we calculate the gain and azimuth for every source for this listener
// even if we are not going to end up mixing in this source
// this ensures that the tail of any previously mixed audio or the first block of new audio sounds correct
++_totalMixes;
// check if this is a server echo of a source back to itself
bool isEcho = (&streamToAdd == &listeningNodeStream);
// figure out the gain for this source at the listener
glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();
float gain = gainForSource(streamToAdd, listeningNodeStream, relativePosition, isEcho);
// figure out the azimuth to this source at the listener
float azimuth = isEcho ? 0.0f : azimuthForSource(streamToAdd, listeningNodeStream, relativePosition);
float repeatedFrameFadeFactor = 1.0f;
static const int HRTF_DATASET_INDEX = 1;
if (!streamToAdd.lastPopSucceeded()) {
bool forceSilentBlock = true;
if (_streamSettings._repetitionWithFade && !streamToAdd.getLastPopOutput().isNull()) {
// repetition with fade is enabled, and we do have a valid previous frame to repeat
// so we mix the previously-mixed block
// this is preferable to not mixing it at all to avoid the harsh jump to silence
// we'll repeat the last block until it has a block to mix
// and we'll gradually fade that repeated block into silence.
// calculate its fade factor, which depends on how many times it's already been repeated.
repeatedFrameFadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1);
if (repeatedFrameFadeFactor > 0.0f) {
// apply the repeatedFrameFadeFactor to the gain
gain *= repeatedFrameFadeFactor;
forceSilentBlock = false;
}
}
if (forceSilentBlock) {
// we're deciding not to repeat either since we've already done it enough times or repetition with fade is disabled
// in this case we will call renderSilent with a forced silent block
// this ensures the correct tail from the previously mixed block and the correct spatialization of first block
// of any upcoming audio
if (!streamToAdd.isStereo() && !isEcho) {
// get the existing listener-source HRTF object, or create a new one
auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());
// this is not done for stereo streams since they do not go through the HRTF
static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
hrtf.renderSilent(silentMonoBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, gain,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
++_hrtfSilentRenders;
}
return;
}
}
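The fade curve itself comes from calculateRepeatedFrameFadeFactor, defined elsewhere in the codebase. Purely to illustrate the contract used above (1.0 on the first repeat, reaching 0.0 after enough consecutive repeats), a linear stand-in could look like this; it is an assumption, not the real implementation:

// hypothetical stand-in for calculateRepeatedFrameFadeFactor
float exampleRepeatedFrameFadeFactor(int indexOfRepeat) {
    const int REPEATS_BEFORE_SILENCE = 5; // illustrative threshold
    float fade = 1.0f - float(indexOfRepeat) / float(REPEATS_BEFORE_SILENCE);
    return fade > 0.0f ? fade : 0.0f;
}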
// grab the stream from the ring buffer
AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput();
if (streamToAdd.isStereo() || isEcho) {
// this is a stereo source or server echo so we do not pass it through the HRTF
// simply apply our calculated gain to each sample
if (streamToAdd.isStereo()) {
for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
_mixedSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE);
}
++_manualStereoMixes;
} else {
for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) {
auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE);
_mixedSamples[i] += monoSample;
_mixedSamples[i + 1] += monoSample;
}
++_manualEchoMixes;
}
return;
}
// get the existing listener-source HRTF object, or create a new one
auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());
static int16_t streamBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL];
streamPopOutput.readSamples(streamBlock, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
// if the frame we're about to mix is silent, simply call render silent and move on
if (streamToAdd.getLastPopOutputLoudness() == 0.0f) {
// silent frame from source
// we still need to call renderSilent via the HRTF for mono source
hrtf.renderSilent(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, gain,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
++_hrtfSilentRenders;
return;
}
if (_performanceThrottlingRatio > 0.0f
&& streamToAdd.getLastPopOutputTrailingLoudness() / glm::length(relativePosition) <= _minAudibilityThreshold) {
// the mixer is struggling so we're going to drop off some streams
// we call renderSilent via the HRTF with the actual frame data and a gain of 0.0
hrtf.renderSilent(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, 0.0f,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
++_hrtfStruggleRenders;
return;
}
++_hrtfRenders;
// mono stream, call the HRTF with our block and calculated azimuth and gain
hrtf.render(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, gain,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
}
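Taken together, the rewritten per-stream path has exactly five outcomes, each counted for the new mix_stats reporting: a full HRTF render for an audible mono source (_hrtfRenders), renderSilent for a silent or starved mono block (_hrtfSilentRenders), a zero-gain renderSilent when the mixer is throttling (_hrtfStruggleRenders), and manual non-HRTF mixes for stereo sources (_manualStereoMixes) and server echo (_manualEchoMixes).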
bool AudioMixer::prepareMixForListeningNode(Node* node) {
AvatarAudioStream* nodeAudioStream = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream();
AudioMixerClientData* listenerNodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
// zero out the client mix for this node
memset(_mixSamples, 0, sizeof(_mixSamples));
memset(_mixedSamples, 0, sizeof(_mixedSamples));
// loop through all other nodes that have sufficient audio to mix
int streamsMixed = 0;
DependencyManager::get<NodeList>()->eachNode([&](const SharedNodePointer& otherNode){
if (otherNode->getLinkedData()) {
AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();
// enumerate the ARBs attached to the otherNode and add all that should be added to mix
auto streamsCopy = otherNodeClientData->getAudioStreams();
const QHash<QUuid, PositionalAudioStream*>& otherNodeAudioStreams = otherNodeClientData->getAudioStreams();
QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
for (i = otherNodeAudioStreams.constBegin(); i != otherNodeAudioStreams.constEnd(); i++) {
PositionalAudioStream* otherNodeStream = i.value();
QUuid streamUUID = i.key();
for (auto& streamPair : streamsCopy) {
if (otherNodeStream->getType() == PositionalAudioStream::Microphone) {
streamUUID = otherNode->getUUID();
}
// clear out the pre-mix samples before filling it up with this source
memset(_preMixSamples, 0, sizeof(_preMixSamples));
auto otherNodeStream = streamPair.second;
if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
streamsMixed += addStreamToMixForListeningNodeWithStream(listenerNodeData, streamUUID,
otherNodeStream, nodeAudioStream);
addStreamToMixForListeningNodeWithStream(*listenerNodeData, *otherNodeStream, otherNode->getUUID(),
*nodeAudioStream);
}
}
}
});
return streamsMixed;
int nonZeroSamples = 0;
// enumerate the mixed samples and clamp any samples outside the min/max
// also check if we ended up with a silent frame
for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
_clampedSamples[i] = int16_t(glm::clamp(int(_mixedSamples[i] * AudioConstants::MAX_SAMPLE_VALUE),
AudioConstants::MIN_SAMPLE_VALUE,
AudioConstants::MAX_SAMPLE_VALUE));
if (_clampedSamples[i] != 0) {
++nonZeroSamples;
}
}
return (nonZeroSamples > 0);
}
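Note the new accumulation scheme: every source is pre-scaled by gain / MAX_SAMPLE_VALUE into a float mix, and conversion back to int16_t happens once per frame. A minimal sketch of that final step, with 32767 and -32768 standing in for AudioConstants::MAX_SAMPLE_VALUE and MIN_SAMPLE_VALUE:

#include <algorithm>
#include <cstdint>

int16_t clampMixedSample(float accumulated) {
    // a single source lands roughly in [-1, 1]; summing several sources
    // can overshoot, so clamp after scaling back to sample range
    int scaled = int(accumulated * 32767.0f);
    return int16_t(std::min(std::max(scaled, -32768), 32767));
}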
void AudioMixer::sendAudioEnvironmentPacket(SharedNodePointer node) {
@ -574,6 +464,31 @@ void AudioMixer::handleMuteEnvironmentPacket(QSharedPointer<ReceivedMessage> mes
}
}
void AudioMixer::handleNodeKilled(SharedNodePointer killedNode) {
// enumerate the connected listeners to remove HRTF objects for the disconnected node
auto nodeList = DependencyManager::get<NodeList>();
nodeList->eachNode([](const SharedNodePointer& node) {
auto clientData = dynamic_cast<AudioMixerClientData*>(node->getLinkedData());
if (clientData) {
clientData->removeHRTFsForNode(node->getUUID());
}
});
}
void AudioMixer::removeHRTFsForFinishedInjector(const QUuid& streamID) {
auto injectorClientData = qobject_cast<AudioMixerClientData*>(sender());
if (injectorClientData) {
// enumerate the connected listeners to remove HRTF objects for the disconnected injector
auto nodeList = DependencyManager::get<NodeList>();
nodeList->eachNode([injectorClientData, &streamID](const SharedNodePointer& node){
auto listenerClientData = dynamic_cast<AudioMixerClientData*>(node->getLinkedData());
if (listenerClientData) {
listenerClientData->removeHRTFForStream(injectorClientData->getNodeID(), streamID);
}
});
}
}
void AudioMixer::sendStatsPacket() {
static QJsonObject statsObject;
@ -581,16 +496,27 @@ void AudioMixer::sendStatsPacket() {
statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f;
statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio;
statsObject["average_listeners_per_frame"] = (float) _sumListeners / (float) _numStatFrames;
statsObject["avg_listeners_per_frame"] = (float) _sumListeners / (float) _numStatFrames;
if (_sumListeners > 0) {
statsObject["average_mixes_per_listener"] = (float) _sumMixes / (float) _sumListeners;
} else {
statsObject["average_mixes_per_listener"] = 0.0;
}
QJsonObject mixStats;
mixStats["%_hrtf_mixes"] = (_totalMixes > 0) ? (_hrtfRenders / _totalMixes) * 100.0f : 0;
mixStats["%_hrtf_silent_mixes"] = (_totalMixes > 0) ? (_hrtfSilentRenders / _totalMixes) * 100.0f : 0;
mixStats["%_hrtf_struggle_mixes"] = (_totalMixes > 0) ? (_hrtfStruggleRenders / _totalMixes) * 100.0f : 0;
mixStats["%_manual_stereo_mixes"] = (_totalMixes > 0) ? (_manualStereoMixes / _totalMixes) * 100.0f : 0;
mixStats["%_manual_echo_mixes"] = (_totalMixes > 0) ? (_manualEchoMixes / _totalMixes) * 100.0f : 0;
mixStats["total_mixes"] = _totalMixes;
mixStats["avg_mixes_per_block"] = _totalMixes / _numStatFrames;
statsObject["mix_stats"] = mixStats;
_sumListeners = 0;
_sumMixes = 0;
_hrtfRenders = 0;
_hrtfSilentRenders = 0;
_hrtfStruggleRenders = 0;
_manualStereoMixes = 0;
_manualEchoMixes = 0;
_totalMixes = 0;
_numStatFrames = 0;
QJsonObject readPendingDatagramStats;
@ -649,7 +575,7 @@ void AudioMixer::sendStatsPacket() {
});
// add the listeners object to the root object
statsObject["listeners"] = listenerStats;
statsObject["z_listeners"] = listenerStats;
// send off the stats packets
ThreadedAssignment::addPacketStatsAndSendStatsPacket(statsObject);
@ -672,8 +598,11 @@ void AudioMixer::domainSettingsRequestComplete() {
nodeList->addNodeTypeToInterestSet(NodeType::Agent);
nodeList->linkedDataCreateCallback = [](Node* node) {
node->setLinkedData(std::unique_ptr<AudioMixerClientData> { new AudioMixerClientData });
nodeList->linkedDataCreateCallback = [&](Node* node) {
node->setLinkedData(std::unique_ptr<NodeData> { new AudioMixerClientData(node->getUUID()) });
auto clientData = dynamic_cast<AudioMixerClientData*>(node->getLinkedData());
connect(clientData, &AudioMixerClientData::injectorStreamFinished, this, &AudioMixer::removeHRTFsForFinishedInjector);
};
DomainHandler& domainHandler = nodeList->getDomainHandler();
@ -777,11 +706,11 @@ void AudioMixer::broadcastMixes() {
if (node->getType() == NodeType::Agent && node->getActiveSocket()
&& nodeData->getAvatarAudioStream()) {
int streamsMixed = prepareMixForListeningNode(node.data());
bool mixHasAudio = prepareMixForListeningNode(node.data());
std::unique_ptr<NLPacket> mixPacket;
if (streamsMixed > 0) {
if (mixHasAudio) {
int mixPacketBytes = sizeof(quint16) + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
mixPacket = NLPacket::create(PacketType::MixedAudio, mixPacketBytes);
@ -790,7 +719,7 @@ void AudioMixer::broadcastMixes() {
mixPacket->writePrimitive(sequence);
// pack mixed audio samples
mixPacket->write(reinterpret_cast<char*>(_mixSamples),
mixPacket->write(reinterpret_cast<char*>(_clampedSamples),
AudioConstants::NETWORK_FRAME_BYTES_STEREO);
} else {
int silentPacketBytes = sizeof(quint16) + sizeof(quint16);

AudioMixer.h

@ -13,11 +13,14 @@
#define hifi_AudioMixer_h
#include <AABox.h>
#include <AudioHRTF.h>
#include <AudioRingBuffer.h>
#include <ThreadedAssignment.h>
#include <UUIDHasher.h>
class PositionalAudioStream;
class AvatarAudioStream;
class AudioHRTF;
class AudioMixerClientData;
const int SAMPLE_PHASE_DELAY_AT_90 = 20;
@ -30,7 +33,6 @@ class AudioMixer : public ThreadedAssignment {
public:
AudioMixer(ReceivedMessage& message);
void deleteLater() { qDebug() << "DELETE LATER CALLED?"; QObject::deleteLater(); }
public slots:
/// threaded run of assignment
void run();
@ -43,30 +45,30 @@ private slots:
void broadcastMixes();
void handleNodeAudioPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode);
void handleMuteEnvironmentPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode);
void handleNodeKilled(SharedNodePointer killedNode);
private:
void removeHRTFsForFinishedInjector(const QUuid& streamID);
private:
void domainSettingsRequestComplete();
/// adds one stream to the mix for a listening node
int addStreamToMixForListeningNodeWithStream(AudioMixerClientData* listenerNodeData,
const QUuid& streamUUID,
PositionalAudioStream* streamToAdd,
AvatarAudioStream* listeningNodeStream);
void addStreamToMixForListeningNodeWithStream(AudioMixerClientData& listenerNodeData,
const PositionalAudioStream& streamToAdd,
const QUuid& sourceNodeID,
const AvatarAudioStream& listeningNodeStream);
float gainForSource(const PositionalAudioStream& streamToAdd, const AvatarAudioStream& listeningNodeStream,
const glm::vec3& relativePosition, bool isEcho);
float azimuthForSource(const PositionalAudioStream& streamToAdd, const AvatarAudioStream& listeningNodeStream,
const glm::vec3& relativePosition);
/// prepares and sends a mix to one Node
int prepareMixForListeningNode(Node* node);
bool prepareMixForListeningNode(Node* node);
/// Send Audio Environment packet for a single node
void sendAudioEnvironmentPacket(SharedNodePointer node);
// used on a per stream basis to run the filter on before mixing, large enough to handle the historical
// data from a phase delay as well as an entire network buffer
int16_t _preMixSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO + (SAMPLE_PHASE_DELAY_AT_90 * 2)];
// client samples capacity is larger than what will be sent to optimize mixing
// we are MMX adding 4 samples at a time so we need client samples to have an extra 4
int16_t _mixSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO + (SAMPLE_PHASE_DELAY_AT_90 * 2)];
void perSecondActions();
bool shouldMute(float quietestFrame);
@ -78,9 +80,17 @@ private:
float _performanceThrottlingRatio;
float _attenuationPerDoublingInDistance;
float _noiseMutingThreshold;
int _numStatFrames;
int _sumListeners;
int _sumMixes;
int _numStatFrames { 0 };
int _sumListeners { 0 };
int _hrtfRenders { 0 };
int _hrtfSilentRenders { 0 };
int _hrtfStruggleRenders { 0 };
int _manualStereoMixes { 0 };
int _manualEchoMixes { 0 };
int _totalMixes { 0 };
float _mixedSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
int16_t _clampedSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
QHash<QString, AABox> _audioZones;
struct ZonesSettings {

AudioMixerClientData.cpp

@ -21,34 +21,40 @@
#include "AudioMixerClientData.h"
AudioMixerClientData::AudioMixerClientData() :
_audioStreams(),
AudioMixerClientData::AudioMixerClientData(const QUuid& nodeID) :
NodeData(nodeID),
_outgoingMixedAudioSequenceNumber(0),
_downstreamAudioStreamStats()
{
}
AudioMixerClientData::~AudioMixerClientData() {
QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
// delete this attached InboundAudioStream
delete i.value();
AvatarAudioStream* AudioMixerClientData::getAvatarAudioStream() {
QReadLocker readLocker { &_streamsLock };
auto it = _audioStreams.find(QUuid());
if (it != _audioStreams.end()) {
return dynamic_cast<AvatarAudioStream*>(it->second.get());
}
// clean up our pair data...
foreach(PerListenerSourcePairData* pairData, _listenerSourcePairData) {
delete pairData;
}
}
AvatarAudioStream* AudioMixerClientData::getAvatarAudioStream() const {
if (_audioStreams.contains(QUuid())) {
return (AvatarAudioStream*)_audioStreams.value(QUuid());
}
// no mic stream found - return NULL
return NULL;
}
void AudioMixerClientData::removeHRTFForStream(const QUuid& nodeID, const QUuid& streamID) {
auto it = _nodeSourcesHRTFMap.find(nodeID);
if (it != _nodeSourcesHRTFMap.end()) {
// erase the stream with the given ID from the given node
it->second.erase(streamID);
// is the map for this node now empty?
// if so we can remove it
if (it->second.size() == 0) {
_nodeSourcesHRTFMap.erase(it);
}
}
}
int AudioMixerClientData::parseData(ReceivedMessage& message) {
PacketType packetType = message.getType();
@ -63,7 +69,7 @@ int AudioMixerClientData::parseData(ReceivedMessage& message) {
return message.getPosition();
} else {
PositionalAudioStream* matchingStream = NULL;
SharedStreamPointer matchingStream;
bool isMicStream = false;
@ -71,8 +77,10 @@ int AudioMixerClientData::parseData(ReceivedMessage& message) {
|| packetType == PacketType::MicrophoneAudioNoEcho
|| packetType == PacketType::SilentAudioFrame) {
QUuid nullUUID = QUuid();
if (!_audioStreams.contains(nullUUID)) {
QWriteLocker writeLocker { &_streamsLock };
auto micStreamIt = _audioStreams.find(QUuid());
if (micStreamIt == _audioStreams.end()) {
// we don't have a mic stream yet, so add it
// read the channel flag to see if our stream is stereo or not
@ -83,11 +91,18 @@ int AudioMixerClientData::parseData(ReceivedMessage& message) {
bool isStereo = channelFlag == 1;
_audioStreams.insert(nullUUID, matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getStreamSettings()));
} else {
matchingStream = _audioStreams.value(nullUUID);
auto emplaced = _audioStreams.emplace(
QUuid(),
std::unique_ptr<PositionalAudioStream> { new AvatarAudioStream(isStereo, AudioMixer::getStreamSettings()) }
);
micStreamIt = emplaced.first;
}
matchingStream = micStreamIt->second;
writeLocker.unlock();
isMicStream = true;
} else if (packetType == PacketType::InjectAudio) {
// this is injected audio
@ -99,13 +114,23 @@ int AudioMixerClientData::parseData(ReceivedMessage& message) {
bool isStereo;
message.readPrimitive(&isStereo);
if (!_audioStreams.contains(streamIdentifier)) {
QWriteLocker writeLock { &_streamsLock };
auto streamIt = _audioStreams.find(streamIdentifier);
if (streamIt == _audioStreams.end()) {
// we don't have this injected stream yet, so add it
_audioStreams.insert(streamIdentifier,
matchingStream = new InjectedAudioStream(streamIdentifier, isStereo, AudioMixer::getStreamSettings()));
} else {
matchingStream = _audioStreams.value(streamIdentifier);
auto emplaced = _audioStreams.emplace(
streamIdentifier,
std::unique_ptr<InjectedAudioStream> { new InjectedAudioStream(streamIdentifier, isStereo, AudioMixer::getStreamSettings()) }
);
streamIt = emplaced.first;
}
matchingStream = streamIt->second;
writeLock.unlock();
}
// seek to the beginning of the packet so that the next reader is in the right spot
@ -126,45 +151,38 @@ int AudioMixerClientData::parseData(ReceivedMessage& message) {
}
void AudioMixerClientData::checkBuffersBeforeFrameSend() {
QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
PositionalAudioStream* stream = i.value();
QWriteLocker writeLocker { &_streamsLock };
if (stream->popFrames(1, true) > 0) {
stream->updateLastPopOutputLoudnessAndTrailingLoudness();
}
}
}
auto it = _audioStreams.begin();
while (it != _audioStreams.end()) {
SharedStreamPointer stream = it->second;
void AudioMixerClientData::removeDeadInjectedStreams() {
static const int INJECTOR_INACTIVITY_USECS = 5 * USECS_PER_SECOND;
const int INJECTOR_CONSECUTIVE_NOT_MIXED_AFTER_STARTED_THRESHOLD = 100;
// if we don't have new data for an injected stream in the last INJECTOR_INACTIVITY_USECS then
// we remove the injector from our streams
if (stream->getType() == PositionalAudioStream::Injector
&& stream->usecsSinceLastPacket() > INJECTOR_INACTIVITY_USECS) {
// this is an inactive injector, pull it from our streams
// we have this second threshold in case the injected audio is so short that the injected stream
// never even reaches its desired size, which means it will never start.
const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 1000;
// first emit that it is finished so that the HRTF objects for this source can be cleaned up
emit injectorStreamFinished(it->second->getStreamIdentifier());
QHash<QUuid, PositionalAudioStream*>::Iterator i = _audioStreams.begin(), end = _audioStreams.end();
while (i != end) {
PositionalAudioStream* audioStream = i.value();
if (audioStream->getType() == PositionalAudioStream::Injector && audioStream->isStarved()) {
int notMixedThreshold = audioStream->hasStarted() ? INJECTOR_CONSECUTIVE_NOT_MIXED_AFTER_STARTED_THRESHOLD
: INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD;
if (audioStream->getConsecutiveNotMixedCount() >= notMixedThreshold) {
delete audioStream;
i = _audioStreams.erase(i);
continue;
// erase the stream to drop our ref to the shared pointer and remove it
it = _audioStreams.erase(it);
} else {
if (stream->popFrames(1, true) > 0) {
stream->updateLastPopOutputLoudnessAndTrailingLoudness();
}
++it;
}
++i;
}
}
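The injectorStreamFinished signal emitted above is the other half of the new HRTF lifecycle: AudioMixer connects it to removeHRTFsForFinishedInjector when it creates each client's linked data, so once an injected stream goes inactive here, every connected listener drops the AudioHRTF it was holding for that stream.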
void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) {
// since audio stream stats packets are sent periodically, this is a good place to remove our dead injected streams.
removeDeadInjectedStreams();
auto nodeList = DependencyManager::get<NodeList>();
// The append flag is a boolean value that will be packed right after the header. The first packet sent
@ -173,9 +191,11 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
// it receives a packet with an appendFlag of 0. This prevents the buildup of dead audio stream stats in the client.
quint8 appendFlag = 0;
auto streamsCopy = getAudioStreams();
// pack and send stream stats packets until all audio streams' stats are sent
int numStreamStatsRemaining = _audioStreams.size();
QHash<QUuid, PositionalAudioStream*>::ConstIterator audioStreamsIterator = _audioStreams.constBegin();
int numStreamStatsRemaining = int(streamsCopy.size());
auto it = streamsCopy.cbegin();
while (numStreamStatsRemaining > 0) {
auto statsPacket = NLPacket::create(PacketType::AudioStreamStats);
@ -192,14 +212,14 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
// pack the calculated number of stream stats
for (int i = 0; i < numStreamStatsToPack; i++) {
PositionalAudioStream* stream = audioStreamsIterator.value();
PositionalAudioStream* stream = it->second.get();
stream->perSecondCallbackForUpdatingStats();
AudioStreamStats streamStats = stream->getAudioStreamStats();
statsPacket->writePrimitive(streamStats);
audioStreamsIterator++;
++it;
}
numStreamStatsRemaining -= numStreamStatsToPack;
@ -209,7 +229,7 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
}
}
QJsonObject AudioMixerClientData::getAudioStreamStats() const {
QJsonObject AudioMixerClientData::getAudioStreamStats() {
QJsonObject result;
QJsonObject downstreamStats;
@ -259,15 +279,15 @@ QJsonObject AudioMixerClientData::getAudioStreamStats() const {
result["upstream"] = "mic unknown";
}
QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
QJsonArray injectorArray;
for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
if (i.value()->getType() == PositionalAudioStream::Injector) {
auto streamsCopy = getAudioStreams();
for (auto& injectorPair : streamsCopy) {
if (injectorPair.second->getType() == PositionalAudioStream::Injector) {
QJsonObject upstreamStats;
AudioStreamStats streamStats = i.value()->getAudioStreamStats();
AudioStreamStats streamStats = injectorPair.second->getAudioStreamStats();
upstreamStats["inj.desired"] = streamStats._desiredJitterBufferFrames;
upstreamStats["desired_calc"] = i.value()->getCalculatedJitterBufferFrames();
upstreamStats["desired_calc"] = injectorPair.second->getCalculatedJitterBufferFrames();
upstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
upstreamStats["available"] = (double) streamStats._framesAvailable;
upstreamStats["starves"] = (double) streamStats._starveCount;
@ -292,11 +312,14 @@ QJsonObject AudioMixerClientData::getAudioStreamStats() const {
return result;
}
void AudioMixerClientData::printUpstreamDownstreamStats() const {
void AudioMixerClientData::printUpstreamDownstreamStats() {
auto streamsCopy = getAudioStreams();
// print the upstream (mic stream) stats if the mic stream exists
if (_audioStreams.contains(QUuid())) {
auto it = streamsCopy.find(QUuid());
if (it != streamsCopy.end()) {
printf("Upstream:\n");
printAudioStreamStats(_audioStreams.value(QUuid())->getAudioStreamStats());
printAudioStreamStats(it->second->getAudioStreamStats());
}
// print the downstream stats if they contain valid info
if (_downstreamAudioStreamStats._packetStreamStats._received > 0) {
@ -333,12 +356,3 @@ void AudioMixerClientData::printAudioStreamStats(const AudioStreamStats& streamS
formatUsecTime(streamStats._timeGapWindowMax).toLatin1().data(),
formatUsecTime(streamStats._timeGapWindowAverage).toLatin1().data());
}
PerListenerSourcePairData* AudioMixerClientData::getListenerSourcePairData(const QUuid& sourceUUID) {
if (!_listenerSourcePairData.contains(sourceUUID)) {
PerListenerSourcePairData* newData = new PerListenerSourcePairData();
_listenerSourcePairData[sourceUUID] = newData;
}
return _listenerSourcePairData[sourceUUID];
}

AudioMixerClientData.h

@ -15,32 +15,35 @@
#include <QtCore/QJsonObject>
#include <AABox.h>
#include <AudioFormat.h> // For AudioFilterHSF1s and _penumbraFilter
#include <AudioBuffer.h> // For AudioFilterHSF1s and _penumbraFilter
#include <AudioFilter.h> // For AudioFilterHSF1s and _penumbraFilter
#include <AudioFilterBank.h> // For AudioFilterHSF1s and _penumbraFilter
#include <AudioHRTF.h>
#include <UUIDHasher.h>
#include "PositionalAudioStream.h"
#include "AvatarAudioStream.h"
class PerListenerSourcePairData {
public:
PerListenerSourcePairData() {
_penumbraFilter.initialize(AudioConstants::SAMPLE_RATE, AudioConstants::NETWORK_FRAME_SAMPLES_STEREO / 2);
};
AudioFilterHSF1s& getPenumbraFilter() { return _penumbraFilter; }
private:
AudioFilterHSF1s _penumbraFilter;
};
class AudioMixerClientData : public NodeData {
Q_OBJECT
public:
AudioMixerClientData();
~AudioMixerClientData();
const QHash<QUuid, PositionalAudioStream*>& getAudioStreams() const { return _audioStreams; }
AvatarAudioStream* getAvatarAudioStream() const;
AudioMixerClientData(const QUuid& nodeID);
using SharedStreamPointer = std::shared_ptr<PositionalAudioStream>;
using AudioStreamMap = std::unordered_map<QUuid, SharedStreamPointer>;
// locks the mutex to make a copy
AudioStreamMap getAudioStreams() { QReadLocker readLock { &_streamsLock }; return _audioStreams; }
AvatarAudioStream* getAvatarAudioStream();
// the following methods should be called from the AudioMixer assignment thread ONLY
// they are not thread-safe
// returns a new or existing HRTF object for the given stream from the given node
AudioHRTF& hrtfForStream(const QUuid& nodeID, const QUuid& streamID = QUuid()) { return _nodeSourcesHRTFMap[nodeID][streamID]; }
// remove HRTFs for all sources from this node
void removeHRTFsForNode(const QUuid& nodeID) { _nodeSourcesHRTFMap.erase(nodeID); }
// removes an AudioHRTF object for a given stream
void removeHRTFForStream(const QUuid& nodeID, const QUuid& streamID = QUuid());
int parseData(ReceivedMessage& message);
@ -48,24 +51,28 @@ public:
void removeDeadInjectedStreams();
QJsonObject getAudioStreamStats() const;
QJsonObject getAudioStreamStats();
void sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode);
void incrementOutgoingMixedAudioSequenceNumber() { _outgoingMixedAudioSequenceNumber++; }
quint16 getOutgoingSequenceNumber() const { return _outgoingMixedAudioSequenceNumber; }
void printUpstreamDownstreamStats() const;
void printUpstreamDownstreamStats();
signals:
void injectorStreamFinished(const QUuid& streamIdentifier);
PerListenerSourcePairData* getListenerSourcePairData(const QUuid& sourceUUID);
private:
void printAudioStreamStats(const AudioStreamStats& streamStats) const;
private:
QHash<QUuid, PositionalAudioStream*> _audioStreams; // mic stream stored under key of null UUID
QReadWriteLock _streamsLock;
AudioStreamMap _audioStreams; // microphone stream from avatar is stored under key of null UUID
// TODO: how can we prune this hash when a stream is no longer present?
QHash<QUuid, PerListenerSourcePairData*> _listenerSourcePairData;
using HRTFMap = std::unordered_map<QUuid, AudioHRTF>;
using NodeSourcesHRTFMap = std::unordered_map<QUuid, HRTFMap>;
NodeSourcesHRTFMap _nodeSourcesHRTFMap;
quint16 _outgoingMixedAudioSequenceNumber;
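A short usage sketch of the per-listener HRTF bookkeeping declared above (all names come from this diff; the nested map lazily creates one AudioHRTF per source node and stream):

// for one listener's AudioMixerClientData:
AudioHRTF& hrtf = listenerData.hrtfForStream(sourceNodeID, streamID); // created on first use
// ... hrtf.render(...) / hrtf.renderSilent(...) once per block ...
listenerData.removeHRTFForStream(sourceNodeID, streamID); // a single injector stream finished
listenerData.removeHRTFsForNode(sourceNodeID);            // the whole source node disconnected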

View file

@ -52,7 +52,7 @@ function setupMenus() {
}
if (!Menu.menuExists(ENTITIES_MENU)) {
Menu.addMenu(ENTITIES_MENU);
// NOTE: these menu items aren't currently working. I've temporarily removed them. Will add them back once we
// rewire these to work
/*
@ -66,20 +66,20 @@ function setupMenus() {
Menu.addMenuItem({ menuName: "Developer > Entities", menuItemName: "Disable Light Entities", isCheckable: true, isChecked: false });
*/
}
if (!Menu.menuExists(RENDER_MENU)) {
Menu.addMenu(RENDER_MENU);
createdRenderMenu = true;
}
if (!Menu.menuItemExists(RENDER_MENU, ENTITIES_ITEM)) {
Menu.addMenuItem({ menuName: RENDER_MENU, menuItemName: ENTITIES_ITEM, isCheckable: true, isChecked: Scene.shouldRenderEntities })
}
if (!Menu.menuItemExists(RENDER_MENU, AVATARS_ITEM)) {
Menu.addMenuItem({ menuName: RENDER_MENU, menuItemName: AVATARS_ITEM, isCheckable: true, isChecked: Scene.shouldRenderAvatars })
}
if (!Menu.menuExists(AUDIO_MENU)) {
Menu.addMenu(AUDIO_MENU);
}
@ -114,14 +114,6 @@ Menu.menuItemEvent.connect(function (menuItem) {
Scene.shouldRenderEntities = Menu.isOptionChecked(ENTITIES_ITEM);
} else if (menuItem == AVATARS_ITEM) {
Scene.shouldRenderAvatars = Menu.isOptionChecked(AVATARS_ITEM);
} else if (menuItem == AUDIO_SOURCE_INJECT && !createdGeneratedAudioMenu) {
Audio.injectGeneratedNoise(Menu.isOptionChecked(AUDIO_SOURCE_INJECT));
} else if (menuItem == AUDIO_SOURCE_PINK_NOISE && !createdGeneratedAudioMenu) {
Audio.selectPinkNoise();
Menu.setIsOptionChecked(AUDIO_SOURCE_SINE_440, false);
} else if (menuItem == AUDIO_SOURCE_SINE_440 && !createdGeneratedAudioMenu) {
Audio.selectSine440();
Menu.setIsOptionChecked(AUDIO_SOURCE_PINK_NOISE, false);
} else if (menuItem == AUDIO_STEREO_INPUT) {
Audio.setStereoInput(Menu.isOptionChecked(AUDIO_STEREO_INPUT))
} else if (AUDIO_LISTENER_OPTIONS.indexOf(menuItem) !== -1) {
@ -145,14 +137,14 @@ Scene.shouldRenderEntitiesChanged.connect(function(shouldRenderEntities) {
function scriptEnding() {
Menu.removeMenu(ENTITIES_MENU);
if (createdRenderMenu) {
Menu.removeMenu(RENDER_MENU);
} else {
Menu.removeMenuItem(RENDER_MENU, ENTITIES_ITEM);
Menu.removeMenuItem(RENDER_MENU, AVATARS_ITEM);
}
if (createdGeneratedAudioMenu) {
Audio.injectGeneratedNoise(false);
Menu.removeMenuItem(AUDIO_MENU, AUDIO_SOURCE_INJECT);

AudioClient.cpp

@ -94,14 +94,11 @@ AudioClient::AudioClient() :
_shouldEchoLocally(false),
_shouldEchoToServer(false),
_isNoiseGateEnabled(true),
_audioSourceInjectEnabled(false),
_reverb(false),
_reverbOptions(&_scriptReverbOptions),
_inputToNetworkResampler(NULL),
_networkToOutputResampler(NULL),
_loopbackResampler(NULL),
_noiseSourceEnabled(false),
_toneSourceEnabled(true),
_outgoingAvatarAudioSequenceNumber(0),
_audioOutputIODevice(_receivedAudioStream, this),
_stats(&_receivedAudioStream),
@ -139,10 +136,6 @@ AudioClient::~AudioClient() {
void AudioClient::reset() {
_receivedAudioStream.reset();
_stats.reset();
_noiseSource.reset();
_toneSource.reset();
_sourceGain.reset();
_inputGain.reset();
_sourceReverb.reset();
_listenerReverb.reset();
}
@ -432,26 +425,9 @@ void AudioClient::start() {
qCDebug(audioclient) << "Unable to set up audio output because of a problem with output format.";
qCDebug(audioclient) << "The closest format available is" << outputDeviceInfo.nearestFormat(_desiredOutputFormat);
}
if (_audioInput) {
_inputFrameBuffer.initialize( _inputFormat.channelCount(), _audioInput->bufferSize() * 8 );
}
_inputGain.initialize();
_sourceGain.initialize();
_noiseSource.initialize();
_toneSource.initialize();
_sourceGain.setParameters(0.05f, 0.0f);
_inputGain.setParameters(1.0f, 0.0f);
}
void AudioClient::stop() {
_inputFrameBuffer.finalize();
_inputGain.finalize();
_sourceGain.finalize();
_noiseSource.finalize();
_toneSource.finalize();
// "switch" to invalid devices in order to shut down the state
switchInputToAudioDevice(QAudioDeviceInfo());
switchOutputToAudioDevice(QAudioDeviceInfo());
@ -705,24 +681,6 @@ void AudioClient::handleAudioInput() {
const auto inputAudioSamples = std::unique_ptr<int16_t[]>(new int16_t[inputSamplesRequired]);
QByteArray inputByteArray = _inputDevice->readAll();
// Add audio source injection if enabled
if (!_muted && _audioSourceInjectEnabled) {
int16_t* inputFrameData = (int16_t*)inputByteArray.data();
const uint32_t inputFrameCount = inputByteArray.size() / sizeof(int16_t);
_inputFrameBuffer.copyFrames(1, inputFrameCount, inputFrameData, false /*copy in*/);
#if ENABLE_INPUT_GAIN
_inputGain.render(_inputFrameBuffer); // input/mic gain+mute
#endif
if (_toneSourceEnabled) { // sine generator
_toneSource.render(_inputFrameBuffer);
} else if(_noiseSourceEnabled) { // pink noise generator
_noiseSource.render(_inputFrameBuffer);
}
_sourceGain.render(_inputFrameBuffer); // post gain
_inputFrameBuffer.copyFrames(1, inputFrameCount, inputFrameData, true /*copy out*/);
}
handleLocalEchoAndReverb(inputByteArray);
@ -757,12 +715,12 @@ void AudioClient::handleAudioInput() {
_inputFormat, _desiredInputFormat);
// Remove DC offset
if (!_isStereoInput && !_audioSourceInjectEnabled) {
if (!_isStereoInput) {
_inputGate.removeDCOffset(networkAudioSamples, numNetworkSamples);
}
// only impose the noise gate and perform tone injection if we are sending mono audio
if (!_isStereoInput && !_audioSourceInjectEnabled && _isNoiseGateEnabled) {
if (!_isStereoInput && _isNoiseGateEnabled) {
_inputGate.gateSamples(networkAudioSamples, numNetworkSamples);
// if we performed the noise gate we can get values from it instead of enumerating the samples again
@ -886,19 +844,6 @@ void AudioClient::setIsStereoInput(bool isStereoInput) {
}
}
void AudioClient::enableAudioSourceInject(bool enable) {
_audioSourceInjectEnabled = enable;
}
void AudioClient::selectAudioSourcePinkNoise() {
_noiseSourceEnabled = true;
_toneSourceEnabled = false;
}
void AudioClient::selectAudioSourceSine440() {
_toneSourceEnabled = true;
_noiseSourceEnabled = false;
}
bool AudioClient::outputLocalInjector(bool isStereo, AudioInjector* injector) {
if (injector->getLocalBuffer()) {

AudioClient.h

@ -25,13 +25,7 @@
#include <QtMultimedia/QAudioInput>
#include <AbstractAudioInterface.h>
#include <AudioBuffer.h>
#include <AudioEffectOptions.h>
#include <AudioFormat.h>
#include <AudioGain.h>
#include <AudioRingBuffer.h>
#include <AudioSourceTone.h>
#include <AudioSourceNoise.h>
#include <AudioStreamStats.h>
#include <DependencyManager.h>
@ -152,10 +146,6 @@ public slots:
void audioMixerKilled();
void toggleMute();
virtual void enableAudioSourceInject(bool enable);
virtual void selectAudioSourcePinkNoise();
virtual void selectAudioSourceSine440();
virtual void setIsStereoInput(bool stereo);
void toggleAudioNoiseReduction() { _isNoiseGateEnabled = !_isNoiseGateEnabled; }
@ -256,7 +246,6 @@ private:
bool _shouldEchoLocally;
bool _shouldEchoToServer;
bool _isNoiseGateEnabled;
bool _audioSourceInjectEnabled;
bool _reverb;
AudioEffectOptions _scriptReverbOptions;
@ -284,23 +273,6 @@ private:
int calculateNumberOfFrameSamples(int numBytes) const;
float calculateDeviceToNetworkInputRatio() const;
// Input framebuffer
AudioBufferFloat32 _inputFrameBuffer;
// Input gain
AudioGain _inputGain;
// Post tone/pink noise generator gain
AudioGain _sourceGain;
// Pink noise source
bool _noiseSourceEnabled;
AudioSourcePinkNoise _noiseSource;
// Tone source
bool _toneSourceEnabled;
AudioSourceTone _toneSource;
quint16 _outgoingAvatarAudioSequenceNumber;
AudioOutputIODevice _audioOutputIODevice;

AbstractAudioInterface.h

@ -33,10 +33,6 @@ public:
public slots:
virtual bool outputLocalInjector(bool isStereo, AudioInjector* injector) = 0;
virtual void enableAudioSourceInject(bool enable) = 0;
virtual void selectAudioSourcePinkNoise() = 0;
virtual void selectAudioSourceSine440() = 0;
virtual void setIsStereoInput(bool stereo) = 0;
};

AudioBuffer.h

@ -1,462 +0,0 @@
//
// AudioBuffer.h
// libraries/audio/src
//
// Created by Craig Hansen-Sturm on 8/29/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioBuffer_h
#define hifi_AudioBuffer_h
#include <typeinfo>
#include <QDebug>
#include "AudioFormat.h"
template< typename T >
class AudioFrameBuffer {
protected:
uint32_t _channelCount;
uint32_t _channelCountMax;
uint32_t _frameCount;
uint32_t _frameCountMax;
T** _frameBuffer;
void allocateFrames();
void deallocateFrames();
public:
AudioFrameBuffer();
AudioFrameBuffer(const uint32_t channelCount, const uint32_t frameCount);
virtual ~AudioFrameBuffer();
void initialize(const uint32_t channelCount, const uint32_t frameCount);
void finalize();
T**& getFrameData();
uint32_t getChannelCount();
uint32_t getFrameCount();
template< typename S >
void copyFrames(uint32_t channelCount, const uint32_t frameCount, S* frames, const bool copyOut = false);
void zeroFrames();
};
template< typename T >
AudioFrameBuffer< T >::AudioFrameBuffer() :
_channelCount(0),
_channelCountMax(0),
_frameCount(0),
_frameCountMax(0),
_frameBuffer(NULL) {
}
template< typename T >
AudioFrameBuffer< T >::AudioFrameBuffer(const uint32_t channelCount, const uint32_t frameCount) :
_channelCount(channelCount),
_channelCountMax(channelCount),
_frameCount(frameCount),
_frameCountMax(frameCount),
_frameBuffer(NULL) {
allocateFrames();
}
template< typename T >
AudioFrameBuffer< T >::~AudioFrameBuffer() {
finalize();
}
template< typename T >
void AudioFrameBuffer< T >::allocateFrames() {
_frameBuffer = new T*[_channelCountMax];
for (uint32_t i = 0; i < _channelCountMax; ++i) {
_frameBuffer[i] = new T[_frameCountMax];
}
}
template< typename T >
void AudioFrameBuffer< T >::deallocateFrames() {
if (_frameBuffer) {
for (uint32_t i = 0; i < _channelCountMax; ++i) {
delete[] _frameBuffer[i];
}
delete[] _frameBuffer;
}
_frameBuffer = NULL;
}
template< typename T >
void AudioFrameBuffer< T >::initialize(const uint32_t channelCount, const uint32_t frameCount) {
if (_frameBuffer) {
finalize();
}
_channelCount = channelCount;
_channelCountMax = channelCount;
_frameCount = frameCount;
_frameCountMax = frameCount;
allocateFrames();
}
template< typename T >
void AudioFrameBuffer< T >::finalize() {
deallocateFrames();
_channelCount = 0;
_channelCountMax = 0;
_frameCount = 0;
_frameCountMax = 0;
}
template< typename T >
inline T**& AudioFrameBuffer< T >::getFrameData() {
return _frameBuffer;
}
template< typename T >
inline uint32_t AudioFrameBuffer< T >::getChannelCount() {
return _channelCount;
}
template< typename T >
inline uint32_t AudioFrameBuffer< T >::getFrameCount() {
return _frameCount;
}
template< typename T >
inline void AudioFrameBuffer< T >::zeroFrames() {
if (!_frameBuffer) {
return;
}
for (uint32_t i = 0; i < _channelCountMax; ++i) {
memset(_frameBuffer[i], 0, sizeof(T)*_frameCountMax);
}
}
template< typename T >
template< typename S >
inline void AudioFrameBuffer< T >::copyFrames(uint32_t channelCount, const uint32_t frameCount, S* frames, const bool copyOut) {
if (!_frameBuffer || !frames) {
return;
}
if (channelCount <= _channelCountMax && frameCount <= _frameCountMax) {
// We always allow copying fewer frames than we have allocated
_frameCount = frameCount;
_channelCount = channelCount;
} else {
qDebug() << "Audio framing error: requested channelCount="
<< channelCount
<< "channelCountMax="
<< _channelCountMax
<< "requested frameCount="
<< frameCount
<< "frameCountMax="
<< _frameCountMax;
_channelCount = std::min(_channelCount, _channelCountMax);
_frameCount = std::min(_frameCount, _frameCountMax);
}
bool frameAlignment16 = (_frameCount & 0x0F) == 0;
if (copyOut) {
S* dst = frames;
if (typeid(T) == typeid(S)) { // source and destination types are the same, just copy out
if (frameAlignment16 && (_channelCount == 1 || _channelCount == 2)) {
if (_channelCount == 1) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
*dst++ = _frameBuffer[0][i + 0];
*dst++ = _frameBuffer[0][i + 1];
*dst++ = _frameBuffer[0][i + 2];
*dst++ = _frameBuffer[0][i + 3];
*dst++ = _frameBuffer[0][i + 4];
*dst++ = _frameBuffer[0][i + 5];
*dst++ = _frameBuffer[0][i + 6];
*dst++ = _frameBuffer[0][i + 7];
*dst++ = _frameBuffer[0][i + 8];
*dst++ = _frameBuffer[0][i + 9];
*dst++ = _frameBuffer[0][i + 10];
*dst++ = _frameBuffer[0][i + 11];
*dst++ = _frameBuffer[0][i + 12];
*dst++ = _frameBuffer[0][i + 13];
*dst++ = _frameBuffer[0][i + 14];
*dst++ = _frameBuffer[0][i + 15];
}
} else if (_channelCount == 2) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
*dst++ = _frameBuffer[0][i + 0];
*dst++ = _frameBuffer[1][i + 0];
*dst++ = _frameBuffer[0][i + 1];
*dst++ = _frameBuffer[1][i + 1];
*dst++ = _frameBuffer[0][i + 2];
*dst++ = _frameBuffer[1][i + 2];
*dst++ = _frameBuffer[0][i + 3];
*dst++ = _frameBuffer[1][i + 3];
*dst++ = _frameBuffer[0][i + 4];
*dst++ = _frameBuffer[1][i + 4];
*dst++ = _frameBuffer[0][i + 5];
*dst++ = _frameBuffer[1][i + 5];
*dst++ = _frameBuffer[0][i + 6];
*dst++ = _frameBuffer[1][i + 6];
*dst++ = _frameBuffer[0][i + 7];
*dst++ = _frameBuffer[1][i + 7];
*dst++ = _frameBuffer[0][i + 8];
*dst++ = _frameBuffer[1][i + 8];
*dst++ = _frameBuffer[0][i + 9];
*dst++ = _frameBuffer[1][i + 9];
*dst++ = _frameBuffer[0][i + 10];
*dst++ = _frameBuffer[1][i + 10];
*dst++ = _frameBuffer[0][i + 11];
*dst++ = _frameBuffer[1][i + 11];
*dst++ = _frameBuffer[0][i + 12];
*dst++ = _frameBuffer[1][i + 12];
*dst++ = _frameBuffer[0][i + 13];
*dst++ = _frameBuffer[1][i + 13];
*dst++ = _frameBuffer[0][i + 14];
*dst++ = _frameBuffer[1][i + 14];
*dst++ = _frameBuffer[0][i + 15];
*dst++ = _frameBuffer[1][i + 15];
}
}
} else {
for (uint32_t i = 0; i < _frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
*dst++ = _frameBuffer[j][i];
}
}
}
} else {
if (typeid(T) == typeid(float32_t) &&
typeid(S) == typeid(int16_t)) { // source and destination are not the same, convert from float32_t to int16_t and copy out
const int scale = (1 << ((8 * sizeof(S)) - 1));
if (frameAlignment16 && (_channelCount == 1 || _channelCount == 2)) {
if (_channelCount == 1) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
*dst++ = (S)(_frameBuffer[0][i + 0] * scale);
*dst++ = (S)(_frameBuffer[0][i + 1] * scale);
*dst++ = (S)(_frameBuffer[0][i + 2] * scale);
*dst++ = (S)(_frameBuffer[0][i + 3] * scale);
*dst++ = (S)(_frameBuffer[0][i + 4] * scale);
*dst++ = (S)(_frameBuffer[0][i + 5] * scale);
*dst++ = (S)(_frameBuffer[0][i + 6] * scale);
*dst++ = (S)(_frameBuffer[0][i + 7] * scale);
*dst++ = (S)(_frameBuffer[0][i + 8] * scale);
*dst++ = (S)(_frameBuffer[0][i + 9] * scale);
*dst++ = (S)(_frameBuffer[0][i + 10] * scale);
*dst++ = (S)(_frameBuffer[0][i + 11] * scale);
*dst++ = (S)(_frameBuffer[0][i + 12] * scale);
*dst++ = (S)(_frameBuffer[0][i + 13] * scale);
*dst++ = (S)(_frameBuffer[0][i + 14] * scale);
*dst++ = (S)(_frameBuffer[0][i + 15] * scale);
}
} else if (_channelCount == 2) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
*dst++ = (S)(_frameBuffer[0][i + 0] * scale);
*dst++ = (S)(_frameBuffer[1][i + 0] * scale);
*dst++ = (S)(_frameBuffer[0][i + 1] * scale);
*dst++ = (S)(_frameBuffer[1][i + 1] * scale);
*dst++ = (S)(_frameBuffer[0][i + 2] * scale);
*dst++ = (S)(_frameBuffer[1][i + 2] * scale);
*dst++ = (S)(_frameBuffer[0][i + 3] * scale);
*dst++ = (S)(_frameBuffer[1][i + 3] * scale);
*dst++ = (S)(_frameBuffer[0][i + 4] * scale);
*dst++ = (S)(_frameBuffer[1][i + 4] * scale);
*dst++ = (S)(_frameBuffer[0][i + 5] * scale);
*dst++ = (S)(_frameBuffer[1][i + 5] * scale);
*dst++ = (S)(_frameBuffer[0][i + 6] * scale);
*dst++ = (S)(_frameBuffer[1][i + 6] * scale);
*dst++ = (S)(_frameBuffer[0][i + 7] * scale);
*dst++ = (S)(_frameBuffer[1][i + 7] * scale);
*dst++ = (S)(_frameBuffer[0][i + 8] * scale);
*dst++ = (S)(_frameBuffer[1][i + 8] * scale);
*dst++ = (S)(_frameBuffer[0][i + 9] * scale);
*dst++ = (S)(_frameBuffer[1][i + 9] * scale);
*dst++ = (S)(_frameBuffer[0][i + 10] * scale);
*dst++ = (S)(_frameBuffer[1][i + 10] * scale);
*dst++ = (S)(_frameBuffer[0][i + 11] * scale);
*dst++ = (S)(_frameBuffer[1][i + 11] * scale);
*dst++ = (S)(_frameBuffer[0][i + 12] * scale);
*dst++ = (S)(_frameBuffer[1][i + 12] * scale);
*dst++ = (S)(_frameBuffer[0][i + 13] * scale);
*dst++ = (S)(_frameBuffer[1][i + 13] * scale);
*dst++ = (S)(_frameBuffer[0][i + 14] * scale);
*dst++ = (S)(_frameBuffer[1][i + 14] * scale);
*dst++ = (S)(_frameBuffer[0][i + 15] * scale);
*dst++ = (S)(_frameBuffer[1][i + 15] * scale);
}
}
} else {
for (uint32_t i = 0; i < _frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
*dst++ = (S)(_frameBuffer[j][i] * scale);
}
}
}
} else {
assert(0); // currently unsupported conversion
}
}
} else { // copyIn
S* src = frames;
if (typeid(T) == typeid(S)) { // source and destination types are the same, copy in
if (frameAlignment16 && (_channelCount == 1 || _channelCount == 2)) {
if (_channelCount == 1) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
_frameBuffer[0][i + 0] = *src++;
_frameBuffer[0][i + 1] = *src++;
_frameBuffer[0][i + 2] = *src++;
_frameBuffer[0][i + 3] = *src++;
_frameBuffer[0][i + 4] = *src++;
_frameBuffer[0][i + 5] = *src++;
_frameBuffer[0][i + 6] = *src++;
_frameBuffer[0][i + 7] = *src++;
_frameBuffer[0][i + 8] = *src++;
_frameBuffer[0][i + 9] = *src++;
_frameBuffer[0][i + 10] = *src++;
_frameBuffer[0][i + 11] = *src++;
_frameBuffer[0][i + 12] = *src++;
_frameBuffer[0][i + 13] = *src++;
_frameBuffer[0][i + 14] = *src++;
_frameBuffer[0][i + 15] = *src++;
}
} else if (_channelCount == 2) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
_frameBuffer[0][i + 0] = *src++;
_frameBuffer[1][i + 0] = *src++;
_frameBuffer[0][i + 1] = *src++;
_frameBuffer[1][i + 1] = *src++;
_frameBuffer[0][i + 2] = *src++;
_frameBuffer[1][i + 2] = *src++;
_frameBuffer[0][i + 3] = *src++;
_frameBuffer[1][i + 3] = *src++;
_frameBuffer[0][i + 4] = *src++;
_frameBuffer[1][i + 4] = *src++;
_frameBuffer[0][i + 5] = *src++;
_frameBuffer[1][i + 5] = *src++;
_frameBuffer[0][i + 6] = *src++;
_frameBuffer[1][i + 6] = *src++;
_frameBuffer[0][i + 7] = *src++;
_frameBuffer[1][i + 7] = *src++;
_frameBuffer[0][i + 8] = *src++;
_frameBuffer[1][i + 8] = *src++;
_frameBuffer[0][i + 9] = *src++;
_frameBuffer[1][i + 9] = *src++;
_frameBuffer[0][i + 10] = *src++;
_frameBuffer[1][i + 10] = *src++;
_frameBuffer[0][i + 11] = *src++;
_frameBuffer[1][i + 11] = *src++;
_frameBuffer[0][i + 12] = *src++;
_frameBuffer[1][i + 12] = *src++;
_frameBuffer[0][i + 13] = *src++;
_frameBuffer[1][i + 13] = *src++;
_frameBuffer[0][i + 14] = *src++;
_frameBuffer[1][i + 14] = *src++;
_frameBuffer[0][i + 15] = *src++;
_frameBuffer[1][i + 15] = *src++;
}
}
} else {
for (uint32_t i = 0; i < _frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
_frameBuffer[j][i] = *src++;
}
}
}
} else {
if (typeid(T) == typeid(float32_t) &&
typeid(S) == typeid(int16_t)) { // source and destination are not the same, convert from int16_t to float32_t and copy in
const int scale = (1 << ((8 * sizeof(S)) - 1));
if (frameAlignment16 && (_channelCount == 1 || _channelCount == 2)) {
if (_channelCount == 1) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
_frameBuffer[0][i + 0] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 1] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 2] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 3] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 4] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 5] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 6] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 7] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 8] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 9] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 10] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 11] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 12] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 13] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 14] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 15] = ((T)(*src++)) / scale;
}
} else if (_channelCount == 2) {
for (uint32_t i = 0; i < _frameCount; i += 16) {
_frameBuffer[0][i + 0] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 0] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 1] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 1] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 2] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 2] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 3] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 3] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 4] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 4] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 5] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 5] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 6] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 6] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 7] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 7] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 8] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 8] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 9] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 9] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 10] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 10] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 11] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 11] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 12] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 12] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 13] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 13] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 14] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 14] = ((T)(*src++)) / scale;
_frameBuffer[0][i + 15] = ((T)(*src++)) / scale;
_frameBuffer[1][i + 15] = ((T)(*src++)) / scale;
}
}
} else {
for (uint32_t i = 0; i < _frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
_frameBuffer[j][i] = ((T)(*src++)) / scale;
}
}
}
} else {
assert(0); // currently unsupported conversion
}
}
}
}
typedef AudioFrameBuffer< float32_t > AudioBufferFloat32;
typedef AudioFrameBuffer< int32_t > AudioBufferSInt32;
#endif // hifi_AudioBuffer_h
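For orientation, a minimal sketch of how this removed class was driven: interleaved int16_t device samples are copied in (deinterleaved and normalized to float), processed per channel, then copied back out. The stereo 512-frame sizing is illustrative, chosen to hit the 16-frame-aligned fast path above.
// Illustrative sketch only -- assumes the AudioBuffer.h removed above.
AudioBufferFloat32 buffer;
buffer.initialize(2, 512);                     // stereo, 512 frames (16-aligned fast path)
int16_t interleaved[2 * 512] = { 0 };          // e.g. samples from the audio device
buffer.copyFrames(2, 512, interleaved);        // copy in: int16_t -> float32_t, deinterleave
float32_t** samples = buffer.getFrameData();   // per-channel float processing happens here
buffer.copyFrames(2, 512, interleaved, true);  // copy out: float32_t -> int16_t, interleave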

View file

@@ -1,315 +0,0 @@
//
// AudioFilter.h
// hifi
//
// Created by Craig Hansen-Sturm on 8/9/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioFilter_h
#define hifi_AudioFilter_h
#include <algorithm>
#include <math.h>
#include <NumericalConstants.h>
#include "AudioFormat.h"
// Implements a standard biquad filter in "Direct Form 1"
// Reference http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt
//
class AudioBiquad {
//
// private data
//
float32_t _a0; // gain
float32_t _a1; // feedforward 1
float32_t _a2; // feedforward 2
float32_t _b1; // feedback 1
float32_t _b2; // feedback 2
float32_t _xm1;
float32_t _xm2;
float32_t _ym1;
float32_t _ym2;
public:
//
// ctor/dtor
//
AudioBiquad() :
_xm1(0.0f),
_xm2(0.0f),
_ym1(0.0f),
_ym2(0.0f) {
setParameters(0.0f, 0.0f, 0.0f, 0.0f, 0.0f);
}
~AudioBiquad() {
}
//
// public interface
//
void setParameters(const float32_t a0, const float32_t a1, const float32_t a2, const float32_t b1, const float32_t b2) {
_a0 = a0; _a1 = a1; _a2 = a2; _b1 = b1; _b2 = b2;
}
void getParameters(float32_t& a0, float32_t& a1, float32_t& a2, float32_t& b1, float32_t& b2) {
a0 = _a0; a1 = _a1; a2 = _a2; b1 = _b1; b2 = _b2;
}
void render(const float32_t* in, float32_t* out, const uint32_t frames) {
float32_t x;
float32_t y;
for (uint32_t i = 0; i < frames; ++i) {
x = *in++;
// biquad
y = (_a0 * x)
+ (_a1 * _xm1)
+ (_a2 * _xm2)
- (_b1 * _ym1)
- (_b2 * _ym2);
y = (y >= -EPSILON && y < EPSILON) ? 0.0f : y; // flush tiny values to zero (denormal protection)
// update delay line
_xm2 = _xm1;
_xm1 = x;
_ym2 = _ym1;
_ym1 = y;
*out++ = y;
}
}
void reset() {
_xm1 = _xm2 = _ym1 = _ym2 = 0.0f;
}
};
//
// Implements common base class interface for all Audio Filter Objects
//
template< class T >
class AudioFilter {
protected:
//
// data
//
AudioBiquad _kernel;
float32_t _sampleRate;
float32_t _frequency;
float32_t _gain;
float32_t _slope;
//
// helpers
//
void updateKernel() {
static_cast<T*>(this)->updateKernel();
}
public:
//
// ctor/dtor
//
AudioFilter() {
setParameters(0.,0.,0.,0.);
}
~AudioFilter() {
}
//
// public interface
//
void setParameters(const float32_t sampleRate, const float32_t frequency, const float32_t gain, const float32_t slope) {
_sampleRate = std::max(sampleRate, 1.0f);
_frequency = std::max(frequency, 2.0f);
_gain = std::max(gain, 0.0f);
_slope = std::max(slope, 0.00001f);
updateKernel();
}
void getParameters(float32_t& sampleRate, float32_t& frequency, float32_t& gain, float32_t& slope) {
sampleRate = _sampleRate; frequency = _frequency; gain = _gain; slope = _slope;
}
void render(const float32_t* in, float32_t* out, const uint32_t frames) {
_kernel.render(in,out,frames);
}
void reset() {
_kernel.reset();
}
};
//
// Implements a low-shelf filter using a biquad
//
class AudioFilterLSF : public AudioFilter< AudioFilterLSF >
{
public:
//
// helpers
//
void updateKernel() {
const float32_t a = _gain;
const float32_t aAdd1 = a + 1.0f;
const float32_t aSub1 = a - 1.0f;
const float32_t omega = TWO_PI * _frequency / _sampleRate;
const float32_t aAdd1TimesCosOmega = aAdd1 * cosf(omega);
const float32_t aSub1TimesCosOmega = aSub1 * cosf(omega);
const float32_t alpha = 0.5f * sinf(omega) / _slope;
const float32_t zeta = 2.0f * sqrtf(a) * alpha;
/*
b0 = A*( (A+1) - (A-1)*cos(w0) + 2*sqrt(A)*alpha )
b1 = 2*A*( (A-1) - (A+1)*cos(w0) )
b2 = A*( (A+1) - (A-1)*cos(w0) - 2*sqrt(A)*alpha )
a0 = (A+1) + (A-1)*cos(w0) + 2*sqrt(A)*alpha
a1 = -2*( (A-1) + (A+1)*cos(w0) )
a2 = (A+1) + (A-1)*cos(w0) - 2*sqrt(A)*alpha
*/
const float32_t b0 = +1.0f * (aAdd1 - aSub1TimesCosOmega + zeta) * a;
const float32_t b1 = +2.0f * (aSub1 - aAdd1TimesCosOmega + 0.0f) * a;
const float32_t b2 = +1.0f * (aAdd1 - aSub1TimesCosOmega - zeta) * a;
const float32_t a0 = +1.0f * (aAdd1 + aSub1TimesCosOmega + zeta);
const float32_t a1 = -2.0f * (aSub1 + aAdd1TimesCosOmega + 0.0f);
const float32_t a2 = +1.0f * (aAdd1 + aSub1TimesCosOmega - zeta);
const float32_t normA0 = 1.0f / a0;
_kernel.setParameters(b0 * normA0, b1 * normA0, b2 * normA0, a1 * normA0, a2 * normA0);
}
};
//
// Implements a hi-shelf filter using a biquad
//
class AudioFilterHSF : public AudioFilter< AudioFilterHSF >
{
public:
//
// helpers
//
void updateKernel() {
const float32_t a = _gain;
const float32_t aAdd1 = a + 1.0f;
const float32_t aSub1 = a - 1.0f;
const float32_t omega = TWO_PI * _frequency / _sampleRate;
const float32_t aAdd1TimesCosOmega = aAdd1 * cosf(omega);
const float32_t aSub1TimesCosOmega = aSub1 * cosf(omega);
const float32_t alpha = 0.5f * sinf(omega) / _slope;
const float32_t zeta = 2.0f * sqrtf(a) * alpha;
/*
b0 = A*( (A+1) + (A-1)*cos(w0) + 2*sqrt(A)*alpha )
b1 = -2*A*( (A-1) + (A+1)*cos(w0) )
b2 = A*( (A+1) + (A-1)*cos(w0) - 2*sqrt(A)*alpha )
a0 = (A+1) - (A-1)*cos(w0) + 2*sqrt(A)*alpha
a1 = 2*( (A-1) - (A+1)*cos(w0) )
a2 = (A+1) - (A-1)*cos(w0) - 2*sqrt(A)*alpha
*/
const float32_t b0 = +1.0f * (aAdd1 + aSub1TimesCosOmega + zeta) * a;
const float32_t b1 = -2.0f * (aSub1 + aAdd1TimesCosOmega + 0.0f) * a;
const float32_t b2 = +1.0f * (aAdd1 + aSub1TimesCosOmega - zeta) * a;
const float32_t a0 = +1.0f * (aAdd1 - aSub1TimesCosOmega + zeta);
const float32_t a1 = +2.0f * (aSub1 - aAdd1TimesCosOmega + 0.0f);
const float32_t a2 = +1.0f * (aAdd1 - aSub1TimesCosOmega - zeta);
const float32_t normA0 = 1.0f / a0;
_kernel.setParameters(b0 * normA0, b1 * normA0, b2 * normA0, a1 * normA0, a2 * normA0);
}
};
//
// Implements an all-pass filter using a biquad
//
class AudioFilterALL : public AudioFilter< AudioFilterALL >
{
public:
//
// helpers
//
void updateKernel() {
const float32_t omega = TWO_PI * _frequency / _sampleRate;
const float32_t cosOmega = cosf(omega);
const float32_t alpha = 0.5f * sinf(omega) / _slope;
/*
b0 = 1 - alpha
b1 = -2*cos(w0)
b2 = 1 + alpha
a0 = 1 + alpha
a1 = -2*cos(w0)
a2 = 1 - alpha
*/
const float32_t b0 = +1.0f - alpha;
const float32_t b1 = -2.0f * cosOmega;
const float32_t b2 = +1.0f + alpha;
const float32_t a0 = +1.0f + alpha;
const float32_t a1 = -2.0f * cosOmega;
const float32_t a2 = +1.0f - alpha;
const float32_t normA0 = 1.0f / a0;
_kernel.setParameters(b0 * normA0, b1 * normA0, b2 * normA0, a1 * normA0, a2 * normA0);
}
};
//
// Implements a single-band parametric EQ using a biquad "peaking EQ" configuration
//
class AudioFilterPEQ : public AudioFilter< AudioFilterPEQ >
{
public:
//
// helpers
//
void updateKernel() {
const float32_t a = _gain;
const float32_t omega = TWO_PI * _frequency / _sampleRate;
const float32_t cosOmega = cosf(omega);
const float32_t alpha = 0.5f * sinf(omega) / _slope;
const float32_t alphaMulA = alpha * a;
const float32_t alphaDivA = alpha / a;
/*
b0 = 1 + alpha*A
b1 = -2*cos(w0)
b2 = 1 - alpha*A
a0 = 1 + alpha/A
a1 = -2*cos(w0)
a2 = 1 - alpha/A
*/
const float32_t b0 = +1.0f + alphaMulA;
const float32_t b1 = -2.0f * cosOmega;
const float32_t b2 = +1.0f - alphaMulA;
const float32_t a0 = +1.0f + alphaDivA;
const float32_t a1 = -2.0f * cosOmega;
const float32_t a2 = +1.0f - alphaDivA;
const float32_t normA0 = 1.0f / a0;
_kernel.setParameters(b0 * normA0, b1 * normA0, b2 * normA0, a1 * normA0, a2 * normA0);
}
};
#endif // hifi_AudioFilter_h
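The render() loop above implements the Direct Form 1 difference equation y[n] = a0*x[n] + a1*x[n-1] + a2*x[n-2] - b1*y[n-1] - b2*y[n-2], and each updateKernel() divides the cookbook coefficients through by a0 before handing them to the biquad. A minimal usage sketch for one of the removed filters; the 24 kHz sample rate and +6 dB gain are illustrative:
// Illustrative sketch only -- assumes the AudioFilter.h removed above.
AudioFilterPEQ eq;                                 // single-band peaking EQ
eq.setParameters(24000.0f, 1000.0f, 2.0f, 1.0f);   // sampleRate, frequency, linear gain (~ +6 dB), Q
float32_t in[64] = { 0 };                          // one block of mono input
float32_t out[64];
eq.render(in, out, 64);                            // filtered block; state carries across calls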

View file

@@ -1,44 +0,0 @@
//
// AudioFilterBank.cpp
// libraries/audio/src
//
// Created by Craig Hansen-Sturm on 8/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "AudioFilterBank.h"
template<>
AudioFilterLSF1s::FilterParameter
AudioFilterLSF1s::_profiles[ AudioFilterLSF1s::_profileCount ][ AudioFilterLSF1s::_filterCount ] = {
// Freq Gain Slope
{ { 1000.0f, 1.0f, 1.0f } } // flat response (default)
};
template<>
AudioFilterHSF1s::FilterParameter
AudioFilterHSF1s::_profiles[ AudioFilterHSF1s::_profileCount ][ AudioFilterHSF1s::_filterCount ] = {
// Freq Gain Slope
{ { 1000.0f, 1.0f, 1.0f } } // flat response (default)
};
template<>
AudioFilterPEQ1s::FilterParameter
AudioFilterPEQ1s::_profiles[ AudioFilterPEQ1s::_profileCount ][ AudioFilterPEQ1s::_filterCount ] = {
// Freq Gain Q
{ { 1000.0f, 1.0f, 1.0f } } // flat response (default)
};
template<>
AudioFilterPEQ3m::FilterParameter
AudioFilterPEQ3m::_profiles[ AudioFilterPEQ3m::_profileCount ][ AudioFilterPEQ3m::_filterCount ] = {
// Freq Gain Q Freq Gain Q Freq Gain Q
{ { 300.0f, 1.0f, 1.0f }, { 1000.0f, 1.0f, 1.0f }, { 4000.0f, 1.0f, 1.0f } }, // flat response (default)
{ { 300.0f, 1.0f, 1.0f }, { 1000.0f, 1.0f, 1.0f }, { 4000.0f, 0.1f, 1.0f } }, // treble cut
{ { 300.0f, 0.1f, 1.0f }, { 1000.0f, 1.0f, 1.0f }, { 4000.0f, 1.0f, 1.0f } }, // bass cut
{ { 300.0f, 1.5f, 0.71f }, { 1000.0f, 0.5f, 1.0f }, { 4000.0f, 1.50f, 0.71f } } // smiley curve
};

View file

@@ -1,188 +0,0 @@
//
// AudioFilterBank.h
// libraries/audio/src
//
// Created by Craig Hansen-Sturm on 8/23/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioFilterBank_h
#define hifi_AudioFilterBank_h
#include <stdint.h>
#include <stdlib.h>
#include "AudioBuffer.h"
#include "AudioFilter.h"
#include "AudioFormat.h"
//
// Helper/convenience class that implements a bank of Filter objects
//
template< typename T, const uint32_t N, const uint32_t C >
class AudioFilterBank {
//
// types
//
struct FilterParameter {
float32_t _p1;
float32_t _p2;
float32_t _p3;
};
//
// private static data
//
static const uint32_t _filterCount = N;
static const uint32_t _channelCount = C;
static const uint32_t _profileCount = 4;
static FilterParameter _profiles[ _profileCount ][ _filterCount ];
//
// private data
//
T _filters[ _filterCount ][ _channelCount ];
float32_t* _buffer[ _channelCount ];
float32_t _sampleRate;
uint32_t _frameCount;
public:
//
// ctor/dtor
//
AudioFilterBank() :
_sampleRate(0.0f),
_frameCount(0) {
for (uint32_t i = 0; i < _channelCount; ++i) {
_buffer[ i ] = NULL;
}
}
~AudioFilterBank() {
finalize();
}
//
// public interface
//
void initialize(const float32_t sampleRate, const uint32_t frameCount = 0) {
finalize();
for (uint32_t i = 0; i < _channelCount; ++i) {
_buffer[i] = (float32_t*)malloc(frameCount * sizeof(float32_t));
}
_sampleRate = sampleRate;
_frameCount = frameCount;
reset();
loadProfile(0); // load default profile "flat response" into the bank (see AudioFilterBank.cpp)
}
void finalize() {
for (uint32_t i = 0; i < _channelCount; ++i) {
if (_buffer[i]) {
free (_buffer[i]);
_buffer[i] = NULL;
}
}
}
void loadProfile(int profileIndex) {
if (profileIndex >= 0 && profileIndex < (int)_profileCount) {
for (uint32_t i = 0; i < _filterCount; ++i) {
FilterParameter p = _profiles[profileIndex][i];
for (uint32_t j = 0; j < _channelCount; ++j) {
_filters[i][j].setParameters(_sampleRate, p._p1, p._p2, p._p3);
}
}
}
}
void setParameters(uint32_t filterStage, uint32_t filterChannel, const float32_t sampleRate, const float32_t frequency,
const float32_t gain, const float32_t slope) {
if (filterStage < _filterCount && filterChannel < _channelCount) {
_filters[filterStage][filterChannel].setParameters(sampleRate, frequency, gain, slope);
}
}
void getParameters(uint32_t filterStage, uint32_t filterChannel, float32_t& sampleRate, float32_t& frequency,
float32_t& gain, float32_t& slope) {
if (filterStage < _filterCount && filterChannel < _channelCount) {
_filters[filterStage][filterChannel].getParameters(sampleRate, frequency, gain, slope);
}
}
void render(const int16_t* in, int16_t* out, const uint32_t frameCount) {
if (frameCount > _frameCount) {
return;
}
const int scale = (1 << ((8 * sizeof(int16_t)) - 1));
// de-interleave and convert int16_t to float32 (normalized to -1. ... 1.)
for (uint32_t i = 0; i < frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
_buffer[j][i] = ((float)(*in++)) * (1.0f / scale);
}
}
// now step through each filter
for (uint32_t i = 0; i < _channelCount; ++i) {
for (uint32_t j = 0; j < _filterCount; ++j) {
_filters[j][i].render(&_buffer[i][0], &_buffer[i][0], frameCount);
}
}
// convert float32 to int16_t and interleave
for (uint32_t i = 0; i < frameCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
*out++ = (int16_t)(_buffer[j][i] * scale);
}
}
}
void render(AudioBufferFloat32& frameBuffer) {
float32_t** samples = frameBuffer.getFrameData();
for (uint32_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
for (uint32_t i = 0; i < _filterCount; ++i) {
_filters[i][j].render(samples[j], samples[j], frameBuffer.getFrameCount());
}
}
}
void reset() {
for (uint32_t i = 0; i < _filterCount; ++i) {
for (uint32_t j = 0; j < _channelCount; ++j) {
_filters[i][j].reset();
}
}
}
};
//
// Specializations of AudioFilterBank
//
typedef AudioFilterBank< AudioFilterLSF, 1, 1> AudioFilterLSF1m; // mono bank with one band of LSF
typedef AudioFilterBank< AudioFilterLSF, 1, 2> AudioFilterLSF1s; // stereo bank with one band of LSF
typedef AudioFilterBank< AudioFilterHSF, 1, 1> AudioFilterHSF1m; // mono bank with one band of HSF
typedef AudioFilterBank< AudioFilterHSF, 1, 2> AudioFilterHSF1s; // stereo bank with one band of HSF
typedef AudioFilterBank< AudioFilterPEQ, 1, 1> AudioFilterPEQ1m; // mono bank with one band of PEQ
typedef AudioFilterBank< AudioFilterPEQ, 2, 1> AudioFilterPEQ2m; // mono bank with two bands of PEQ
typedef AudioFilterBank< AudioFilterPEQ, 3, 1> AudioFilterPEQ3m; // mono bank with three bands of PEQ
typedef AudioFilterBank< AudioFilterPEQ, 1, 2> AudioFilterPEQ1s; // stereo bank with one band of PEQ
typedef AudioFilterBank< AudioFilterPEQ, 2, 2> AudioFilterPEQ2s; // stereo bank with two bands of PEQ
typedef AudioFilterBank< AudioFilterPEQ, 3, 2> AudioFilterPEQ3s; // stereo bank with three bands of PEQ
// etc....
#endif // hifi_AudioFilterBank_h
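A sketch of driving the removed bank end to end, using the profile table from AudioFilterBank.cpp above (profile index 1 is the "treble cut" row of the three-band mono specialization); the sizes are illustrative:
// Illustrative sketch only -- assumes the AudioFilterBank removed above.
AudioFilterPEQ3m bank;                 // mono bank, three PEQ bands
bank.initialize(24000.0f, 512);        // allocates scratch buffers, loads profile 0 (flat)
bank.loadProfile(1);                   // "treble cut" profile from AudioFilterBank.cpp
int16_t frame[512] = { 0 };
bank.render(frame, frame, 512);        // int16_t path: convert to float, run all bands, convert back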

View file

@@ -1,90 +0,0 @@
//
// AudioFormat.h
// libraries/audio/src
//
// Created by Craig Hansen-Sturm on 8/28/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioFormat_h
#define hifi_AudioFormat_h
#ifndef _FLOAT32_T
#define _FLOAT32_T
typedef float float32_t;
#endif
#ifndef _FLOAT64_T
#define _FLOAT64_T
typedef double float64_t;
#endif
#include <assert.h>
#include <cstring>
#include "AudioConstants.h"
//
// Audio format structure (currently for uncompressed streams only)
//
struct AudioFormat {
struct Flags {
uint32_t _isFloat : 1;
uint32_t _isSigned : 1;
uint32_t _isInterleaved : 1;
uint32_t _isBigEndian : 1;
uint32_t _isPacked : 1;
uint32_t _reserved : 27;
} _flags;
uint32_t _bytesPerFrame;
uint32_t _channelsPerFrame;
uint32_t _bitsPerChannel;
float64_t _sampleRate;
AudioFormat() {
memset(this, 0, sizeof(*this));
}
~AudioFormat() { }
AudioFormat& operator=(const AudioFormat& fmt) {
memcpy(this, &fmt, sizeof(*this));
return *this;
}
bool operator==(const AudioFormat& fmt) {
return memcmp(this, &fmt, sizeof(*this)) == 0;
}
bool operator!=(const AudioFormat& fmt) {
return memcmp(this, &fmt, sizeof(*this)) != 0;
}
void setCanonicalFloat32(uint32_t channels) {
assert(channels > 0 && channels <= 2);
_sampleRate = AudioConstants::SAMPLE_RATE;
_bitsPerChannel = sizeof(float32_t) * 8;
_channelsPerFrame = channels;
_bytesPerFrame = _channelsPerFrame * _bitsPerChannel / 8;
_flags._isFloat = true;
_flags._isInterleaved = _channelsPerFrame > 1;
}
void setCanonicalInt16(uint32_t channels) {
assert(channels > 0 && channels <= 2);
_sampleRate = AudioConstants::SAMPLE_RATE;
_bitsPerChannel = sizeof(int16_t) * 8;
_channelsPerFrame = channels;
_bytesPerFrame = _channelsPerFrame * _bitsPerChannel / 8;
_flags._isSigned = true;
_flags._isInterleaved = _channelsPerFrame > 1;
}
};
#endif // hifi_AudioFormat_h
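The canonical setters derive _bytesPerFrame as channels x bits / 8, so the two formats in this illustrative sketch occupy 4 and 8 bytes per frame respectively:
// Illustrative sketch only -- assumes the AudioFormat.h removed above.
AudioFormat deviceFormat;
deviceFormat.setCanonicalInt16(2);                    // 2 ch x 16 bit -> _bytesPerFrame == 4
AudioFormat mixFormat;
mixFormat.setCanonicalFloat32(2);                     // 2 ch x 32 bit -> _bytesPerFrame == 8
bool needsConversion = (deviceFormat != mixFormat);   // true here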

View file

@@ -1,48 +0,0 @@
//
// AudioGain.cpp
// libraries/audio/src
//
// Created by Craig Hansen-Sturm on 9/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <assert.h>
#include <algorithm>
#include <math.h>
#include "AudioFormat.h"
#include "AudioBuffer.h"
#include "AudioGain.h"
AudioGain::AudioGain() {
initialize();
}
AudioGain::~AudioGain() {
finalize();
}
void AudioGain::initialize() {
setParameters(1.0f,0.0f);
}
void AudioGain::finalize() {
}
void AudioGain::reset() {
initialize();
}
void AudioGain::setParameters(const float gain, const float mute) {
_gain = std::min(std::max(gain, 0.0f), 1.0f);
_mute = mute != 0.0f;
}
void AudioGain::getParameters(float& gain, float& mute) {
gain = _gain;
mute = _mute ? 1.0f : 0.0f;
}

View file

@@ -1,117 +0,0 @@
//
// AudioGain.h
// libraries/audio/src
//
// Created by Craig Hansen-Sturm on 9/1/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioGain_h
#define hifi_AudioGain_h
#include "AudioBuffer.h"
#include "AudioFormat.h"
class AudioGain
{
float32_t _gain;
bool _mute;
public:
AudioGain();
~AudioGain();
void initialize();
void finalize();
void reset();
void setParameters(const float gain, const float mute);
void getParameters(float& gain, float& mute);
void render(AudioBufferFloat32& frameBuffer);
};
inline void AudioGain::render(AudioBufferFloat32& frameBuffer) {
if (_mute) {
frameBuffer.zeroFrames();
return;
}
float32_t** samples = frameBuffer.getFrameData();
bool frameAlignment16 = (frameBuffer.getFrameCount() & 0x0F) == 0;
if (frameAlignment16) {
if (frameBuffer.getChannelCount() == 1) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 16) {
samples[0][i + 0] *= _gain;
samples[0][i + 1] *= _gain;
samples[0][i + 2] *= _gain;
samples[0][i + 3] *= _gain;
samples[0][i + 4] *= _gain;
samples[0][i + 5] *= _gain;
samples[0][i + 6] *= _gain;
samples[0][i + 7] *= _gain;
samples[0][i + 8] *= _gain;
samples[0][i + 9] *= _gain;
samples[0][i + 10] *= _gain;
samples[0][i + 11] *= _gain;
samples[0][i + 12] *= _gain;
samples[0][i + 13] *= _gain;
samples[0][i + 14] *= _gain;
samples[0][i + 15] *= _gain;
}
} else if (frameBuffer.getChannelCount() == 2) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 16) {
samples[0][i + 0] *= _gain;
samples[0][i + 1] *= _gain;
samples[0][i + 2] *= _gain;
samples[0][i + 3] *= _gain;
samples[0][i + 4] *= _gain;
samples[0][i + 5] *= _gain;
samples[0][i + 6] *= _gain;
samples[0][i + 7] *= _gain;
samples[0][i + 8] *= _gain;
samples[0][i + 9] *= _gain;
samples[0][i + 10] *= _gain;
samples[0][i + 11] *= _gain;
samples[0][i + 12] *= _gain;
samples[0][i + 13] *= _gain;
samples[0][i + 14] *= _gain;
samples[0][i + 15] *= _gain;
samples[1][i + 0] *= _gain;
samples[1][i + 1] *= _gain;
samples[1][i + 2] *= _gain;
samples[1][i + 3] *= _gain;
samples[1][i + 4] *= _gain;
samples[1][i + 5] *= _gain;
samples[1][i + 6] *= _gain;
samples[1][i + 7] *= _gain;
samples[1][i + 8] *= _gain;
samples[1][i + 9] *= _gain;
samples[1][i + 10] *= _gain;
samples[1][i + 11] *= _gain;
samples[1][i + 12] *= _gain;
samples[1][i + 13] *= _gain;
samples[1][i + 14] *= _gain;
samples[1][i + 15] *= _gain;
}
} else {
assert(0 && "unsupported channel format");
}
} else {
for (uint32_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 1) {
samples[j][i] *= _gain;
}
}
}
}
#endif // hifi_AudioGain_h

View file

@@ -59,6 +59,8 @@ static const float crossfadeTable[HRTF_BLOCK] = {
0.0024846123f, 0.0019026510f, 0.0013981014f, 0.0009710421f, 0.0006215394f, 0.0003496476f, 0.0001554090f, 0.0000388538f,
};
static const float TWOPI = 6.283185307f;
//
// on x86 architecture, assume that SSE2 is present
//
@@ -509,10 +511,7 @@ static void setAzimuthAndGain(float firCoef[4][HRTF_TAPS], float bqCoef[5][4], i
int index, float azimuth, float gain, int channel) {
// convert from radians to table units
//azimuth *= HRTF_AZIMUTHS / (2.0f * M_PI);
// convert from degrees to table units
azimuth *= HRTF_AZIMUTHS / 360.0f;
azimuth *= HRTF_AZIMUTHS / TWOPI;
// wrap to principal value
while (azimuth < 0.0f) {
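The hunk is cut off above, but the loop that follows wraps the azimuth into [0, HRTF_AZIMUTHS). A standalone sketch of the whole conversion, with HRTF_AZIMUTHS standing in for the table size defined in AudioHRTF:
// Standalone sketch of the radians -> table-units conversion; HRTF_AZIMUTHS is assumed.
static float azimuthToTableUnits(float azimuth) {   // azimuth in radians
    const float TWOPI = 6.283185307f;
    azimuth *= HRTF_AZIMUTHS / TWOPI;               // radians -> table units
    while (azimuth < 0.0f) {                        // wrap to principal value
        azimuth += HRTF_AZIMUTHS;
    }
    while (azimuth >= HRTF_AZIMUTHS) {
        azimuth -= HRTF_AZIMUTHS;
    }
    return azimuth;
}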

View file

@@ -26,6 +26,7 @@ static const float HRTF_GAIN = 0.5f; // HRTF global gain adjustment
class AudioHRTF {
public:
AudioHRTF() {};
//
// input: mono source
@@ -43,6 +44,8 @@ public:
void renderSilent(int16_t* input, float* output, int index, float azimuth, float gain, int numFrames);
private:
AudioHRTF(const AudioHRTF&) = delete;
AudioHRTF& operator=(const AudioHRTF&) = delete;
// SIMD channel assignments
enum Channel {

View file

@@ -1,52 +0,0 @@
//
// AudioPan.cpp
// hifi
//
// Created by Craig Hansen-Sturm on 9/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <assert.h>
#include <algorithm>
#include <math.h>
#include <SharedUtil.h>
#include "AudioRingBuffer.h"
#include "AudioFormat.h"
#include "AudioBuffer.h"
#include "AudioPan.h"
float32_t AudioPan::ONE_MINUS_EPSILON = 1.0f - EPSILON;
float32_t AudioPan::ZERO_PLUS_EPSILON = 0.0f + EPSILON;
float32_t AudioPan::ONE_HALF_MINUS_EPSILON = 0.5f - EPSILON;
float32_t AudioPan::ONE_HALF_PLUS_EPSILON = 0.5f + EPSILON;
AudioPan::AudioPan() {
initialize();
}
AudioPan::~AudioPan() {
finalize();
}
void AudioPan::initialize() {
setParameters(0.5f);
}
void AudioPan::finalize() {
}
void AudioPan::reset() {
initialize();
}
void AudioPan::setParameters(const float32_t pan) {
// pan ranges from 0.0f to 1.0f inclusive; 0.5f is the midpoint between full left and full right
_pan = std::min(std::max(pan, 0.0f), 1.0f);
updateCoefficients();
}
void AudioPan::getParameters(float32_t& pan) {
pan = _pan;
}

View file

@@ -1,126 +0,0 @@
//
// AudioPan.h
// hifi
//
// Created by Craig Hansen-Sturm on 9/1/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioPan_h
#define hifi_AudioPan_h
#include <NumericalConstants.h>
#include "AudioBuffer.h"
#include "AudioFormat.h"
class AudioPan
{
float32_t _pan;
float32_t _gainLeft;
float32_t _gainRight;
static float32_t ONE_MINUS_EPSILON;
static float32_t ZERO_PLUS_EPSILON;
static float32_t ONE_HALF_MINUS_EPSILON;
static float32_t ONE_HALF_PLUS_EPSILON;
void updateCoefficients();
public:
AudioPan();
~AudioPan();
void initialize();
void finalize();
void reset();
void setParameters(const float32_t pan);
void getParameters(float32_t& pan);
void render(AudioBufferFloat32& frameBuffer);
};
inline void AudioPan::render(AudioBufferFloat32& frameBuffer) {
if (frameBuffer.getChannelCount() != 2) {
return;
}
float32_t** samples = frameBuffer.getFrameData();
bool frameAlignment16 = (frameBuffer.getFrameCount() & 0x0F) == 0;
if (frameAlignment16) {
if (frameBuffer.getChannelCount() == 2) {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 16) {
samples[0][i + 0] *= _gainLeft;
samples[0][i + 1] *= _gainLeft;
samples[0][i + 2] *= _gainLeft;
samples[0][i + 3] *= _gainLeft;
samples[0][i + 4] *= _gainLeft;
samples[0][i + 5] *= _gainLeft;
samples[0][i + 6] *= _gainLeft;
samples[0][i + 7] *= _gainLeft;
samples[0][i + 8] *= _gainLeft;
samples[0][i + 9] *= _gainLeft;
samples[0][i + 10] *= _gainLeft;
samples[0][i + 11] *= _gainLeft;
samples[0][i + 12] *= _gainLeft;
samples[0][i + 13] *= _gainLeft;
samples[0][i + 14] *= _gainLeft;
samples[0][i + 15] *= _gainLeft;
samples[1][i + 0] *= _gainRight;
samples[1][i + 1] *= _gainRight;
samples[1][i + 2] *= _gainRight;
samples[1][i + 3] *= _gainRight;
samples[1][i + 4] *= _gainRight;
samples[1][i + 5] *= _gainRight;
samples[1][i + 6] *= _gainRight;
samples[1][i + 7] *= _gainRight;
samples[1][i + 8] *= _gainRight;
samples[1][i + 9] *= _gainRight;
samples[1][i + 10] *= _gainRight;
samples[1][i + 11] *= _gainRight;
samples[1][i + 12] *= _gainRight;
samples[1][i + 13] *= _gainRight;
samples[1][i + 14] *= _gainRight;
samples[1][i + 15] *= _gainRight;
}
} else {
assert(0 && "unsupported channel format");
}
} else {
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); i += 1) {
samples[0][i] *= _gainLeft;
samples[1][i] *= _gainRight;
}
}
}
inline void AudioPan::updateCoefficients() {
// implement constant power sin^2 + cos^2 = 1 panning law
if (_pan >= ONE_MINUS_EPSILON) { // full right
_gainLeft = 0.0f;
_gainRight = 1.0f;
} else if (_pan <= ZERO_PLUS_EPSILON) { // full left
_gainLeft = 1.0f;
_gainRight = 0.0f;
} else if ((_pan >= ONE_HALF_MINUS_EPSILON) && (_pan <= ONE_HALF_PLUS_EPSILON)) { // center
_gainLeft = 1.0f / SQUARE_ROOT_OF_2;
_gainRight = 1.0f / SQUARE_ROOT_OF_2;
} else { // intermediate cases
_gainLeft = cosf(PI_OVER_TWO * _pan); // theta = pan * pi/2 keeps gainLeft^2 + gainRight^2 == 1
_gainRight = sinf(PI_OVER_TWO * _pan);
}
}
#endif // hifi_AudioPan_h
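A quick standalone check of the constant-power law used above: with theta = pan * pi/2, gainLeft^2 + gainRight^2 stays at 1 across the whole sweep, which is what keeps perceived loudness constant as a source pans:
// Standalone verification sketch of the constant-power pan law.
#include <cassert>
#include <cmath>
int main() {
    for (int step = 0; step <= 8; ++step) {
        float pan = step / 8.0f;                    // 0.0 (full left) .. 1.0 (full right)
        float theta = 1.5707963f * pan;             // pan * pi/2
        float gainLeft = std::cos(theta);
        float gainRight = std::sin(theta);
        assert(std::fabs(gainLeft * gainLeft + gainRight * gainRight - 1.0f) < 1e-5f);
    }
    return 0;
}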

View file

@@ -141,11 +141,17 @@ public:
}
void readSamples(int16_t* dest, int numSamples) {
int16_t* at = _at;
for (int i = 0; i < numSamples; i++) {
*dest = *at;
++dest;
at = (at == _bufferLast) ? _bufferFirst : at + 1;
auto samplesToEnd = _bufferLast - _at + 1;
if (samplesToEnd >= numSamples) {
memcpy(dest, _at, numSamples * sizeof(int16_t));
_at += numSamples;
} else {
auto samplesFromStart = numSamples - samplesToEnd;
memcpy(dest, _at, samplesToEnd * sizeof(int16_t));
memcpy(dest + samplesToEnd, _bufferFirst, samplesFromStart * sizeof(int16_t));
_at = _bufferFirst + samplesFromStart;
}
}
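The new readSamples() replaces the removed one-sample-at-a-time loop with at most two memcpy calls: one contiguous run up to the end of the ring, then, only when the read wraps, a second run from the start. A standalone sketch of the same two-segment read with the buffer bounds passed explicitly:
// Standalone sketch of the two-segment circular read used above.
#include <cstdint>
#include <cstring>
static void ringRead(int16_t* dest, int numSamples,
                     const int16_t* bufferFirst, const int16_t* bufferLast,
                     const int16_t*& at) {
    auto samplesToEnd = bufferLast - at + 1;        // contiguous samples before the wrap point
    if (samplesToEnd >= numSamples) {
        memcpy(dest, at, numSamples * sizeof(int16_t));
        at += numSamples;                           // no wrap needed
    } else {
        auto samplesFromStart = numSamples - samplesToEnd;
        memcpy(dest, at, samplesToEnd * sizeof(int16_t));
        memcpy(dest + samplesToEnd, bufferFirst, samplesFromStart * sizeof(int16_t));
        at = bufferFirst + samplesFromStart;        // continue just past the wrapped run
    }
}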

View file

@@ -1,21 +0,0 @@
//
// AudioSourceNoise.cpp
// hifi
//
// Created by Craig Hansen-Sturm on 8/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <assert.h>
#include <math.h>
#include <SharedUtil.h>
#include "AudioRingBuffer.h"
#include "AudioFormat.h"
#include "AudioBuffer.h"
#include "AudioSourceNoise.h"
template<>
uint32_t AudioSourcePinkNoise::_randomSeed = 1974; // a truly random number

View file

@@ -1,103 +0,0 @@
//
// AudioSourceNoise.h
// hifi
//
// Created by Craig Hansen-Sturm on 9/1/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// Adapted from code by Phil Burk http://www.firstpr.com.au/dsp/pink-noise/
//
#ifndef hifi_AudioSourceNoise_h
#define hifi_AudioSourceNoise_h
#include <stdint.h>
#include <cstring>
#include "AudioBuffer.h"
#include "AudioFormat.h"
template< const uint16_t N = 30>
class AudioSourceNoise
{
static const uint16_t _randomRows = N;
static const uint16_t _randomBits = 24;
static const uint16_t _randomShift = (sizeof(int32_t) * 8) - _randomBits;
static uint32_t _randomSeed;
int32_t _rows[_randomRows];
int32_t _runningSum; // used to optimize summing of generators.
uint16_t _index; // incremented each sample.
uint16_t _indexMask; // index wrapped by ANDing with this mask.
float32_t _scale; // used to scale within range of -1.0 to +1.0
static uint32_t generateRandomNumber() {
_randomSeed = (_randomSeed * 196314165) + 907633515;
return _randomSeed >> _randomShift;
}
public:
AudioSourceNoise() {
initialize();
}
~AudioSourceNoise() {
finalize();
}
void initialize() {
memset(_rows, 0, _randomRows * sizeof(int32_t));
_runningSum = 0;
_index = 0;
_indexMask = (uint16_t)((1 << _randomRows) - 1);
_scale = 1.0f / ((_randomRows + 1) * (1 << (_randomBits - 1)));
}
void finalize() {
}
void reset() {
initialize();
}
void setParameters(void) {
}
void getParameters(void) {
}
void render(AudioBufferFloat32& frameBuffer) {
uint32_t randomNumber;
float32_t** samples = frameBuffer.getFrameData();
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); ++i) {
for (uint32_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
_index = (_index + 1) & _indexMask; // increment and mask index.
if (_index != 0) { // if index is zero, don't update any random values.
uint32_t numZeros = 0; // determine how many trailing zeros in _index
uint32_t tmp = _index;
while ((tmp & 1) == 0) {
tmp >>= 1;
numZeros++;
}
// replace the indexed _rows random value. subtract and add back to _runningSum instead
// of adding all the random values together. only one value changes each time.
_runningSum -= _rows[numZeros];
randomNumber = generateRandomNumber();
_runningSum += randomNumber;
_rows[numZeros] = randomNumber;
}
// add extra white noise value and scale between -1.0 and +1.0
samples[j][i] = (_runningSum + generateRandomNumber()) * _scale;
}
}
}
};
typedef AudioSourceNoise<> AudioSourcePinkNoise;
#endif // hifi_AudioSourceNoise_h
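This is the Voss-McCartney pink-noise generator: each sample refreshes exactly one of the N white-noise rows, chosen by the count of trailing zeros in a wrapping counter, so low rows update often and high rows rarely, approximating a 1/f spectrum. An illustrative usage sketch, assuming the classes removed above:
// Illustrative sketch only -- assumes the AudioSourceNoise.h removed above.
AudioSourcePinkNoise noise;
AudioBufferFloat32 frame;
frame.initialize(1, 512);     // mono, 512 frames
noise.render(frame);          // fills the frame with pink noise scaled to [-1.0, +1.0]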

View file

@@ -1,55 +0,0 @@
//
// AudioSourceTone.cpp
// libraries/audio/src
//
// Created by Craig Hansen-Sturm on 8/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <algorithm>
#include <NumericalConstants.h>
#include "AudioSourceTone.h"
AudioSourceTone::AudioSourceTone() {
initialize();
}
AudioSourceTone::~AudioSourceTone() {
finalize();
}
void AudioSourceTone::finalize() {
}
void AudioSourceTone::reset() {
}
void AudioSourceTone::updateCoefficients() {
_omega = _frequency / _sampleRate * TWO_PI;
_epsilon = 2.0f * sinf(_omega / 2.0f);
_yq1 = cosf(-1.0f * _omega);
_y1 = sinf(+1.0f * _omega);
}
void AudioSourceTone::initialize() {
const float32_t FREQUENCY_220_HZ = 220.0f;
const float32_t GAIN_MINUS_6DB = 0.501f;
setParameters(AudioConstants::SAMPLE_RATE, FREQUENCY_220_HZ, GAIN_MINUS_6DB);
}
void AudioSourceTone::setParameters(const float32_t sampleRate, const float32_t frequency, const float32_t amplitude) {
_sampleRate = std::max(sampleRate, 1.0f);
_frequency = std::max(frequency, 1.0f);
_amplitude = std::min(std::max(amplitude, 0.0f), 1.0f);
updateCoefficients();
}
void AudioSourceTone::getParameters(float32_t& sampleRate, float32_t& frequency, float32_t& amplitude) {
sampleRate = _sampleRate;
frequency = _frequency;
amplitude = _amplitude;
}

View file

@@ -1,65 +0,0 @@
//
// AudioSourceTone.h
// hifi
//
// Created by Craig Hansen-Sturm on 9/1/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioSourceTone_h
#define hifi_AudioSourceTone_h
#include "AudioBuffer.h"
#include "AudioFormat.h"
// Implements a two-pole Gordon-Smith oscillator
class AudioSourceTone {
float32_t _frequency;
float32_t _amplitude;
float32_t _sampleRate;
float32_t _omega;
float32_t _epsilon;
float32_t _yq1;
float32_t _y1;
void updateCoefficients();
public:
AudioSourceTone();
~AudioSourceTone();
void initialize();
void finalize();
void reset();
void setParameters(const float32_t sampleRate, const float32_t frequency, const float32_t amplitude);
void getParameters(float32_t& sampleRate, float32_t& frequency, float32_t& amplitude);
void render(AudioBufferFloat32& frameBuffer);
};
inline void AudioSourceTone::render(AudioBufferFloat32& frameBuffer) {
float32_t** samples = frameBuffer.getFrameData();
float32_t yq;
float32_t y;
for (uint32_t i = 0; i < frameBuffer.getFrameCount(); ++i) {
yq = _yq1 - (_epsilon * _y1);
y = _y1 + (_epsilon * yq);
// update delays
_yq1 = yq;
_y1 = y;
for (uint32_t j = 0; j < frameBuffer.getChannelCount(); ++j) {
samples[j][i] = _amplitude * y;
}
}
}
#endif // hifi_AudioSourceTone_h
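The Gordon-Smith recurrence above generates a sine with two multiply-adds per sample and no table lookup: yq[n] = yq[n-1] - epsilon * y[n-1], then y[n] = y[n-1] + epsilon * yq[n], with epsilon = 2 * sin(omega / 2). An illustrative usage sketch:
// Illustrative sketch only -- assumes the AudioSourceTone.h removed above.
AudioSourceTone tone;          // defaults to 220 Hz at roughly -6 dB
AudioBufferFloat32 frame;
frame.initialize(2, 512);      // stereo, 512 frames
tone.render(frame);            // the same sine sample is written to both channels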

View file

@@ -116,6 +116,8 @@ public:
bool lastPopSucceeded() const { return _lastPopSucceeded; };
const AudioRingBuffer::ConstIterator& getLastPopOutput() const { return _lastPopOutput; }
quint64 usecsSinceLastPacket() { return usecTimestampNow() - _lastPacketReceivedTime; }
void setToStarved();
void setSettings(const Settings& settings);
@@ -171,7 +173,7 @@ public:
float getWetLevel() const { return _wetLevel; }
void setReverb(float reverbTime, float wetLevel);
void clearReverb() { _hasReverb = false; }
public slots:
/// This function should be called every second for all the stats to function properly. If dynamic jitter buffers
/// is enabled, those stats are used to calculate _desiredJitterBufferFrames.

View file

@@ -23,15 +23,15 @@ public:
float getRadius() const { return _radius; }
float getAttenuationRatio() const { return _attenuationRatio; }
QUuid getStreamIdentifier() const { return _streamIdentifier; }
virtual const QUuid& getStreamIdentifier() const override { return _streamIdentifier; }
private:
// disallow copying of InjectedAudioStream objects
InjectedAudioStream(const InjectedAudioStream&);
InjectedAudioStream& operator= (const InjectedAudioStream&);
AudioStreamStats getAudioStreamStats() const;
int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
AudioStreamStats getAudioStreamStats() const override;
int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) override;
const QUuid _streamIdentifier;
float _radius;

View file

@@ -28,7 +28,10 @@ public:
};
PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, const InboundAudioStream::Settings& settings);
const QUuid DEFAULT_STREAM_IDENTIFIER = QUuid();
virtual const QUuid& getStreamIdentifier() const { return DEFAULT_STREAM_IDENTIFIER; }
virtual void resetStats();
virtual AudioStreamStats getAudioStreamStats() const;
@@ -40,7 +43,6 @@ public:
bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
bool isStereo() const { return _isStereo; }
bool ignorePenumbraFilter() { return _ignorePenumbra; }
PositionalAudioStream::Type getType() const { return _type; }
const glm::vec3& getPosition() const { return _position; }
const glm::quat& getOrientation() const { return _orientation; }

View file

@@ -24,8 +24,6 @@
#include <SharedUtil.h>
#include "AudioRingBuffer.h"
#include "AudioFormat.h"
#include "AudioBuffer.h"
#include "AudioLogging.h"
#include "Sound.h"

View file

@@ -11,8 +11,9 @@
#include "NodeData.h"
NodeData::NodeData() :
_mutex()
NodeData::NodeData(const QUuid& nodeID) :
_mutex(),
_nodeID(nodeID)
{
}

View file

@@ -24,14 +24,17 @@ class Node;
class NodeData : public QObject {
Q_OBJECT
public:
NodeData();
NodeData(const QUuid& nodeID = QUuid());
virtual ~NodeData() = 0;
virtual int parseData(ReceivedMessage& message) { return 0; }
const QUuid& getNodeID() const { return _nodeID; }
QMutex& getMutex() { return _mutex; }
private:
QMutex _mutex;
QUuid _nodeID;
};
#endif // hifi_NodeData_h

View file

@@ -54,24 +54,6 @@ ScriptAudioInjector* AudioScriptingInterface::playSound(Sound* sound, const Audi
}
}
void AudioScriptingInterface::injectGeneratedNoise(bool inject) {
if (_localAudioInterface) {
_localAudioInterface->enableAudioSourceInject(inject);
}
}
void AudioScriptingInterface::selectPinkNoise() {
if (_localAudioInterface) {
_localAudioInterface->selectAudioSourcePinkNoise();
}
}
void AudioScriptingInterface::selectSine440() {
if (_localAudioInterface) {
_localAudioInterface->selectAudioSourceSine440();
}
}
void AudioScriptingInterface::setStereoInput(bool stereo) {
if (_localAudioInterface) {
_localAudioInterface->setIsStereoInput(stereo);

View file

@@ -29,10 +29,6 @@ protected:
// this method is protected to stop C++ callers from calling, but invokable from script
Q_INVOKABLE ScriptAudioInjector* playSound(Sound* sound, const AudioInjectorOptions& injectorOptions = AudioInjectorOptions());
Q_INVOKABLE void injectGeneratedNoise(bool inject);
Q_INVOKABLE void selectPinkNoise();
Q_INVOKABLE void selectSine440();
Q_INVOKABLE void setStereoInput(bool stereo);
signals: