Merge branch 'master' of https://github.com/highfidelity/hifi into orange

Sam Cake, 2015-05-13 16:00:42 -07:00
commit d13106f4c9
9 changed files with 282 additions and 231 deletions

View file

@ -49,12 +49,12 @@ AssignmentClient::AssignmentClient(Assignment::Type requestAssignmentType, QStri
LogUtils::init();
QSettings::setDefaultFormat(QSettings::IniFormat);
// create a NodeList as an unassigned client
DependencyManager::registerInheritance<LimitedNodeList, NodeList>();
auto addressManager = DependencyManager::set<AddressManager>();
auto nodeList = DependencyManager::set<NodeList>(NodeType::Unassigned); // Order is important
auto animationCache = DependencyManager::set<AnimationCache>();
auto avatarHashMap = DependencyManager::set<AvatarHashMap>();
auto entityScriptingInterface = DependencyManager::set<EntityScriptingInterface>();
@ -76,18 +76,18 @@ AssignmentClient::AssignmentClient(Assignment::Type requestAssignmentType, QStri
qDebug() << "The destination wallet UUID for credits is" << uuidStringWithoutCurlyBraces(walletUUID);
_requestAssignment.setWalletUUID(walletUUID);
}
// check for an overridden assignment server hostname
if (assignmentServerHostname != "") {
// change the hostname for our assignment server
_assignmentServerHostname = assignmentServerHostname;
}
_assignmentServerSocket = HifiSockAddr(_assignmentServerHostname, assignmentServerPort, true);
nodeList->setAssignmentServerSocket(_assignmentServerSocket);
qDebug() << "Assignment server socket is" << _assignmentServerSocket;
// call a timer function every ASSIGNMENT_REQUEST_INTERVAL_MSECS to ask for assignment, if required
qDebug() << "Waiting for assignment -" << _requestAssignment;
@ -104,35 +104,35 @@ AssignmentClient::AssignmentClient(Assignment::Type requestAssignmentType, QStri
// connections to AccountManager for authentication
connect(&AccountManager::getInstance(), &AccountManager::authRequired,
this, &AssignmentClient::handleAuthenticationRequest);
// Create Singleton objects on main thread
NetworkAccessManager::getInstance();
// did we get an assignment-client monitor port?
if (assignmentMonitorPort > 0) {
_assignmentClientMonitorSocket = HifiSockAddr(DEFAULT_ASSIGNMENT_CLIENT_MONITOR_HOSTNAME, assignmentMonitorPort);
qDebug() << "Assignment-client monitor socket is" << _assignmentClientMonitorSocket;
// Hook up a timer to send this child's status to the Monitor once per second
setUpStatsToMonitor();
}
}
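setUpStatsToMonitor() itself is not part of this diff; given that stopAssignmentClient() below stops _statsTimerACM and a sendStatsPacketToACM() slot appears in a later hunk, it presumably amounts to a sketch like:

void AssignmentClient::setUpStatsToMonitor() {
    // assumed wiring: report this child's stats to the monitor once per second
    connect(&_statsTimerACM, &QTimer::timeout, this, &AssignmentClient::sendStatsPacketToACM);
    _statsTimerACM.start(1000);
}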
void AssignmentClient::stopAssignmentClient() {
qDebug() << "Forced stop of assignment-client.";
_requestTimer.stop();
_statsTimerACM.stop();
if (_currentAssignment) {
// grab the thread for the current assignment
QThread* currentAssignmentThread = _currentAssignment->thread();
// ask the current assignment to stop
QMetaObject::invokeMethod(_currentAssignment, "stop", Qt::BlockingQueuedConnection);
// ask the current assignment to delete itself on its thread
_currentAssignment->deleteLater();
@ -148,9 +148,9 @@ void AssignmentClient::stopAssignmentClient() {
void AssignmentClient::aboutToQuit() {
stopAssignmentClient();
// clear the log handler so that Qt doesn't call the destructor on LogHandler
qInstallMessageHandler(0);
}
@ -175,9 +175,9 @@ void AssignmentClient::sendStatsPacketToACM() {
void AssignmentClient::sendAssignmentRequest() {
if (!_currentAssignment) {
auto nodeList = DependencyManager::get<NodeList>();
if (_assignmentServerHostname == "localhost") {
// we want to check again for the local domain-server port in case the DS has restarted
quint16 localAssignmentServerPort;
@ -186,13 +186,13 @@ void AssignmentClient::sendAssignmentRequest() {
if (localAssignmentServerPort != _assignmentServerSocket.getPort()) {
qDebug() << "Port for local assignment server read from shared memory is"
<< localAssignmentServerPort;
_assignmentServerSocket.setPort(localAssignmentServerPort);
nodeList->setAssignmentServerSocket(_assignmentServerSocket);
}
}
}
nodeList->sendAssignment(_requestAssignment);
}
}
@ -232,13 +232,15 @@ void AssignmentClient::readPendingDatagrams() {
connect(workerThread, &QThread::started, _currentAssignment.data(), &ThreadedAssignment::run);
// Once the ThreadedAssignment says it is finished - we ask it to deleteLater
// This is a queued connection so that it is put into the event loop to be processed by the worker
// thread when it is ready.
connect(_currentAssignment.data(), &ThreadedAssignment::finished, _currentAssignment.data(),
&ThreadedAssignment::deleteLater, Qt::QueuedConnection);
// once it is deleted, we quit the worker thread
connect(_currentAssignment.data(), &ThreadedAssignment::destroyed, workerThread, &QThread::quit);
// have the worker thread remove itself once it is done
connect(workerThread, &QThread::finished, workerThread, &QThread::deleteLater);
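The four connects above form the standard Qt worker-thread lifecycle chain: started drives the work, the queued deleteLater destroys the worker on its own thread once pending events drain, destroyed quits the thread, and finished reaps the QThread. A self-contained sketch of the same pattern, with illustrative names rather than hifi types:

#include <QCoreApplication>
#include <QThread>
#include <QObject>
#include <QDebug>

class Worker : public QObject {
    Q_OBJECT
public slots:
    void run() { // runs on the worker thread once its event loop starts
        qDebug() << "working on" << QThread::currentThread();
        emit finished();
    }
signals:
    void finished();
};

int main(int argc, char** argv) {
    QCoreApplication app(argc, argv);
    QThread* thread = new QThread;
    Worker* worker = new Worker;
    worker->moveToThread(thread);
    QObject::connect(thread, &QThread::started, worker, &Worker::run);
    // queued so the deletion is processed by the worker thread's event loop
    QObject::connect(worker, &Worker::finished, worker, &QObject::deleteLater,
                     Qt::QueuedConnection);
    // once the worker is gone, spin the thread down, then reap the QThread
    QObject::connect(worker, &QObject::destroyed, thread, &QThread::quit);
    QObject::connect(thread, &QThread::finished, thread, &QThread::deleteLater);
    QObject::connect(thread, &QThread::finished, &app, &QCoreApplication::quit);
    thread->start();
    return app.exec();
}

#include "main.moc"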
@ -264,7 +266,7 @@ void AssignmentClient::readPendingDatagrams() {
if (senderSockAddr.getAddress() == QHostAddress::LocalHost ||
senderSockAddr.getAddress() == QHostAddress::LocalHostIPv6) {
qDebug() << "AssignmentClientMonitor at" << senderSockAddr << "requested stop via PacketTypeStopNode.";
QCoreApplication::quit();
} else {
qDebug() << "Got a stop packet from other than localhost.";
@ -306,7 +308,7 @@ void AssignmentClient::assignmentCompleted() {
// we expect that by the time we get here, the previous assignment has completely cleaned up
assert(_currentAssignment.isNull());
// reset our current assignment pointer to NULL now that it has been deleted
_currentAssignment = NULL;
// reset the logging target to the CHILD_TARGET_NAME
@ -317,7 +319,7 @@ void AssignmentClient::assignmentCompleted() {
auto nodeList = DependencyManager::get<NodeList>();
// have us handle incoming NodeList datagrams again, and make sure our ThreadedAssignment isn't handling them
connect(&nodeList->getNodeSocket(), &QUdpSocket::readyRead, this, &AssignmentClient::readPendingDatagrams);
// reset our NodeList by switching back to unassigned and clearing the list
nodeList->setOwnerType(NodeType::Unassigned);

View file

@ -24,7 +24,7 @@ class AssignmentClient : public QObject {
public:
AssignmentClient(Assignment::Type requestAssignmentType, QString assignmentPool,
QUuid walletUUID, QString assignmentServerHostname, quint16 assignmentServerPort,
quint16 assignmentMonitorPort);
private slots:
void sendAssignmentRequest();

View file

@ -109,11 +109,11 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
// Basically, we'll repeat that last frame until the stream has a new frame to mix. Depending on how many times
// we've repeated that frame in a row, we'll gradually fade that repeated frame into silence.
// This improves the perceived quality of the audio slightly.
bool showDebug = false; // (randFloat() < 0.05f);
float repeatedFrameFadeFactor = 1.0f;
if (!streamToAdd->lastPopSucceeded()) {
if (_streamSettings._repetitionWithFade && !streamToAdd->getLastPopOutput().isNull()) {
// repetition with fade is enabled, and we do have a valid previous frame to repeat.
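The repeatedFrameFadeFactor set up here scales the repeated frame toward silence as repeats accumulate; one plausible shape is a linear ramp over a fixed repeat budget (the helper name and constant below are illustrative assumptions, not the mixer's actual formula):

// hypothetical: full volume on the first repeat, fading linearly to silence
float repeatedFrameFadeFactorFor(int consecutiveRepeats) {
    const int MAX_REPEATS_BEFORE_SILENCE = 10; // assumed budget
    float fade = 1.0f - (float)consecutiveRepeats / (float)MAX_REPEATS_BEFORE_SILENCE;
    return fade > 0.0f ? fade : 0.0f;
}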
@ -126,73 +126,73 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
return 0;
}
}
// at this point, we know streamToAdd's last pop output is valid
// if the frame we're about to mix is silent, bail
if (streamToAdd->getLastPopOutputLoudness() == 0.0f) {
return 0;
}
float bearingRelativeAngleToSource = 0.0f;
float attenuationCoefficient = 1.0f;
int numSamplesDelay = 0;
float weakChannelAmplitudeRatio = 1.0f;
// Is the source that I am mixing my own?
bool sourceIsSelf = (streamToAdd == listeningNodeStream);
glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream->getPosition();
float distanceBetween = glm::length(relativePosition);
if (distanceBetween < EPSILON) {
distanceBetween = EPSILON;
}
if (streamToAdd->getLastPopOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
// according to mixer performance we have decided this does not get to be mixed in
// bail out
return 0;
}
++_sumMixes;
if (streamToAdd->getType() == PositionalAudioStream::Injector) {
attenuationCoefficient *= reinterpret_cast<InjectedAudioStream*>(streamToAdd)->getAttenuationRatio();
if (showDebug) {
qDebug() << "AttenuationRatio: " << reinterpret_cast<InjectedAudioStream*>(streamToAdd)->getAttenuationRatio();
}
}
if (showDebug) {
qDebug() << "distance: " << distanceBetween;
}
glm::quat inverseOrientation = glm::inverse(listeningNodeStream->getOrientation());
if (!sourceIsSelf && (streamToAdd->getType() == PositionalAudioStream::Microphone)) {
// source is another avatar, apply fixed off-axis attenuation to make them quieter as they turn away from listener
glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd->getOrientation()) * relativePosition;
float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
glm::normalize(rotatedListenerPosition));
const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
(OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / PI_OVER_TWO));
if (showDebug) {
qDebug() << "angleOfDelivery" << angleOfDelivery << "offAxisCoefficient: " << offAxisCoefficient;
}
// multiply the current attenuation coefficient by the calculated off axis coefficient
attenuationCoefficient *= offAxisCoefficient;
}
float attenuationPerDoublingInDistance = _attenuationPerDoublingInDistance;
for (int i = 0; i < _zonesSettings.length(); ++i) {
if (_audioZones[_zonesSettings[i].source].contains(streamToAdd->getPosition()) &&
@ -201,67 +201,67 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
break;
}
}
if (distanceBetween >= ATTENUATION_BEGINS_AT_DISTANCE) {
// calculate the distance coefficient using the distance to this node
float distanceCoefficient = 1 - (logf(distanceBetween / ATTENUATION_BEGINS_AT_DISTANCE) / logf(2.0f)
* attenuationPerDoublingInDistance);
if (distanceCoefficient < 0) {
distanceCoefficient = 0;
}
// multiply the current attenuation coefficient by the distance coefficient
attenuationCoefficient *= distanceCoefficient;
if (showDebug) {
qDebug() << "distanceCoefficient: " << distanceCoefficient;
}
}
if (!sourceIsSelf) {
// Compute sample delay for the two ears to create phase panning
glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;
// project the rotated source position vector onto the XZ plane
rotatedSourcePosition.y = 0.0f;
// produce an oriented angle about the y-axis
bearingRelativeAngleToSource = glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f),
glm::normalize(rotatedSourcePosition),
glm::vec3(0.0f, 1.0f, 0.0f));
const float PHASE_AMPLITUDE_RATIO_AT_90 = 0.5;
// figure out the number of samples of delay and the ratio of the amplitude
// in the weak channel for audio spatialization
float sinRatio = fabsf(sinf(bearingRelativeAngleToSource));
numSamplesDelay = SAMPLE_PHASE_DELAY_AT_90 * sinRatio;
weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
if (distanceBetween < RADIUS_OF_HEAD) {
// Diminish phase panning if source would be inside head
numSamplesDelay *= distanceBetween / RADIUS_OF_HEAD;
weakChannelAmplitudeRatio += (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio) * distanceBetween / RADIUS_OF_HEAD;
}
}
if (showDebug) {
qDebug() << "attenuation: " << attenuationCoefficient;
qDebug() << "bearingRelativeAngleToSource: " << bearingRelativeAngleToSource << " numSamplesDelay: " << numSamplesDelay;
}
AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd->getLastPopOutput();
if (!streamToAdd->isStereo()) {
// this is a mono stream, which means it gets full attenuation and spatialization
// we need to do several things in this process:
// 1) convert from mono to stereo by copying each input sample into the left and right output samples
// 2) apply an attenuation AND fade to all samples (left and right)
// 3) based on the bearing relative angle to the source we will weaken and delay either the left or
// right channel of the input into the output
// 4) because one of these channels is delayed, we will need to use historical samples from
// the input stream for that delayed channel
// Mono input to stereo output (item 1 above)
@ -274,12 +274,12 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
// determine which side is weak and delayed (item 3 above)
bool rightSideWeakAndDelayed = (bearingRelativeAngleToSource > 0.0f);
// since we're converting from mono to stereo, we'll use these two indices to step through
// the output samples. we'll increment each index independently in the loop
int leftDestinationIndex = 0;
int rightDestinationIndex = 1;
// One of our two channels will be delayed (determined below). We'll use this index to step
// through filling in our output with the historical samples for the delayed channel. (item 4 above)
int delayedChannelHistoricalAudioOutputIndex;
@ -287,14 +287,14 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
// All samples will be attenuated by at least this much
float leftSideAttenuation = attenuationAndFade;
float rightSideAttenuation = attenuationAndFade;
// The weak/delayed channel will be attenuated by this additional amount
float attenuationAndWeakChannelRatioAndFade = attenuationAndFade * weakChannelAmplitudeRatio;
// Now, based on the determination of which side is weak and delayed, set up our true starting point
// for our indexes, as well as the appropriate attenuation for each channel
if (rightSideWeakAndDelayed) {
delayedChannelHistoricalAudioOutputIndex = rightDestinationIndex;
rightSideAttenuation = attenuationAndWeakChannelRatioAndFade;
rightDestinationIndex += (numSamplesDelay * OUTPUT_SAMPLES_PER_INPUT_SAMPLE);
} else {
@ -315,7 +315,7 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
for (int i = 0; i < numSamplesDelay; i++) {
int16_t originalHistoricalSample = *delayStreamSourceSamples;
_preMixSamples[delayedChannelHistoricalAudioOutputIndex] += originalHistoricalSample
* attenuationAndWeakChannelRatioAndFade;
++delayStreamSourceSamples; // move our input pointer
delayedChannelHistoricalAudioOutputIndex += OUTPUT_SAMPLES_PER_INPUT_SAMPLE; // move our output sample
@ -339,7 +339,7 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
leftDestinationIndex += OUTPUT_SAMPLES_PER_INPUT_SAMPLE;
rightDestinationIndex += OUTPUT_SAMPLES_PER_INPUT_SAMPLE;
}
} else {
int stereoDivider = streamToAdd->isStereo() ? 1 : 2;
@ -355,44 +355,44 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
if (!sourceIsSelf && _enableFilter && !streamToAdd->ignorePenumbraFilter()) {
const float TWO_OVER_PI = 2.0f / PI;
const float ZERO_DB = 1.0f;
const float NEGATIVE_ONE_DB = 0.891f;
const float NEGATIVE_THREE_DB = 0.708f;
const float FILTER_GAIN_AT_0 = ZERO_DB; // source is in front
const float FILTER_GAIN_AT_90 = NEGATIVE_ONE_DB; // source is incident to left or right ear
const float FILTER_GAIN_AT_180 = NEGATIVE_THREE_DB; // source is behind
const float FILTER_CUTOFF_FREQUENCY_HZ = 1000.0f;
const float penumbraFilterFrequency = FILTER_CUTOFF_FREQUENCY_HZ; // constant frequency
const float penumbraFilterSlope = NEGATIVE_THREE_DB; // constant slope
float penumbraFilterGainL;
float penumbraFilterGainR;
// variable gain calculation broken down by quadrant
if (-bearingRelativeAngleToSource < -PI_OVER_TWO && -bearingRelativeAngleToSource > -PI) {
penumbraFilterGainL = TWO_OVER_PI *
(FILTER_GAIN_AT_0 - FILTER_GAIN_AT_180) * (-bearingRelativeAngleToSource + PI_OVER_TWO) + FILTER_GAIN_AT_0;
penumbraFilterGainR = TWO_OVER_PI *
(FILTER_GAIN_AT_90 - FILTER_GAIN_AT_180) * (-bearingRelativeAngleToSource + PI_OVER_TWO) + FILTER_GAIN_AT_90;
} else if (-bearingRelativeAngleToSource <= PI && -bearingRelativeAngleToSource > PI_OVER_TWO) {
penumbraFilterGainL = TWO_OVER_PI *
(FILTER_GAIN_AT_180 - FILTER_GAIN_AT_90) * (-bearingRelativeAngleToSource - PI) + FILTER_GAIN_AT_180;
penumbraFilterGainR = TWO_OVER_PI *
(FILTER_GAIN_AT_180 - FILTER_GAIN_AT_0) * (-bearingRelativeAngleToSource - PI) + FILTER_GAIN_AT_180;
} else if (-bearingRelativeAngleToSource <= PI_OVER_TWO && -bearingRelativeAngleToSource > 0) {
penumbraFilterGainL = TWO_OVER_PI *
(FILTER_GAIN_AT_90 - FILTER_GAIN_AT_0) * (-bearingRelativeAngleToSource - PI_OVER_TWO) + FILTER_GAIN_AT_90;
penumbraFilterGainR = FILTER_GAIN_AT_0;
} else {
penumbraFilterGainL = FILTER_GAIN_AT_0;
penumbraFilterGainR = TWO_OVER_PI *
(FILTER_GAIN_AT_0 - FILTER_GAIN_AT_90) * (-bearingRelativeAngleToSource) + FILTER_GAIN_AT_0;
}
if (distanceBetween < RADIUS_OF_HEAD) {
// Diminish effect if source would be inside head
penumbraFilterGainL += (1.0f - penumbraFilterGainL) * (1.0f - distanceBetween / RADIUS_OF_HEAD);
@ -405,16 +405,16 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
<< "gainR=" << penumbraFilterGainR
<< "angle=" << -bearingRelativeAngleToSource;
}
// Get our per listener/source data so we can get our filter
AudioFilterHSF1s& penumbraFilter = listenerNodeData->getListenerSourcePairData(streamUUID)->getPenumbraFilter();
// set the gain on both filter channels
penumbraFilter.setParameters(0, 0, AudioConstants::SAMPLE_RATE, penumbraFilterFrequency, penumbraFilterGainL, penumbraFilterSlope);
penumbraFilter.setParameters(0, 1, AudioConstants::SAMPLE_RATE, penumbraFilterFrequency, penumbraFilterGainR, penumbraFilterSlope);
penumbraFilter.render(_preMixSamples, _preMixSamples, AudioConstants::NETWORK_FRAME_SAMPLES_STEREO / 2);
}
// Actually mix the _preMixSamples into the _mixSamples here.
for (int s = 0; s < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; s++) {
_mixSamples[s] = glm::clamp(_mixSamples[s] + _preMixSamples[s], AudioConstants::MIN_SAMPLE_VALUE,
@ -427,30 +427,30 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
int AudioMixer::prepareMixForListeningNode(Node* node) {
AvatarAudioStream* nodeAudioStream = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream();
AudioMixerClientData* listenerNodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
// zero out the client mix for this node
memset(_preMixSamples, 0, sizeof(_preMixSamples));
memset(_mixSamples, 0, sizeof(_mixSamples));
// loop through all other nodes that have sufficient audio to mix
int streamsMixed = 0;
DependencyManager::get<NodeList>()->eachNode([&](const SharedNodePointer& otherNode){
if (otherNode->getLinkedData()) {
AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();
// enumerate the ARBs attached to the otherNode and add all that should be added to mix
const QHash<QUuid, PositionalAudioStream*>& otherNodeAudioStreams = otherNodeClientData->getAudioStreams();
QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
for (i = otherNodeAudioStreams.constBegin(); i != otherNodeAudioStreams.constEnd(); i++) {
PositionalAudioStream* otherNodeStream = i.value();
QUuid streamUUID = i.key();
if (otherNodeStream->getType() == PositionalAudioStream::Microphone) {
streamUUID = otherNode->getUUID();
}
if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
streamsMixed += addStreamToMixForListeningNodeWithStream(listenerNodeData, streamUUID,
otherNodeStream, nodeAudioStream);
@ -458,13 +458,13 @@ int AudioMixer::prepareMixForListeningNode(Node* node) {
}
}
});
return streamsMixed;
}
void AudioMixer::sendAudioEnvironmentPacket(SharedNodePointer node) {
static char clientEnvBuffer[MAX_PACKET_SIZE];
// Send stream properties
bool hasReverb = false;
float reverbTime, wetLevel;
@ -477,7 +477,7 @@ void AudioMixer::sendAudioEnvironmentPacket(SharedNodePointer node) {
hasReverb = true;
reverbTime = _zoneReverbSettings[i].reverbTime;
wetLevel = _zoneReverbSettings[i].wetLevel;
// Modulate wet level with distance to wall
float MIN_ATTENUATION_DISTANCE = 2.0f;
float MAX_ATTENUATION = -12; // dB
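The modulation itself falls outside this hunk; one plausible reading scales MAX_ATTENUATION (in dB) by proximity to the wall and converts it to a linear gain (distanceToWall stands in for whatever distance the mixer actually computes):

if (distanceToWall < MIN_ATTENUATION_DISTANCE) {
    // assumed direction: attenuate the wet mix as the listener nears the wall
    float attenuationDb = MAX_ATTENUATION * (1.0f - distanceToWall / MIN_ATTENUATION_DISTANCE);
    wetLevel *= powf(10.0f, attenuationDb / 20.0f);
}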
@ -502,24 +502,24 @@ void AudioMixer::sendAudioEnvironmentPacket(SharedNodePointer node) {
stream->clearReverb();
}
}
// Send at change or every so often
float CHANCE_OF_SEND = 0.01f;
bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);
if (sendData) {
auto nodeList = DependencyManager::get<NodeList>();
int numBytesEnvPacketHeader = nodeList->populatePacketHeader(clientEnvBuffer, PacketTypeAudioEnvironment);
char* envDataAt = clientEnvBuffer + numBytesEnvPacketHeader;
unsigned char bitset = 0;
if (hasReverb) {
setAtBit(bitset, HAS_REVERB_BIT);
}
memcpy(envDataAt, &bitset, sizeof(unsigned char));
envDataAt += sizeof(unsigned char);
if (hasReverb) {
memcpy(envDataAt, &reverbTime, sizeof(float));
envDataAt += sizeof(float);
@ -532,7 +532,7 @@ void AudioMixer::sendAudioEnvironmentPacket(SharedNodePointer node) {
void AudioMixer::readPendingDatagram(const QByteArray& receivedPacket, const HifiSockAddr& senderSockAddr) {
auto nodeList = DependencyManager::get<NodeList>();
if (nodeList->packetVersionAndHashMatch(receivedPacket)) {
// pull any new audio data from nodes off of the network stack
PacketType mixerPacketType = packetTypeForPacket(receivedPacket);
@ -541,14 +541,14 @@ void AudioMixer::readPendingDatagram(const QByteArray& receivedPacket, const Hif
|| mixerPacketType == PacketTypeInjectAudio
|| mixerPacketType == PacketTypeSilentAudioFrame
|| mixerPacketType == PacketTypeAudioStreamStats) {
nodeList->findNodeAndUpdateWithDataFromPacket(receivedPacket);
} else if (mixerPacketType == PacketTypeMuteEnvironment) {
SharedNodePointer sendingNode = nodeList->sendingNodeForPacket(receivedPacket);
if (sendingNode->getCanAdjustLocks()) {
QByteArray packet = receivedPacket;
nodeList->populatePacketHeader(packet, PacketTypeMuteEnvironment);
nodeList->eachNode([&](const SharedNodePointer& node){
if (node->getType() == NodeType::Agent && node->getActiveSocket() &&
node->getLinkedData() && node != sendingNode) {
@ -560,18 +560,18 @@ void AudioMixer::readPendingDatagram(const QByteArray& receivedPacket, const Hif
// let processNodeData handle it.
nodeList->processNodeData(senderSockAddr, receivedPacket);
}
}
}
void AudioMixer::sendStatsPacket() {
static QJsonObject statsObject;
statsObject["useDynamicJitterBuffers"] = _streamSettings._dynamicJitterBuffers;
statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f;
statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio;
statsObject["average_listeners_per_frame"] = (float) _sumListeners / (float) _numStatFrames;
if (_sumListeners > 0) {
statsObject["average_mixes_per_listener"] = (float) _sumMixes / (float) _sumListeners;
} else {
@ -581,65 +581,65 @@ void AudioMixer::sendStatsPacket() {
_sumListeners = 0;
_sumMixes = 0;
_numStatFrames = 0;
QJsonObject readPendingDatagramStats;
QJsonObject rpdCallsStats;
rpdCallsStats["calls_per_sec_avg_30s"] = _readPendingCallsPerSecondStats.getWindowAverage();
rpdCallsStats["calls_last_sec"] = _readPendingCallsPerSecondStats.getLastCompleteIntervalStats().getSum() + 0.5;
readPendingDatagramStats["calls"] = rpdCallsStats;
QJsonObject packetsPerCallStats;
packetsPerCallStats["avg_30s"] = _datagramsReadPerCallStats.getWindowAverage();
packetsPerCallStats["avg_1s"] = _datagramsReadPerCallStats.getLastCompleteIntervalStats().getAverage();
readPendingDatagramStats["packets_per_call"] = packetsPerCallStats;
QJsonObject packetsTimePerCallStats;
packetsTimePerCallStats["usecs_per_call_avg_30s"] = _timeSpentPerCallStats.getWindowAverage();
packetsTimePerCallStats["usecs_per_call_avg_1s"] = _timeSpentPerCallStats.getLastCompleteIntervalStats().getAverage();
packetsTimePerCallStats["prct_time_in_call_30s"] =
packetsTimePerCallStats["prct_time_in_call_30s"] =
_timeSpentPerCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS * USECS_PER_SECOND) * 100.0;
packetsTimePerCallStats["prct_time_in_call_1s"] =
packetsTimePerCallStats["prct_time_in_call_1s"] =
_timeSpentPerCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0;
readPendingDatagramStats["packets_time_per_call"] = packetsTimePerCallStats;
QJsonObject hashMatchTimePerCallStats;
hashMatchTimePerCallStats["usecs_per_hashmatch_avg_30s"] = _timeSpentPerHashMatchCallStats.getWindowAverage();
hashMatchTimePerCallStats["usecs_per_hashmatch_avg_1s"]
hashMatchTimePerCallStats["usecs_per_hashmatch_avg_1s"]
= _timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getAverage();
hashMatchTimePerCallStats["prct_time_in_hashmatch_30s"]
hashMatchTimePerCallStats["prct_time_in_hashmatch_30s"]
= _timeSpentPerHashMatchCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS*USECS_PER_SECOND) * 100.0;
hashMatchTimePerCallStats["prct_time_in_hashmatch_1s"]
= _timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0;
hashMatchTimePerCallStats["prct_time_in_hashmatch_1s"]
= _timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0;
readPendingDatagramStats["hashmatch_time_per_call"] = hashMatchTimePerCallStats;
statsObject["read_pending_datagrams"] = readPendingDatagramStats;
// add stats for each listener
auto nodeList = DependencyManager::get<NodeList>();
QJsonObject listenerStats;
nodeList->eachNode([&](const SharedNodePointer& node) {
AudioMixerClientData* clientData = static_cast<AudioMixerClientData*>(node->getLinkedData());
if (clientData) {
QJsonObject nodeStats;
QString uuidString = uuidStringWithoutCurlyBraces(node->getUUID());
nodeStats["outbound_kbps"] = node->getOutboundBandwidth();
nodeStats[USERNAME_UUID_REPLACEMENT_STATS_KEY] = uuidString;
nodeStats["jitter"] = clientData->getAudioStreamStats();
listenerStats[uuidString] = nodeStats;
}
});
// add the listeners object to the root object
statsObject["listeners"] = listenerStats;
// send off the stats packets
ThreadedAssignment::addPacketStatsAndSendStatsPacket(statsObject);
}
@ -649,125 +649,125 @@ void AudioMixer::run() {
ThreadedAssignment::commonInit(AUDIO_MIXER_LOGGING_TARGET_NAME, NodeType::AudioMixer);
auto nodeList = DependencyManager::get<NodeList>();
// we do not want this event loop to be the handler for UDP datagrams, so disconnect
disconnect(&nodeList->getNodeSocket(), 0, this, 0);
// setup a QThread with us as parent that will house the AudioMixerDatagramProcessor
_datagramProcessingThread = new QThread(this);
_datagramProcessingThread->setObjectName("Datagram Processor Thread");
// create an AudioMixerDatagramProcessor and move it to that thread
AudioMixerDatagramProcessor* datagramProcessor = new AudioMixerDatagramProcessor(nodeList->getNodeSocket(), thread());
datagramProcessor->moveToThread(_datagramProcessingThread);
// remove the NodeList as the parent of the node socket
nodeList->getNodeSocket().setParent(NULL);
nodeList->getNodeSocket().moveToThread(_datagramProcessingThread);
// let the datagram processor handle readyRead from node socket
connect(&nodeList->getNodeSocket(), &QUdpSocket::readyRead,
datagramProcessor, &AudioMixerDatagramProcessor::readPendingDatagrams);
// connect to the datagram processing thread signal that tells us we have to handle a packet
connect(datagramProcessor, &AudioMixerDatagramProcessor::packetRequiresProcessing, this, &AudioMixer::readPendingDatagram);
// delete the datagram processor and the associated thread when the QThread quits
connect(_datagramProcessingThread, &QThread::finished, datagramProcessor, &QObject::deleteLater);
connect(datagramProcessor, &QObject::destroyed, _datagramProcessingThread, &QThread::deleteLater);
// start the datagram processing thread
_datagramProcessingThread->start();
nodeList->addNodeTypeToInterestSet(NodeType::Agent);
nodeList->linkedDataCreateCallback = [](Node* node) {
node->setLinkedData(new AudioMixerClientData());
};
// wait until we have the domain-server settings, otherwise we bail
DomainHandler& domainHandler = nodeList->getDomainHandler();
qDebug() << "Waiting for domain settings from domain-server.";
// block until we get the settingsRequestComplete signal
QEventLoop loop;
connect(&domainHandler, &DomainHandler::settingsReceived, &loop, &QEventLoop::quit);
connect(&domainHandler, &DomainHandler::settingsReceiveFail, &loop, &QEventLoop::quit);
domainHandler.requestDomainSettings();
loop.exec();
if (domainHandler.getSettingsObject().isEmpty()) {
qDebug() << "Failed to retreive settings object from domain-server. Bailing on assignment.";
setFinished(true);
return;
}
const QJsonObject& settingsObject = domainHandler.getSettingsObject();
// check the settings object to see if we have anything we can parse out
parseSettingsObject(settingsObject);
int nextFrame = 0;
QElapsedTimer timer;
timer.start();
char clientMixBuffer[MAX_PACKET_SIZE];
int usecToSleep = AudioConstants::NETWORK_FRAME_USECS;
const int TRAILING_AVERAGE_FRAMES = 100;
int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES;
while (!_isFinished) {
const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;
const float RATIO_BACK_OFF = 0.02f;
const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
if (usecToSleep < 0) {
usecToSleep = 0;
}
_trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio)
+ (usecToSleep * CURRENT_FRAME_RATIO / (float) AudioConstants::NETWORK_FRAME_USECS);
float lastCutoffRatio = _performanceThrottlingRatio;
bool hasRatioChanged = false;
if (framesSinceCutoffEvent >= TRAILING_AVERAGE_FRAMES) {
if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
// we're struggling - change our min required loudness to reduce some load
_performanceThrottlingRatio = _performanceThrottlingRatio + (0.5f * (1.0f - _performanceThrottlingRatio));
qDebug() << "Mixer is struggling, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
<< lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
hasRatioChanged = true;
} else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _performanceThrottlingRatio != 0) {
// we've recovered and can back off the required loudness
_performanceThrottlingRatio = _performanceThrottlingRatio - RATIO_BACK_OFF;
if (_performanceThrottlingRatio < 0) {
_performanceThrottlingRatio = 0;
}
qDebug() << "Mixer is recovering, sleeping" << _trailingSleepRatio * 100 << "% of frame time. Old cutoff was"
<< lastCutoffRatio << "and is now" << _performanceThrottlingRatio;
hasRatioChanged = true;
}
if (hasRatioChanged) {
// set our min audibility threshold from the new ratio
_minAudibilityThreshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - _performanceThrottlingRatio));
qDebug() << "Minimum audability required to be mixed is now" << _minAudibilityThreshold;
framesSinceCutoffEvent = 0;
}
}
if (!hasRatioChanged) {
++framesSinceCutoffEvent;
}
@ -777,9 +777,9 @@ void AudioMixer::run() {
perSecondActions();
_lastPerSecondCallbackTime = now;
}
nodeList->eachNode([&](const SharedNodePointer& node) {
if (node->getLinkedData()) {
AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();
@ -787,14 +787,14 @@ void AudioMixer::run() {
// a pointer to the popped data is stored as a member in InboundAudioStream.
// That's how the popped audio data will be read for mixing (but only if the pop was successful)
nodeData->checkBuffersBeforeFrameSend();
// if the stream should be muted, send mute packet
if (nodeData->getAvatarAudioStream()
&& shouldMute(nodeData->getAvatarAudioStream()->getQuietestFrameLoudness())) {
QByteArray packet = nodeList->byteArrayWithPopulatedHeader(PacketTypeNoisyMute);
nodeList->writeDatagram(packet, node);
}
if (node->getType() == NodeType::Agent && node->getActiveSocket()
&& nodeData->getAvatarAudioStream()) {
@ -810,7 +810,7 @@ void AudioMixer::run() {
quint16 sequence = nodeData->getOutgoingSequenceNumber();
memcpy(mixDataAt, &sequence, sizeof(quint16));
mixDataAt += sizeof(quint16);
// pack mixed audio samples
memcpy(mixDataAt, _mixSamples, AudioConstants::NETWORK_FRAME_BYTES_STEREO);
mixDataAt += AudioConstants::NETWORK_FRAME_BYTES_STEREO;
@ -829,7 +829,7 @@ void AudioMixer::run() {
memcpy(mixDataAt, &numSilentSamples, sizeof(quint16));
mixDataAt += sizeof(quint16);
}
// Send audio environment
sendAudioEnvironmentPacket(node);
@ -847,9 +847,9 @@ void AudioMixer::run() {
}
}
});
++_numStatFrames;
QCoreApplication::processEvents();
if (_isFinished) {
@ -925,7 +925,7 @@ void AudioMixer::perSecondActions() {
void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
if (settingsObject.contains(AUDIO_BUFFER_GROUP_KEY)) {
QJsonObject audioBufferGroupObject = settingsObject[AUDIO_BUFFER_GROUP_KEY].toObject();
// check the payload to see if we have asked for dynamicJitterBuffer support
const QString DYNAMIC_JITTER_BUFFER_JSON_KEY = "dynamic_jitter_buffer";
_streamSettings._dynamicJitterBuffers = audioBufferGroupObject[DYNAMIC_JITTER_BUFFER_JSON_KEY].toBool();
@ -934,7 +934,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
} else {
qDebug() << "Dynamic jitter buffers disabled.";
}
bool ok;
const QString DESIRED_JITTER_BUFFER_FRAMES_KEY = "static_desired_jitter_buffer_frames";
_streamSettings._staticDesiredJitterBufferFrames = audioBufferGroupObject[DESIRED_JITTER_BUFFER_FRAMES_KEY].toString().toInt(&ok);
@ -942,14 +942,14 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
_streamSettings._staticDesiredJitterBufferFrames = DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES;
}
qDebug() << "Static desired jitter buffer frames:" << _streamSettings._staticDesiredJitterBufferFrames;
const QString MAX_FRAMES_OVER_DESIRED_JSON_KEY = "max_frames_over_desired";
_streamSettings._maxFramesOverDesired = audioBufferGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._maxFramesOverDesired = DEFAULT_MAX_FRAMES_OVER_DESIRED;
}
qDebug() << "Max frames over desired:" << _streamSettings._maxFramesOverDesired;
const QString USE_STDEV_FOR_DESIRED_CALC_JSON_KEY = "use_stdev_for_desired_calc";
_streamSettings._useStDevForJitterCalc = audioBufferGroupObject[USE_STDEV_FOR_DESIRED_CALC_JSON_KEY].toBool();
if (_streamSettings._useStDevForJitterCalc) {
@ -957,28 +957,28 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
} else {
qDebug() << "Using Fred's max-gap method for jitter calc if dynamic jitter buffers enabled";
}
const QString WINDOW_STARVE_THRESHOLD_JSON_KEY = "window_starve_threshold";
_streamSettings._windowStarveThreshold = audioBufferGroupObject[WINDOW_STARVE_THRESHOLD_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._windowStarveThreshold = DEFAULT_WINDOW_STARVE_THRESHOLD;
}
qDebug() << "Window A starve threshold:" << _streamSettings._windowStarveThreshold;
const QString WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY = "window_seconds_for_desired_calc_on_too_many_starves";
_streamSettings._windowSecondsForDesiredCalcOnTooManyStarves = audioBufferGroupObject[WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._windowSecondsForDesiredCalcOnTooManyStarves = DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES;
}
qDebug() << "Window A length:" << _streamSettings._windowSecondsForDesiredCalcOnTooManyStarves << "seconds";
const QString WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY = "window_seconds_for_desired_reduction";
_streamSettings._windowSecondsForDesiredReduction = audioBufferGroupObject[WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._windowSecondsForDesiredReduction = DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION;
}
qDebug() << "Window B length:" << _streamSettings._windowSecondsForDesiredReduction << "seconds";
const QString REPETITION_WITH_FADE_JSON_KEY = "repetition_with_fade";
_streamSettings._repetitionWithFade = audioBufferGroupObject[REPETITION_WITH_FADE_JSON_KEY].toBool();
if (_streamSettings._repetitionWithFade) {
@ -986,17 +986,17 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
} else {
qDebug() << "Repetition with fade disabled";
}
const QString PRINT_STREAM_STATS_JSON_KEY = "print_stream_stats";
_printStreamStats = audioBufferGroupObject[PRINT_STREAM_STATS_JSON_KEY].toBool();
if (_printStreamStats) {
qDebug() << "Stream stats will be printed to stdout";
}
}
if (settingsObject.contains(AUDIO_ENV_GROUP_KEY)) {
QJsonObject audioEnvGroupObject = settingsObject[AUDIO_ENV_GROUP_KEY].toObject();
const QString ATTENATION_PER_DOULING_IN_DISTANCE = "attenuation_per_doubling_in_distance";
if (audioEnvGroupObject[ATTENATION_PER_DOULING_IN_DISTANCE].isString()) {
bool ok = false;
@ -1006,7 +1006,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
qDebug() << "Attenuation per doubling in distance changed to" << _attenuationPerDoublingInDistance;
}
}
const QString NOISE_MUTING_THRESHOLD = "noise_muting_threshold";
if (audioEnvGroupObject[NOISE_MUTING_THRESHOLD].isString()) {
bool ok = false;
@ -1024,22 +1024,22 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
if (_enableFilter) {
qDebug() << "Filter enabled";
}
const QString AUDIO_ZONES = "zones";
if (audioEnvGroupObject[AUDIO_ZONES].isObject()) {
const QJsonObject& zones = audioEnvGroupObject[AUDIO_ZONES].toObject();
const QString X_RANGE = "x_range";
const QString Y_RANGE = "y_range";
const QString Z_RANGE = "z_range";
foreach (const QString& zone, zones.keys()) {
QJsonObject zoneObject = zones[zone].toObject();
if (zoneObject.contains(X_RANGE) && zoneObject.contains(Y_RANGE) && zoneObject.contains(Z_RANGE)) {
QStringList xRange = zoneObject.value(X_RANGE).toString().split("-", QString::SkipEmptyParts);
QStringList yRange = zoneObject.value(Y_RANGE).toString().split("-", QString::SkipEmptyParts);
QStringList zRange = zoneObject.value(Z_RANGE).toString().split("-", QString::SkipEmptyParts);
if (xRange.size() == 2 && yRange.size() == 2 && zRange.size() == 2) {
float xMin, xMax, yMin, yMax, zMin, zMax;
bool ok, allOk = true;
@ -1055,7 +1055,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
allOk &= ok;
zMax = zRange[1].toFloat(&ok);
allOk &= ok;
if (allOk) {
glm::vec3 corner(xMin, yMin, zMin);
glm::vec3 dimensions(xMax - xMin, yMax - yMin, zMax - zMin);
@ -1068,63 +1068,63 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
}
}
}
const QString ATTENUATION_COEFFICIENTS = "attenuation_coefficients";
if (audioEnvGroupObject[ATTENUATION_COEFFICIENTS].isArray()) {
const QJsonArray& coefficients = audioEnvGroupObject[ATTENUATION_COEFFICIENTS].toArray();
const QString SOURCE = "source";
const QString LISTENER = "listener";
const QString COEFFICIENT = "coefficient";
for (int i = 0; i < coefficients.count(); ++i) {
QJsonObject coefficientObject = coefficients[i].toObject();
if (coefficientObject.contains(SOURCE) &&
coefficientObject.contains(LISTENER) &&
coefficientObject.contains(COEFFICIENT)) {
ZonesSettings settings;
bool ok;
settings.source = coefficientObject.value(SOURCE).toString();
settings.listener = coefficientObject.value(LISTENER).toString();
settings.coefficient = coefficientObject.value(COEFFICIENT).toString().toFloat(&ok);
if (ok && settings.coefficient >= 0.0f && settings.coefficient <= 1.0f &&
_audioZones.contains(settings.source) && _audioZones.contains(settings.listener)) {
_zonesSettings.push_back(settings);
qDebug() << "Added Coefficient:" << settings.source << settings.listener << settings.coefficient;
}
}
}
}
const QString REVERB = "reverb";
if (audioEnvGroupObject[REVERB].isArray()) {
const QJsonArray& reverb = audioEnvGroupObject[REVERB].toArray();
const QString ZONE = "zone";
const QString REVERB_TIME = "reverb_time";
const QString WET_LEVEL = "wet_level";
for (int i = 0; i < reverb.count(); ++i) {
QJsonObject reverbObject = reverb[i].toObject();
if (reverbObject.contains(ZONE) &&
reverbObject.contains(REVERB_TIME) &&
reverbObject.contains(WET_LEVEL)) {
bool okReverbTime, okWetLevel;
QString zone = reverbObject.value(ZONE).toString();
float reverbTime = reverbObject.value(REVERB_TIME).toString().toFloat(&okReverbTime);
float wetLevel = reverbObject.value(WET_LEVEL).toString().toFloat(&okWetLevel);
if (okReverbTime && okWetLevel && _audioZones.contains(zone)) {
ReverbSettings settings;
settings.zone = zone;
settings.reverbTime = reverbTime;
settings.wetLevel = wetLevel;
_zoneReverbSettings.push_back(settings);
qDebug() << "Added Reverb:" << zone << reverbTime << wetLevel;
}

View file

@ -29,44 +29,46 @@ class AudioMixer : public ThreadedAssignment {
Q_OBJECT
public:
AudioMixer(const QByteArray& packet);
void deleteLater() { qDebug() << "DELETE LATER CALLED?"; QObject::deleteLater(); }
public slots:
/// threaded run of assignment
void run();
void readPendingDatagrams() { }; // this will not be called since our datagram processing thread will handle datagrams
void readPendingDatagram(const QByteArray& receivedPacket, const HifiSockAddr& senderSockAddr);
void sendStatsPacket();
static const InboundAudioStream::Settings& getStreamSettings() { return _streamSettings; }
private:
/// adds one stream to the mix for a listening node
int addStreamToMixForListeningNodeWithStream(AudioMixerClientData* listenerNodeData,
const QUuid& streamUUID,
PositionalAudioStream* streamToAdd,
AvatarAudioStream* listeningNodeStream);
/// prepares and sends a mix to one Node
int prepareMixForListeningNode(Node* node);
/// Send Audio Environment packet for a single node
void sendAudioEnvironmentPacket(SharedNodePointer node);
// used on a per stream basis to run the filter on before mixing, large enough to handle the historical
// data from a phase delay as well as an entire network buffer
int16_t _preMixSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO + (SAMPLE_PHASE_DELAY_AT_90 * 2)];
// client samples capacity is larger than what will be sent to optimize mixing
// we are MMX adding 4 samples at a time so we need client samples to have an extra 4
int16_t _mixSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO + (SAMPLE_PHASE_DELAY_AT_90 * 2)];
void perSecondActions();
bool shouldMute(float quietestFrame);
void parseSettingsObject(const QJsonObject& settingsObject);
float _trailingSleepRatio;
float _minAudibilityThreshold;
float _performanceThrottlingRatio;
@ -75,7 +77,7 @@ private:
int _numStatFrames;
int _sumListeners;
int _sumMixes;
QHash<QString, AABox> _audioZones;
struct ZonesSettings {
QString source;
@ -89,12 +91,12 @@ private:
float wetLevel;
};
QVector<ReverbSettings> _zoneReverbSettings;
static InboundAudioStream::Settings _streamSettings;
static bool _printStreamStats;
static bool _enableFilter;
quint64 _lastPerSecondCallbackTime;
bool _sendAudioStreamStats;

View file

@ -254,6 +254,7 @@ QJsonObject DomainServerSettingsManager::responseObjectForType(const QString& ty
void DomainServerSettingsManager::updateSetting(const QString& key, const QJsonValue& newValue, QVariantMap& settingMap,
const QJsonObject& settingDescription) {
if (newValue.isString()) {
if (newValue.toString().isEmpty()) {
// this is an empty value, clear it in settings variant so the default is sent
@ -288,7 +289,16 @@ void DomainServerSettingsManager::updateSetting(const QString& key, const QJsonV
settingMap[key] = QVariantMap();
}
QVariant& possibleMap = settingMap[key];
if (!possibleMap.canConvert(QMetaType::QVariantMap)) {
// if this isn't a map then we need to make it one, otherwise we're about to crash
qDebug() << "Value at" << key << "was not the expected QVariantMap while updating DS settings"
<< "- removing existing value and making it a QVariantMap";
possibleMap = QVariantMap();
}
QVariantMap& thisMap = *reinterpret_cast<QVariantMap*>(possibleMap.data());
foreach(const QString childKey, newValue.toObject().keys()) {
QJsonObject childDescriptionObject = settingDescription;
@ -351,7 +361,7 @@ void DomainServerSettingsManager::recurseJSONObjectAndOverwriteSettings(const QJ
settingsVariant[rootKey] = QVariantMap();
}
QVariantMap* thisMap = &settingsVariant;
QJsonObject groupDescriptionObject;
@ -362,7 +372,7 @@ void DomainServerSettingsManager::recurseJSONObjectAndOverwriteSettings(const QJ
groupDescriptionObject = groupValue.toObject();
// change the map we will update to be the map for this group
thisMap = reinterpret_cast<QVariantMap*>(settingsVariant[rootKey].data());
break;
}
@ -388,7 +398,7 @@ void DomainServerSettingsManager::recurseJSONObjectAndOverwriteSettings(const QJ
}
if (!matchingDescriptionObject.isEmpty()) {
updateSetting(rootKey, rootValue, *thisMap, matchingDescriptionObject);
} else {
qDebug() << "Setting for root key" << rootKey << "does not exist - cannot update setting.";
}
@ -401,7 +411,7 @@ void DomainServerSettingsManager::recurseJSONObjectAndOverwriteSettings(const QJ
// if we matched the setting then update the value
if (!matchingDescriptionObject.isEmpty()) {
QJsonValue settingValue = rootValue.toObject()[settingKey];
updateSetting(settingKey, settingValue, *thisMap, matchingDescriptionObject);
} else {
qDebug() << "Could not find description for setting" << settingKey << "in group" << rootKey <<
"- cannot update setting.";

View file

@ -603,9 +603,11 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
auto faceshiftTracker = DependencyManager::get<Faceshift>();
faceshiftTracker->init();
connect(faceshiftTracker.data(), &FaceTracker::muteToggled, this, &Application::faceTrackerMuteToggled);
#ifdef HAVE_DDE
auto ddeTracker = DependencyManager::get<DdeFaceTracker>();
ddeTracker->init();
connect(ddeTracker.data(), &FaceTracker::muteToggled, this, &Application::faceTrackerMuteToggled);
#endif
}

View file

@ -23,7 +23,7 @@ ThreadedAssignment::ThreadedAssignment(const QByteArray& packet) :
_isFinished(false),
_datagramProcessingThread(NULL)
{
}
void ThreadedAssignment::setFinished(bool isFinished) {
@ -41,7 +41,7 @@ void ThreadedAssignment::setFinished(bool isFinished) {
if (_statsTimer) {
_statsTimer->stop();
}
// stop processing datagrams from the node socket
// this ensures we won't process a domain list while we are going down
auto nodeList = DependencyManager::get<NodeList>();
@ -52,20 +52,21 @@ void ThreadedAssignment::setFinished(bool isFinished) {
// if we have a datagram processing thread, quit it and wait on it to make sure that
// the node socket is back on the same thread as the NodeList
if (_datagramProcessingThread) {
// tell the datagram processing thread to quit and wait until it is done,
// then return the node socket to the NodeList
_datagramProcessingThread->quit();
_datagramProcessingThread->wait();
// set node socket parent back to NodeList
nodeList->getNodeSocket().setParent(nodeList.data());
}
// move the NodeList back to the QCoreApplication instance's thread
nodeList->moveToThread(QCoreApplication::instance()->thread());
emit finished();
}
}
@ -74,17 +75,17 @@ void ThreadedAssignment::setFinished(bool isFinished) {
void ThreadedAssignment::commonInit(const QString& targetName, NodeType_t nodeType, bool shouldSendStats) {
// change the logging target name while the assignment is running
LogHandler::getInstance().setTargetName(targetName);
auto nodeList = DependencyManager::get<NodeList>();
nodeList->setOwnerType(nodeType);
// this is a temp fix for Qt 5.3 - rebinding the node socket gives us readyRead for the socket on this thread
nodeList->rebindNodeSocket();
_domainServerTimer = new QTimer();
connect(_domainServerTimer, SIGNAL(timeout()), this, SLOT(checkInWithDomainServerOrExit()));
_domainServerTimer->start(DOMAIN_SERVER_CHECK_IN_MSECS);
if (shouldSendStats) {
// send a stats packet every 1 second
_statsTimer = new QTimer();
@ -95,15 +96,15 @@ void ThreadedAssignment::commonInit(const QString& targetName, NodeType_t nodeTy
void ThreadedAssignment::addPacketStatsAndSendStatsPacket(QJsonObject &statsObject) {
auto nodeList = DependencyManager::get<NodeList>();
float packetsPerSecond, bytesPerSecond;
// XXX can BandwidthRecorder be used for this?
nodeList->getPacketStats(packetsPerSecond, bytesPerSecond);
nodeList->resetPacketStats();
statsObject["packets_per_second"] = packetsPerSecond;
statsObject["bytes_per_second"] = bytesPerSecond;
nodeList->sendStatsToDomainServer(statsObject);
}
@ -122,7 +123,7 @@ void ThreadedAssignment::checkInWithDomainServerOrExit() {
bool ThreadedAssignment::readAvailableDatagram(QByteArray& destinationByteArray, HifiSockAddr& senderSockAddr) {
auto nodeList = DependencyManager::get<NodeList>();
if (nodeList->getNodeSocket().hasPendingDatagrams()) {
destinationByteArray.resize(nodeList->getNodeSocket().pendingDatagramSize());
nodeList->getNodeSocket().readDatagram(destinationByteArray.data(), destinationByteArray.size(),

View file

@ -20,8 +20,8 @@ class ThreadedAssignment : public Assignment {
Q_OBJECT
public:
ThreadedAssignment(const QByteArray& packet);
~ThreadedAssignment() { stop(); }
void setFinished(bool isFinished);
virtual void aboutToFinish() { };
void addPacketStatsAndSendStatsPacket(QJsonObject& statsObject);
@ -35,7 +35,7 @@ public slots:
signals:
void finished();
protected:
bool readAvailableDatagram(QByteArray& destinationByteArray, HifiSockAddr& senderSockAddr);
void commonInit(const QString& targetName, NodeType_t nodeType, bool shouldSendStats = true);
@ -43,7 +43,7 @@ protected:
QThread* _datagramProcessingThread;
QTimer* _domainServerTimer = nullptr;
QTimer* _statsTimer = nullptr;
private slots:
void checkInWithDomainServerOrExit();

View file

@ -15,6 +15,36 @@
#include "BulletUtil.h"
// find the average point on a convex shape
glm::vec3 findCenter(const QVector<glm::vec3>& points) {
glm::vec3 result = glm::vec3(0);
for (int i = 0; i < points.size(); i++) {
result += points[i];
}
return result * (1.0f / points.size());
}
// bullet puts "margins" around all the collision shapes. This can cause shapes with hulls
// to float a bit above what they are sitting on, etc. One option is to call:
//
// compound->setMargin(0.01);
//
// to reduce the size of the margin, but this has some consequences for the
// performance and stability of the simulation. Instead, we clench in all the points of
// the hull by the margin. These clenched points + bullet's margin will put the actual
// collision hull fairly close to the visual edge of the object.
QVector<glm::vec3> shrinkByMargin(const QVector<glm::vec3>& points, const glm::vec3 center, float margin) {
QVector<glm::vec3> result(points.size());
for (int i = 0; i < points.size(); i++) {
glm::vec3 pVec = points[i] - center;
glm::vec3 pVecNorm = glm::normalize(pVec);
result[i] = center + pVec - (pVecNorm * margin);
}
return result;
}
btCollisionShape* ShapeInfoUtil::createShapeFromInfo(const ShapeInfo& info) {
btCollisionShape* shape = NULL;
switch(info.getType()) {
@ -40,7 +70,9 @@ btCollisionShape* ShapeInfoUtil::createShapeFromInfo(const ShapeInfo& info) {
if (numSubShapes == 1) {
auto hull = new btConvexHullShape();
const QVector<QVector<glm::vec3>>& points = info.getPoints();
glm::vec3 center = findCenter(points[0]);
QVector<glm::vec3> shrunken = shrinkByMargin(points[0], center, hull->getMargin());
foreach (glm::vec3 point, shrunken) {
btVector3 btPoint(point[0], point[1], point[2]);
hull->addPoint(btPoint, false);
}
@ -53,7 +85,9 @@ btCollisionShape* ShapeInfoUtil::createShapeFromInfo(const ShapeInfo& info) {
trans.setIdentity();
foreach (QVector<glm::vec3> hullPoints, points) {
auto hull = new btConvexHullShape();
glm::vec3 center = findCenter(hullPoints);
QVector<glm::vec3> shrunken = shrinkByMargin(hullPoints, center, hull->getMargin());
foreach (glm::vec3 point, shrunken) {
btVector3 btPoint(point[0], point[1], point[2]);
hull->addPoint(btPoint, false);
}