Merge branch 'master' of https://github.com/highfidelity/hifi into virtualEntities

Conflicts:
	assignment-client/src/Agent.cpp
	examples/editModels.js
ZappoMan 2014-08-28 21:19:56 -07:00
commit 2bcad6ab2b
139 changed files with 9494 additions and 2581 deletions
BUILD.md
assignment-client/src
domain-server/resources/web/settings
examples
interface
libraries


@ -96,7 +96,9 @@ Currently building on Windows has been tested using the following compilers:
#####Windows SDK 7.1
Whichever version of Visual Studio you use, first install [Microsoft Windows SDK for Windows 7 and .NET Framework 4](http://www.microsoft.com/en-us/download/details.aspx?id=8279).
Whichever version of Visual Studio you use, you will need [Microsoft Windows SDK for Windows 7 and .NET Framework 4](http://www.microsoft.com/en-us/download/details.aspx?id=8279).
NOTE: If using Visual Studio C++ 2010 Express, you need to follow a specific install order. See below before installing the Windows SDK.
######Windows 8.1
You may have already downloaded the Windows 8 SDK (e.g. if you have previously installed Visual Studio 2013). If so, change CMAKE_PREFIX_PATH in %HIFI_DIR%\CMakeLists.txt to point to the Windows 8 SDK binaries. The default path is `C:\Program Files (x86)\Windows Kits\8.1\Lib\winv6.3\um\x86`
@ -109,6 +111,14 @@ The following patches/service packs are also required:
* [VS2010 SP1](http://www.microsoft.com/en-us/download/details.aspx?id=23691)
* [VS2010 SP1 Compiler Update](http://www.microsoft.com/en-us/download/details.aspx?id=4422)
IMPORTANT: Use the following install order:
1. Visual Studio C++ 2010 Express
2. Windows SDK 7.1
3. VS2010 SP1
4. VS2010 SP1 Compiler Update
If you get an error while installing the VS2010 SP1 Compiler update saying that you don't have the Windows SDK installed, then uninstall all of the above and start again in the correct order.
Some of the build instructions will ask you to start a Visual Studio Command Prompt. You should have a shortcut in your Start menu called "Open Visual Studio Command Prompt (2010)" which will do so.
#####Visual Studio 2013


@ -33,12 +33,17 @@
#include "Agent.h"
static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 10;
Agent::Agent(const QByteArray& packet) :
ThreadedAssignment(packet),
_voxelEditSender(),
_particleEditSender(),
_entityEditSender(),
_receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 1, false, 1, 0, false),
_receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES,
InboundAudioStream::Settings(0, false, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, false,
DEFAULT_WINDOW_STARVE_THRESHOLD, DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES,
DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION, false)),
_avatarHashMap()
{
// be the parent of the script engine so it gets moved when we do
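For reference (not part of the diff): judging from the fields this commit reads and writes later (_dynamicJitterBuffers, _staticDesiredJitterBufferFrames, _maxFramesOverDesired, _useStDevForJitterCalc, the window settings and _repetitionWithFade), the InboundAudioStream::Settings value constructed above plausibly carries something like the sketch below; the exact field order behind the 8-argument constructor is an assumption.

struct Settings {
    int _maxFramesOverDesired;                          // assumed to be the leading 0 above
    bool _dynamicJitterBuffers;                         // false above
    int _staticDesiredJitterBufferFrames;               // RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES above
    bool _useStDevForJitterCalc;                        // false above
    int _windowStarveThreshold;                         // DEFAULT_WINDOW_STARVE_THRESHOLD
    int _windowSecondsForDesiredCalcOnTooManyStarves;   // DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES
    int _windowSecondsForDesiredReduction;              // DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION
    bool _repetitionWithFade;                           // false above
};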
@ -148,7 +153,7 @@ void Agent::readPendingDatagrams() {
_voxelViewer.processDatagram(mutablePacket, sourceNode);
}
} else if (datagramPacketType == PacketTypeMixedAudio) {
} else if (datagramPacketType == PacketTypeMixedAudio || datagramPacketType == PacketTypeSilentAudioFrame) {
_receivedAudioStream.parseData(receivedPacket);


@ -69,12 +69,12 @@ void attachNewNodeDataToNode(Node *newNode) {
}
}
bool AudioMixer::_useDynamicJitterBuffers = false;
int AudioMixer::_staticDesiredJitterBufferFrames = 0;
int AudioMixer::_maxFramesOverDesired = 0;
InboundAudioStream::Settings AudioMixer::_streamSettings;
bool AudioMixer::_printStreamStats = false;
bool AudioMixer::_enableFilter = false;
AudioMixer::AudioMixer(const QByteArray& packet) :
ThreadedAssignment(packet),
_trailingSleepRatio(1.0f),
@ -85,7 +85,12 @@ AudioMixer::AudioMixer(const QByteArray& packet) :
_sumMixes(0),
_sourceUnattenuatedZone(NULL),
_listenerUnattenuatedZone(NULL),
_lastSendAudioStreamStatsTime(usecTimestampNow())
_lastPerSecondCallbackTime(usecTimestampNow()),
_sendAudioStreamStats(false),
_datagramsReadPerCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
_timeSpentPerCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
_timeSpentPerHashMatchCallStats(0, READ_DATAGRAMS_STATS_WINDOW_SECONDS),
_readPendingCallsPerSecondStats(1, READ_DATAGRAMS_STATS_WINDOW_SECONDS)
{
}
@ -99,15 +104,44 @@ const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
const float ATTENUATION_AMOUNT_PER_DOUBLING_IN_DISTANCE = 0.18f;
const float ATTENUATION_EPSILON_DISTANCE = 0.1f;
void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
int AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
AvatarAudioStream* listeningNodeStream) {
// If repetition with fade is enabled:
// If streamToAdd could not provide a frame (it was starved), then we'll mix its previously-mixed frame
// This is preferable to not mixing it at all since that's equivalent to inserting silence.
// Basically, we'll repeat that last frame until it has a frame to mix. Depending on how many times
// we've repeated that frame in a row, we'll gradually fade that repeated frame into silence.
// This improves the perceived quality of the audio slightly.
float repeatedFrameFadeFactor = 1.0f;
if (!streamToAdd->lastPopSucceeded()) {
if (_streamSettings._repetitionWithFade && !streamToAdd->getLastPopOutput().isNull()) {
// repetition with fade is enabled, and we do have a valid previous frame to repeat.
// calculate its fade factor, which depends on how many times it's already been repeated.
repeatedFrameFadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd->getConsecutiveNotMixedCount() - 1);
if (repeatedFrameFadeFactor == 0.0f) {
return 0;
}
} else {
return 0;
}
}
// at this point, we know streamToAdd's last pop output is valid
// if the frame we're about to mix is silent, bail
if (streamToAdd->getLastPopOutputLoudness() == 0.0f) {
return 0;
}
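// Illustration only, not from this commit: calculateRepeatedFrameFadeFactor()
// is assumed here to ramp linearly from full volume down to silence over a
// fixed number of consecutive repeats, matching the comment at the top of this
// function; the constant below is hypothetical.
//
//     float calculateRepeatedFrameFadeFactor(int indexOfRepeat) {
//         const int REPEATS_BEFORE_SILENCE = 10;  // hypothetical
//         float fade = 1.0f - (float)indexOfRepeat / (float)REPEATS_BEFORE_SILENCE;
//         return fade > 0.0f ? fade : 0.0f;
//     }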
float bearingRelativeAngleToSource = 0.0f;
float attenuationCoefficient = 1.0f;
int numSamplesDelay = 0;
float weakChannelAmplitudeRatio = 1.0f;
bool shouldAttenuate = (streamToAdd != listeningNodeStream);
if (shouldAttenuate) {
// if the two stream pointers do not match then these are different streams
@ -122,7 +156,7 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
if (streamToAdd->getLastPopOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
// according to mixer performance we have decided this does not get to be mixed in
// bail out
return;
return 0;
}
++_sumMixes;
@ -222,12 +256,13 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
int delayedChannelIndex = 0;
const int SINGLE_STEREO_OFFSET = 2;
float attenuationAndFade = attenuationCoefficient * repeatedFrameFadeFactor;
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {
// setup the int16_t variables for the two sample sets
correctStreamSample[0] = streamPopOutput[s / 2] * attenuationCoefficient;
correctStreamSample[1] = streamPopOutput[(s / 2) + 1] * attenuationCoefficient;
correctStreamSample[0] = streamPopOutput[s / 2] * attenuationAndFade;
correctStreamSample[1] = streamPopOutput[(s / 2) + 1] * attenuationAndFade;
delayedChannelIndex = s + (numSamplesDelay * 2) + delayedChannelOffset;
@ -243,7 +278,7 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
if (numSamplesDelay > 0) {
// if there was a sample delay for this stream, we need to pull samples prior to the popped output
// to stick at the beginning
float attenuationAndWeakChannelRatio = attenuationCoefficient * weakChannelAmplitudeRatio;
float attenuationAndWeakChannelRatioAndFade = attenuationCoefficient * weakChannelAmplitudeRatio * repeatedFrameFadeFactor;
AudioRingBuffer::ConstIterator delayStreamPopOutput = streamPopOutput - numSamplesDelay;
// TODO: delayStreamPopOutput may be inside the last frame written if the ringbuffer is completely full
@ -251,7 +286,7 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
for (int i = 0; i < numSamplesDelay; i++) {
int parentIndex = i * 2;
_clientSamples[parentIndex + delayedChannelOffset] += *delayStreamPopOutput * attenuationAndWeakChannelRatio;
_clientSamples[parentIndex + delayedChannelOffset] += *delayStreamPopOutput * attenuationAndWeakChannelRatioAndFade;
++delayStreamPopOutput;
}
}
@ -262,41 +297,82 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
attenuationCoefficient = 1.0f;
}
float attenuationAndFade = attenuationCoefficient * repeatedFrameFadeFactor;
for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s++) {
_clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(streamPopOutput[s / stereoDivider] * attenuationCoefficient),
_clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(streamPopOutput[s / stereoDivider] * attenuationAndFade),
MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
}
}
if (_enableFilter && shouldAttenuate) {
glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream->getPosition();
if (relativePosition.z < 0) { // if the source is behind us
AudioFilterHSF1s& penumbraFilter = streamToAdd->getFilter();
// calculate penumbra angle
float headPenumbraAngle = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
glm::normalize(relativePosition));
if (relativePosition.x < 0) {
headPenumbraAngle *= -1.0f; // [-pi/2,+pi/2]
}
const float SQUARE_ROOT_OF_TWO_OVER_TWO = 0.71f; // half power
const float ONE_OVER_TWO_PI = 1.0f / TWO_PI;
const float FILTER_CUTOFF_FREQUENCY_HZ = 4000.0f;
// calculate the updated gain, frequency and slope. this will be tuned over time.
const float penumbraFilterGainL = (-1.0f * ONE_OVER_TWO_PI * headPenumbraAngle) + SQUARE_ROOT_OF_TWO_OVER_TWO;
const float penumbraFilterGainR = (+1.0f * ONE_OVER_TWO_PI * headPenumbraAngle) + SQUARE_ROOT_OF_TWO_OVER_TWO;
const float penumbraFilterFrequency = FILTER_CUTOFF_FREQUENCY_HZ; // constant frequency
const float penumbraFilterSlope = SQUARE_ROOT_OF_TWO_OVER_TWO; // constant slope
qDebug() << "penumbra gainL="
<< penumbraFilterGainL
<< "penumbra gainR="
<< penumbraFilterGainR
<< "penumbraAngle="
<< headPenumbraAngle;
// set the gain on both filter channels
penumbraFilter.setParameters(0, 0, SAMPLE_RATE, penumbraFilterFrequency, penumbraFilterGainL, penumbraFilterSlope);
penumbraFilter.setParameters(0, 1, SAMPLE_RATE, penumbraFilterFrequency, penumbraFilterGainR, penumbraFilterSlope);
penumbraFilter.render(_clientSamples, _clientSamples, NETWORK_BUFFER_LENGTH_SAMPLES_STEREO / 2);
}
}
return 1;
}
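// Worked example (not in the commit): ONE_OVER_TWO_PI is roughly 0.159, so with
// headPenumbraAngle = PI / 2 (source behind and fully off to one side):
//     penumbraFilterGainL = -0.159 * (PI / 2) + 0.71 = 0.46
//     penumbraFilterGainR = +0.159 * (PI / 2) + 0.71 = 0.96
// while a source directly behind (angle 0) leaves both gains at 0.71 (half
// power), so the far ear is darkened progressively as the source moves off-axis.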
void AudioMixer::prepareMixForListeningNode(Node* node) {
int AudioMixer::prepareMixForListeningNode(Node* node) {
AvatarAudioStream* nodeAudioStream = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioStream();
// zero out the client mix for this node
memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
// loop through all other nodes that have sufficient audio to mix
int streamsMixed = 0;
foreach (const SharedNodePointer& otherNode, NodeList::getInstance()->getNodeHash()) {
if (otherNode->getLinkedData()) {
AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();
// enumerate the ARBs attached to the otherNode and add all that should be added to mix
const QHash<QUuid, PositionalAudioStream*>& otherNodeAudioStreams = otherNodeClientData->getAudioStreams();
QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
for (i = otherNodeAudioStreams.begin(); i != otherNodeAudioStreams.constEnd(); i++) {
for (i = otherNodeAudioStreams.constBegin(); i != otherNodeAudioStreams.constEnd(); i++) {
PositionalAudioStream* otherNodeStream = i.value();
if ((*otherNode != *node || otherNodeStream->shouldLoopbackForNode())
&& otherNodeStream->lastPopSucceeded()
&& otherNodeStream->getLastPopOutputTrailingLoudness() > 0.0f) {
addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
streamsMixed += addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
}
}
}
}
return streamsMixed;
}
void AudioMixer::readPendingDatagram(const QByteArray& receivedPacket, const HifiSockAddr& senderSockAddr) {
@ -332,7 +408,7 @@ void AudioMixer::readPendingDatagram(const QByteArray& receivedPacket, const Hif
void AudioMixer::sendStatsPacket() {
static QJsonObject statsObject;
statsObject["useDynamicJitterBuffers"] = _useDynamicJitterBuffers;
statsObject["useDynamicJitterBuffers"] = _streamSettings._dynamicJitterBuffers;
statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f;
statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio;
@ -358,9 +434,42 @@ void AudioMixer::sendStatsPacket() {
int sizeOfStats = 0;
int TOO_BIG_FOR_MTU = 1200; // some extra space for JSONification
QString property = "readPendingDatagram_calls_stats";
QString value = getReadPendingDatagramsCallsPerSecondsStatsString();
statsObject2[qPrintable(property)] = value;
somethingToSend = true;
sizeOfStats += property.size() + value.size();
property = "readPendingDatagram_packets_per_call_stats";
value = getReadPendingDatagramsPacketsPerCallStatsString();
statsObject2[qPrintable(property)] = value;
somethingToSend = true;
sizeOfStats += property.size() + value.size();
property = "readPendingDatagram_packets_time_per_call_stats";
value = getReadPendingDatagramsTimeStatsString();
statsObject2[qPrintable(property)] = value;
somethingToSend = true;
sizeOfStats += property.size() + value.size();
property = "readPendingDatagram_hashmatch_time_per_call_stats";
value = getReadPendingDatagramsHashMatchTimeStatsString();
statsObject2[qPrintable(property)] = value;
somethingToSend = true;
sizeOfStats += property.size() + value.size();
NodeList* nodeList = NodeList::getInstance();
int clientNumber = 0;
foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
// if we're too large, send the packet
if (sizeOfStats > TOO_BIG_FOR_MTU) {
nodeList->sendStatsToDomainServer(statsObject2);
sizeOfStats = 0;
statsObject2 = QJsonObject(); // clear it
somethingToSend = false;
}
clientNumber++;
AudioMixerClientData* clientData = static_cast<AudioMixerClientData*>(node->getLinkedData());
if (clientData) {
@ -370,14 +479,6 @@ void AudioMixer::sendStatsPacket() {
somethingToSend = true;
sizeOfStats += property.size() + value.size();
}
// if we're too large, send the packet
if (sizeOfStats > TOO_BIG_FOR_MTU) {
nodeList->sendStatsToDomainServer(statsObject2);
sizeOfStats = 0;
statsObject2 = QJsonObject(); // clear it
somethingToSend = false;
}
}
if (somethingToSend) {
@ -448,41 +549,81 @@ void AudioMixer::run() {
if (settingsObject.contains(AUDIO_GROUP_KEY)) {
QJsonObject audioGroupObject = settingsObject[AUDIO_GROUP_KEY].toObject();
// check the payload to see if we have asked for dynamicJitterBuffer support
const QString DYNAMIC_JITTER_BUFFER_JSON_KEY = "A-dynamic-jitter-buffer";
bool shouldUseDynamicJitterBuffers = audioGroupObject[DYNAMIC_JITTER_BUFFER_JSON_KEY].toBool();
if (shouldUseDynamicJitterBuffers) {
_streamSettings._dynamicJitterBuffers = audioGroupObject[DYNAMIC_JITTER_BUFFER_JSON_KEY].toBool();
if (_streamSettings._dynamicJitterBuffers) {
qDebug() << "Enable dynamic jitter buffers.";
_useDynamicJitterBuffers = true;
} else {
qDebug() << "Dynamic jitter buffers disabled.";
_useDynamicJitterBuffers = false;
}
bool ok;
const QString DESIRED_JITTER_BUFFER_FRAMES_KEY = "B-desired-jitter-buffer-frames";
_staticDesiredJitterBufferFrames = audioGroupObject[DESIRED_JITTER_BUFFER_FRAMES_KEY].toString().toInt(&ok);
const QString DESIRED_JITTER_BUFFER_FRAMES_KEY = "B-static-desired-jitter-buffer-frames";
_streamSettings._staticDesiredJitterBufferFrames = audioGroupObject[DESIRED_JITTER_BUFFER_FRAMES_KEY].toString().toInt(&ok);
if (!ok) {
_staticDesiredJitterBufferFrames = DEFAULT_DESIRED_JITTER_BUFFER_FRAMES;
_streamSettings._staticDesiredJitterBufferFrames = DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES;
}
qDebug() << "Static desired jitter buffer frames:" << _staticDesiredJitterBufferFrames;
qDebug() << "Static desired jitter buffer frames:" << _streamSettings._staticDesiredJitterBufferFrames;
const QString MAX_FRAMES_OVER_DESIRED_JSON_KEY = "C-max-frames-over-desired";
_maxFramesOverDesired = audioGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok);
_streamSettings._maxFramesOverDesired = audioGroupObject[MAX_FRAMES_OVER_DESIRED_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_maxFramesOverDesired = DEFAULT_MAX_FRAMES_OVER_DESIRED;
_streamSettings._maxFramesOverDesired = DEFAULT_MAX_FRAMES_OVER_DESIRED;
}
qDebug() << "Max frames over desired:" << _streamSettings._maxFramesOverDesired;
const QString USE_STDEV_FOR_DESIRED_CALC_JSON_KEY = "D-use-stdev-for-desired-calc";
_streamSettings._useStDevForJitterCalc = audioGroupObject[USE_STDEV_FOR_DESIRED_CALC_JSON_KEY].toBool();
if (_streamSettings._useStDevForJitterCalc) {
qDebug() << "Using Philip's stdev method for jitter calc if dynamic jitter buffers enabled";
} else {
qDebug() << "Using Fred's max-gap method for jitter calc if dynamic jitter buffers enabled";
}
qDebug() << "Max frames over desired:" << _maxFramesOverDesired;
const QString PRINT_STREAM_STATS_JSON_KEY = "H-print-stream-stats";
const QString WINDOW_STARVE_THRESHOLD_JSON_KEY = "E-window-starve-threshold";
_streamSettings._windowStarveThreshold = audioGroupObject[WINDOW_STARVE_THRESHOLD_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._windowStarveThreshold = DEFAULT_WINDOW_STARVE_THRESHOLD;
}
qDebug() << "Window A starve threshold:" << _streamSettings._windowStarveThreshold;
const QString WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY = "F-window-seconds-for-desired-calc-on-too-many-starves";
_streamSettings._windowSecondsForDesiredCalcOnTooManyStarves = audioGroupObject[WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._windowSecondsForDesiredCalcOnTooManyStarves = DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES;
}
qDebug() << "Window A length:" << _streamSettings._windowSecondsForDesiredCalcOnTooManyStarves << "seconds";
const QString WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY = "G-window-seconds-for-desired-reduction";
_streamSettings._windowSecondsForDesiredReduction = audioGroupObject[WINDOW_SECONDS_FOR_DESIRED_REDUCTION_JSON_KEY].toString().toInt(&ok);
if (!ok) {
_streamSettings._windowSecondsForDesiredReduction = DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION;
}
qDebug() << "Window B length:" << _streamSettings._windowSecondsForDesiredReduction << "seconds";
const QString REPETITION_WITH_FADE_JSON_KEY = "H-repetition-with-fade";
_streamSettings._repetitionWithFade = audioGroupObject[REPETITION_WITH_FADE_JSON_KEY].toBool();
if (_streamSettings._repetitionWithFade) {
qDebug() << "Repetition with fade enabled";
} else {
qDebug() << "Repetition with fade disabled";
}
const QString PRINT_STREAM_STATS_JSON_KEY = "I-print-stream-stats";
_printStreamStats = audioGroupObject[PRINT_STREAM_STATS_JSON_KEY].toBool();
if (_printStreamStats) {
qDebug() << "Stream stats will be printed to stdout";
}
const QString UNATTENUATED_ZONE_KEY = "D-unattenuated-zone";
const QString FILTER_KEY = "J-enable-filter";
_enableFilter = audioGroupObject[FILTER_KEY].toBool();
if (_enableFilter) {
qDebug() << "Filter enabled";
}
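For reference, the audio group object this block parses (including the Z-unattenuated-zone key just below) would look something like the following; the values here are the defaults from the settings description file later in this commit, and the numeric settings travel as strings, which is why the code goes through toString().toInt(&ok) with a fallback:

{
    "A-dynamic-jitter-buffer": false,
    "B-static-desired-jitter-buffer-frames": "1",
    "C-max-frames-over-desired": "10",
    "D-use-stdev-for-desired-calc": false,
    "E-window-starve-threshold": "3",
    "F-window-seconds-for-desired-calc-on-too-many-starves": "50",
    "G-window-seconds-for-desired-reduction": "10",
    "H-repetition-with-fade": false,
    "I-print-stream-stats": false,
    "J-enable-filter": false,
    "Z-unattenuated-zone": ""
}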
const QString UNATTENUATED_ZONE_KEY = "Z-unattenuated-zone";
QString unattenuatedZoneString = audioGroupObject[UNATTENUATED_ZONE_KEY].toString();
if (!unattenuatedZoneString.isEmpty()) {
@ -510,9 +651,8 @@ void AudioMixer::run() {
int nextFrame = 0;
QElapsedTimer timer;
timer.start();
char* clientMixBuffer = new char[NETWORK_BUFFER_LENGTH_BYTES_STEREO + sizeof(quint16)
+ numBytesForPacketHeaderGivenPacketType(PacketTypeMixedAudio)];
char clientMixBuffer[MAX_PACKET_SIZE];
int usecToSleep = BUFFER_SEND_INTERVAL_USECS;
@ -571,15 +711,13 @@ void AudioMixer::run() {
if (!hasRatioChanged) {
++framesSinceCutoffEvent;
}
bool sendAudioStreamStats = false;
quint64 now = usecTimestampNow();
if (now - _lastSendAudioStreamStatsTime > TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS) {
_lastSendAudioStreamStatsTime = now;
sendAudioStreamStats = true;
}
bool streamStatsPrinted = false;
quint64 now = usecTimestampNow();
if (now - _lastPerSecondCallbackTime > USECS_PER_SECOND) {
perSecondActions();
_lastPerSecondCallbackTime = now;
}
foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
if (node->getLinkedData()) {
AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();
@ -592,43 +730,52 @@ void AudioMixer::run() {
if (node->getType() == NodeType::Agent && node->getActiveSocket()
&& nodeData->getAvatarAudioStream()) {
prepareMixForListeningNode(node.data());
int streamsMixed = prepareMixForListeningNode(node.data());
// pack header
int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
char* dataAt = clientMixBuffer + numBytesPacketHeader;
char* dataAt;
if (streamsMixed > 0) {
// pack header
int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
dataAt = clientMixBuffer + numBytesPacketHeader;
// pack sequence number
quint16 sequence = nodeData->getOutgoingSequenceNumber();
memcpy(dataAt, &sequence, sizeof(quint16));
dataAt += sizeof(quint16);
// pack sequence number
quint16 sequence = nodeData->getOutgoingSequenceNumber();
memcpy(dataAt, &sequence, sizeof(quint16));
dataAt += sizeof(quint16);
// pack mixed audio samples
memcpy(dataAt, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
// pack mixed audio samples
memcpy(dataAt, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
} else {
// pack header
int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeSilentAudioFrame);
dataAt = clientMixBuffer + numBytesPacketHeader;
// pack sequence number
quint16 sequence = nodeData->getOutgoingSequenceNumber();
memcpy(dataAt, &sequence, sizeof(quint16));
dataAt += sizeof(quint16);
// pack number of silent audio samples
quint16 numSilentSamples = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;
memcpy(dataAt, &numSilentSamples, sizeof(quint16));
dataAt += sizeof(quint16);
}
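// Resulting packet layouts (from the memcpy calls above):
//     mixed:  [packet header][quint16 sequence][NETWORK_BUFFER_LENGTH_BYTES_STEREO bytes of samples]
//     silent: [packet header][quint16 sequence][quint16 numSilentSamples]
// Sending the compact silent flavor when no streams were mixed is why the
// Agent.cpp change in this commit accepts PacketTypeSilentAudioFrame alongside
// PacketTypeMixedAudio when parsing mixer output.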
// send mixed audio packet
nodeList->writeDatagram(clientMixBuffer, dataAt - clientMixBuffer, node);
nodeData->incrementOutgoingMixedAudioSequenceNumber();
// send an audio stream stats packet if it's time
if (sendAudioStreamStats) {
if (_sendAudioStreamStats) {
nodeData->sendAudioStreamStatsPackets(node);
if (_printStreamStats) {
printf("\nStats for agent %s:\n", node->getUUID().toString().toLatin1().data());
nodeData->printUpstreamDownstreamStats();
streamStatsPrinted = true;
}
_sendAudioStreamStats = false;
}
++_sumListeners;
}
}
}
if (streamStatsPrinted) {
printf("\n----------------------------------------------------------------\n");
}
++_numStatFrames;
@ -644,6 +791,90 @@ void AudioMixer::run() {
usleep(usecToSleep);
}
}
delete[] clientMixBuffer;
}
void AudioMixer::perSecondActions() {
_sendAudioStreamStats = true;
int callsLastSecond = _datagramsReadPerCallStats.getCurrentIntervalSamples();
_readPendingCallsPerSecondStats.update(callsLastSecond);
if (_printStreamStats) {
printf("\n================================================================================\n\n");
printf(" readPendingDatagram() calls per second | avg: %.2f, avg_30s: %.2f, last_second: %d\n",
_readPendingCallsPerSecondStats.getAverage(),
_readPendingCallsPerSecondStats.getWindowAverage(),
callsLastSecond);
printf(" Datagrams read per call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
_datagramsReadPerCallStats.getAverage(),
_datagramsReadPerCallStats.getWindowAverage(),
_datagramsReadPerCallStats.getCurrentIntervalAverage());
printf(" Usecs spent per readPendingDatagram() call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
_timeSpentPerCallStats.getAverage(),
_timeSpentPerCallStats.getWindowAverage(),
_timeSpentPerCallStats.getCurrentIntervalAverage());
printf(" Usecs spent per packetVersionAndHashMatch() call | avg: %.2f, avg_30s: %.2f, last_second: %.2f\n",
_timeSpentPerHashMatchCallStats.getAverage(),
_timeSpentPerHashMatchCallStats.getWindowAverage(),
_timeSpentPerHashMatchCallStats.getCurrentIntervalAverage());
double WINDOW_LENGTH_USECS = READ_DATAGRAMS_STATS_WINDOW_SECONDS * USECS_PER_SECOND;
printf(" %% time spent in readPendingDatagram() calls | avg_30s: %.6f%%, last_second: %.6f%%\n",
_timeSpentPerCallStats.getWindowSum() / WINDOW_LENGTH_USECS * 100.0,
_timeSpentPerCallStats.getCurrentIntervalSum() / USECS_PER_SECOND * 100.0);
printf("%% time spent in packetVersionAndHashMatch() calls: | avg_30s: %.6f%%, last_second: %.6f%%\n",
_timeSpentPerHashMatchCallStats.getWindowSum() / WINDOW_LENGTH_USECS * 100.0,
_timeSpentPerHashMatchCallStats.getCurrentIntervalSum() / USECS_PER_SECOND * 100.0);
foreach(const SharedNodePointer& node, NodeList::getInstance()->getNodeHash()) {
if (node->getLinkedData()) {
AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();
if (node->getType() == NodeType::Agent && node->getActiveSocket()) {
printf("\nStats for agent %s --------------------------------\n",
node->getUUID().toString().toLatin1().data());
nodeData->printUpstreamDownstreamStats();
}
}
}
}
_datagramsReadPerCallStats.currentIntervalComplete();
_timeSpentPerCallStats.currentIntervalComplete();
_timeSpentPerHashMatchCallStats.currentIntervalComplete();
}
QString AudioMixer::getReadPendingDatagramsCallsPerSecondsStatsString() const {
QString result = "calls_per_sec_avg_30s: " + QString::number(_readPendingCallsPerSecondStats.getWindowAverage(), 'f', 2)
+ " calls_last_sec: " + QString::number(_readPendingCallsPerSecondStats.getLastCompleteIntervalStats().getSum() + 0.5, 'f', 0);
return result;
}
QString AudioMixer::getReadPendingDatagramsPacketsPerCallStatsString() const {
QString result = "pkts_per_call_avg_30s: " + QString::number(_datagramsReadPerCallStats.getWindowAverage(), 'f', 2)
+ " pkts_per_call_avg_1s: " + QString::number(_datagramsReadPerCallStats.getLastCompleteIntervalStats().getAverage(), 'f', 2);
return result;
}
QString AudioMixer::getReadPendingDatagramsTimeStatsString() const {
QString result = "usecs_per_call_avg_30s: " + QString::number(_timeSpentPerCallStats.getWindowAverage(), 'f', 2)
+ " usecs_per_call_avg_1s: " + QString::number(_timeSpentPerCallStats.getLastCompleteIntervalStats().getAverage(), 'f', 2)
+ " prct_time_in_call_30s: " + QString::number(_timeSpentPerCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS*USECS_PER_SECOND) * 100.0, 'f', 6) + "%"
+ " prct_time_in_call_1s: " + QString::number(_timeSpentPerCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0, 'f', 6) + "%";
return result;
}
QString AudioMixer::getReadPendingDatagramsHashMatchTimeStatsString() const {
QString result = "usecs_per_hashmatch_avg_30s: " + QString::number(_timeSpentPerHashMatchCallStats.getWindowAverage(), 'f', 2)
+ " usecs_per_hashmatch_avg_1s: " + QString::number(_timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getAverage(), 'f', 2)
+ " prct_time_in_hashmatch_30s: " + QString::number(_timeSpentPerHashMatchCallStats.getWindowSum() / (READ_DATAGRAMS_STATS_WINDOW_SECONDS*USECS_PER_SECOND) * 100.0, 'f', 6) + "%"
+ " prct_time_in_hashmatch_1s: " + QString::number(_timeSpentPerHashMatchCallStats.getLastCompleteIntervalStats().getSum() / USECS_PER_SECOND * 100.0, 'f', 6) + "%";
return result;
}
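Each of these getters produces a flat key/value string. With hypothetical numbers, the last one would read:

usecs_per_hashmatch_avg_30s: 12.34 usecs_per_hashmatch_avg_1s: 10.50 prct_time_in_hashmatch_30s: 0.041133% prct_time_in_hashmatch_1s: 0.035000%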


@ -21,7 +21,8 @@ class AvatarAudioStream;
const int SAMPLE_PHASE_DELAY_AT_90 = 20;
const quint64 TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS = 1 * USECS_PER_SECOND;
const int READ_DATAGRAMS_STATS_WINDOW_SECONDS = 30;
/// Handles assignments of type AudioMixer - mixing streams of audio and re-distributing to various clients.
class AudioMixer : public ThreadedAssignment {
@ -38,21 +39,26 @@ public slots:
void sendStatsPacket();
static bool getUseDynamicJitterBuffers() { return _useDynamicJitterBuffers; }
static int getStaticDesiredJitterBufferFrames() { return _staticDesiredJitterBufferFrames; }
static int getMaxFramesOverDesired() { return _maxFramesOverDesired; }
static const InboundAudioStream::Settings& getStreamSettings() { return _streamSettings; }
private:
/// adds one stream to the mix for a listening node
void addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
int addStreamToMixForListeningNodeWithStream(PositionalAudioStream* streamToAdd,
AvatarAudioStream* listeningNodeStream);
/// prepares and sends a mix to one Node
void prepareMixForListeningNode(Node* node);
int prepareMixForListeningNode(Node* node);
// client samples capacity is larger than what will be sent to optimize mixing
// we are MMX adding 4 samples at a time so we need client samples to have an extra 4
int16_t _clientSamples[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (SAMPLE_PHASE_DELAY_AT_90 * 2)];
void perSecondActions();
QString getReadPendingDatagramsCallsPerSecondsStatsString() const;
QString getReadPendingDatagramsPacketsPerCallStatsString() const;
QString getReadPendingDatagramsTimeStatsString() const;
QString getReadPendingDatagramsHashMatchTimeStatsString() const;
float _trailingSleepRatio;
float _minAudibilityThreshold;
@ -63,13 +69,21 @@ private:
AABox* _sourceUnattenuatedZone;
AABox* _listenerUnattenuatedZone;
static bool _useDynamicJitterBuffers;
static int _staticDesiredJitterBufferFrames;
static int _maxFramesOverDesired;
static InboundAudioStream::Settings _streamSettings;
static bool _printStreamStats;
static bool _enableFilter;
quint64 _lastPerSecondCallbackTime;
quint64 _lastSendAudioStreamStatsTime;
bool _sendAudioStreamStats;
// stats
MovingMinMaxAvg<int> _datagramsReadPerCallStats; // update with # of datagrams read for each readPendingDatagrams call
MovingMinMaxAvg<quint64> _timeSpentPerCallStats; // update with usecs spent inside each readPendingDatagrams call
MovingMinMaxAvg<quint64> _timeSpentPerHashMatchCallStats; // update with usecs spent inside each packetVersionAndHashMatch call
MovingMinMaxAvg<int> _readPendingCallsPerSecondStats; // update with # of readPendingDatagrams calls in the last second
};
#endif // hifi_AudioMixer_h


@ -74,9 +74,7 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
quint8 channelFlag = *(reinterpret_cast<const quint8*>(channelFlagAt));
bool isStereo = channelFlag == 1;
_audioStreams.insert(nullUUID,
matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getUseDynamicJitterBuffers(),
AudioMixer::getStaticDesiredJitterBufferFrames(), AudioMixer::getMaxFramesOverDesired()));
_audioStreams.insert(nullUUID, matchingStream = new AvatarAudioStream(isStereo, AudioMixer::getStreamSettings()));
} else {
matchingStream = _audioStreams.value(nullUUID);
}
@ -88,9 +86,8 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
QUuid streamIdentifier = QUuid::fromRfc4122(packet.mid(bytesBeforeStreamIdentifier, NUM_BYTES_RFC4122_UUID));
if (!_audioStreams.contains(streamIdentifier)) {
_audioStreams.insert(streamIdentifier,
matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers(),
AudioMixer::getStaticDesiredJitterBufferFrames(), AudioMixer::getMaxFramesOverDesired()));
// we don't have this injected stream yet, so add it
_audioStreams.insert(streamIdentifier, matchingStream = new InjectedAudioStream(streamIdentifier, AudioMixer::getStreamSettings()));
} else {
matchingStream = _audioStreams.value(streamIdentifier);
}
@ -105,18 +102,15 @@ void AudioMixerClientData::checkBuffersBeforeFrameSend(AABox* checkSourceZone, A
QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
PositionalAudioStream* stream = i.value();
if (stream->popFrames(1, true) > 0) {
// this is a ring buffer that is ready to go
// calculate the trailing avg loudness for the next frame
// that would be mixed in
stream->updateLastPopOutputTrailingLoudness();
if (checkSourceZone && checkSourceZone->contains(stream->getPosition())) {
stream->setListenerUnattenuatedZone(listenerZone);
} else {
stream->setListenerUnattenuatedZone(NULL);
}
stream->updateLastPopOutputLoudnessAndTrailingLoudness();
}
if (checkSourceZone && checkSourceZone->contains(stream->getPosition())) {
stream->setListenerUnattenuatedZone(listenerZone);
} else {
stream->setListenerUnattenuatedZone(NULL);
}
}
}
@ -185,7 +179,9 @@ void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer&
// pack the calculated number of stream stats
for (int i = 0; i < numStreamStatsToPack; i++) {
AudioStreamStats streamStats = audioStreamsIterator.value()->updateSeqHistoryAndGetAudioStreamStats();
PositionalAudioStream* stream = audioStreamsIterator.value();
stream->perSecondCallbackForUpdatingStats();
AudioStreamStats streamStats = stream->getAudioStreamStats();
memcpy(dataAt, &streamStats, sizeof(AudioStreamStats));
dataAt += sizeof(AudioStreamStats);


@ -13,8 +13,8 @@
#include "AvatarAudioStream.h"
AvatarAudioStream::AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired) :
PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, dynamicJitterBuffer, staticDesiredJitterBufferFrames, maxFramesOverDesired)
AvatarAudioStream::AvatarAudioStream(bool isStereo, const InboundAudioStream::Settings& settings) :
PositionalAudioStream(PositionalAudioStream::Microphone, isStereo, settings)
{
}
@ -38,26 +38,9 @@ int AvatarAudioStream::parseStreamProperties(PacketType type, const QByteArray&
// read the positional data
readBytes += parsePositionalData(packetAfterSeqNum.mid(readBytes));
if (type == PacketTypeSilentAudioFrame) {
int16_t numSilentSamples;
memcpy(&numSilentSamples, packetAfterSeqNum.data() + readBytes, sizeof(int16_t));
readBytes += sizeof(int16_t);
numAudioSamples = numSilentSamples;
} else {
int numAudioBytes = packetAfterSeqNum.size() - readBytes;
numAudioSamples = numAudioBytes / sizeof(int16_t);
}
return readBytes;
}
int AvatarAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
int readBytes = 0;
if (type == PacketTypeSilentAudioFrame) {
writeDroppableSilentSamples(numAudioSamples);
} else {
// there is audio data to read
readBytes += _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
}
// calculate how many samples are in this packet
int numAudioBytes = packetAfterSeqNum.size() - readBytes;
numAudioSamples = numAudioBytes / sizeof(int16_t);
return readBytes;
}


@ -18,7 +18,7 @@
class AvatarAudioStream : public PositionalAudioStream {
public:
AvatarAudioStream(bool isStereo, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired);
AvatarAudioStream(bool isStereo, const InboundAudioStream::Settings& settings);
private:
// disallow copying of AvatarAudioStream objects
@ -26,7 +26,6 @@ private:
AvatarAudioStream& operator= (const AvatarAudioStream&);
int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
};
#endif // hifi_AvatarAudioStream_h


@ -9,8 +9,8 @@
"help": "Dynamically buffer client audio based on perceived jitter in packet receipt timing",
"default": false
},
"B-desired-jitter-buffer-frames": {
"label": "Desired Jitter Buffer Frames",
"B-static-desired-jitter-buffer-frames": {
"label": "Static Desired Jitter Buffer Frames",
"help": "If dynamic jitter buffers is disabled, this determines the target number of frames maintained by the AudioMixer's jitter buffers",
"placeholder": "1",
"default": "1"
@ -21,18 +21,54 @@
"placeholder": "10",
"default": "10"
},
"H-print-stream-stats": {
"D-use-stdev-for-desired-calc": {
"type": "checkbox",
"label": "Use Stdev for Desired Jitter Frames Calc:",
"help": "If checked, Philip's method (stdev of timegaps) is used to calculate desired jitter frames. Otherwise, Fred's method (max timegap) is used",
"default": false
},
"E-window-starve-threshold": {
"label": "Window Starve Threshold",
"help": "If this many starves occur in an N-second window (N is the number in the next field), then the desired jitter frames will be re-evaluated using Window A.",
"placeholder": "3",
"default": "3"
},
"F-window-seconds-for-desired-calc-on-too-many-starves": {
"label": "Timegaps Window (A) Seconds:",
"help": "Window A contains a history of timegaps. Its max timegap is used to re-evaluate the desired jitter frames when too many starves occur within it.",
"placeholder": "50",
"default": "50"
},
"G-window-seconds-for-desired-reduction": {
"label": "Timegaps Window (B) Seconds:",
"help": "Window B contains a history of timegaps. Its max timegap is used as a ceiling for the desired jitter frames value.",
"placeholder": "10",
"default": "10"
},
"H-repetition-with-fade": {
"type": "checkbox",
"label": "Repetition with Fade:",
"help": "If enabled, dropped frames and mixing during starves will repeat the last frame, eventually fading to silence",
"default": false
},
"I-print-stream-stats": {
"type": "checkbox",
"label": "Print Stream Stats:",
"help": "If enabled, audio upstream and downstream stats of each agent will be printed each second to stdout",
"default": false
},
"D-unattenuated-zone": {
"Z-unattenuated-zone": {
"label": "Unattenuated Zone",
"help": "Boxes for source and listener (corner x, corner y, corner z, size x, size y, size z, corner x, corner y, corner z, size x, size y, size z)",
"placeholder": "no zone",
"default": ""
},
"J-enable-filter": {
"type": "checkbox",
"label": "Enable Positional Filter",
"help": "If enabled, positional audio stream uses lowpass filter",
"default": false
}
}
}
}
}


@ -0,0 +1,47 @@
//
// PlayRecordingOnAC.js
// examples
//
// Created by Clément Brisset on 8/24/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
var filename = "http://your.recording.url";
var playFromCurrentLocation = true;
Avatar.faceModelURL = "http://public.highfidelity.io/models/heads/EvilPhilip_v7.fst";
Avatar.skeletonModelURL = "http://public.highfidelity.io/models/skeletons/Philip_Carl_Body_A-Pose.fst";
// Set position here if playFromCurrentLocation is true
Avatar.position = { x:1, y: 1, z: 1 };
Agent.isAvatar = true;
Avatar.loadRecording(filename);
var count = 300; // This is necessary to wait for the audio mixer to connect
function update(event) {
if (count > 0) {
count--;
return;
}
if (count == 0) {
Avatar.startPlaying(playFromCurrentLocation);
Avatar.play();
Vec3.print("Playing from ", Avatar.position);
count--;
}
if (Avatar.isPlaying()) {
Avatar.play();
} else {
Script.update.disconnect(update);
}
}
Script.update.connect(update);

examples/Recorder.js Normal file

@ -0,0 +1,205 @@
//
// Recorder.js
// examples
//
// Created by Clément Brisset on 8/20/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
Script.include("toolBars.js");
var recordingFile = "recording.rec";
var windowDimensions = Controller.getViewportDimensions();
var TOOL_ICON_URL = "http://s3-us-west-1.amazonaws.com/highfidelity-public/images/tools/";
var ALPHA_ON = 1.0;
var ALPHA_OFF = 0.7;
var COLOR_ON = { red: 128, green: 0, blue: 0 };
var COLOR_OFF = { red: 128, green: 128, blue: 128 };
Tool.IMAGE_WIDTH *= 0.7;
Tool.IMAGE_HEIGHT *= 0.7;
var toolBar = null;
var recordIcon;
var playIcon;
var saveIcon;
var loadIcon;
setupToolBar();
var timer = null;
setupTimer();
function setupToolBar() {
if (toolBar != null) {
print("Multiple calls to Recorder.js:setupToolBar()");
return;
}
toolBar = new ToolBar(0, 0, ToolBar.HORIZONTAL);
toolBar.setBack(COLOR_OFF, ALPHA_OFF);
recordIcon = toolBar.addTool({
imageURL: TOOL_ICON_URL + "record.svg",
width: Tool.IMAGE_WIDTH,
height: Tool.IMAGE_HEIGHT,
alpha: ALPHA_ON,
visible: true
}, false);
playIcon = toolBar.addTool({
imageURL: TOOL_ICON_URL + "play.svg",
width: Tool.IMAGE_WIDTH,
height: Tool.IMAGE_HEIGHT,
alpha: ALPHA_ON,
visible: true
}, false, false);
saveIcon = toolBar.addTool({
imageURL: TOOL_ICON_URL + "save.svg",
width: Tool.IMAGE_WIDTH,
height: Tool.IMAGE_HEIGHT,
alpha: ALPHA_ON,
visible: true
}, false, false);
loadIcon = toolBar.addTool({
imageURL: TOOL_ICON_URL + "load.svg",
width: Tool.IMAGE_WIDTH,
height: Tool.IMAGE_HEIGHT,
alpha: ALPHA_ON,
visible: true
}, false, false);
}
function setupTimer() {
timer = Overlays.addOverlay("text", {
font: { size: 20 },
text: (0.00).toFixed(3),
backgroundColor: COLOR_OFF,
x: 0, y: 0,
width: 100,
height: 100,
alpha: 1.0,
visible: true
});
}
function updateTimer() {
var text = "";
if (MyAvatar.isRecording()) {
text = formatTime(MyAvatar.recorderElapsed());
} else {
text = formatTime(MyAvatar.playerElapsed()) + " / " +
formatTime(MyAvatar.playerLength());
}
Overlays.editOverlay(timer, { text: text });
}
function formatTime(time) {
var MIN_PER_HOUR = 60;
var SEC_PER_MIN = 60;
var MSEC_PER_SEC = 1000;
var hours = Math.floor(time / (MSEC_PER_SEC * SEC_PER_MIN * MIN_PER_HOUR));
time -= hours * (MSEC_PER_SEC * SEC_PER_MIN * MIN_PER_HOUR);
var minutes = Math.floor(time / (MSEC_PER_SEC * SEC_PER_MIN));
time -= minutes * (MSEC_PER_SEC * SEC_PER_MIN);
var seconds = time / MSEC_PER_SEC;
var text = "";
text += (hours > 0) ? hours + ":" :
"";
text += (minutes > 0) ? ((minutes < 10 && text != "") ? "0" : "") + minutes + ":" :
"";
text += ((seconds < 10 && text != "") ? "0" : "") + seconds.toFixed(3);
return text;
}
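// Worked example (illustrative comment, not in the commit):
// formatTime(3723456) -> "1:02:03.456" (1 h, 2 min, 3.456 s); leading units
// are omitted when zero, so formatTime(65000) -> "1:05.000".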
function moveUI() {
var relative = { x: 30, y: 90 };
toolBar.move(relative.x,
windowDimensions.y - relative.y);
Overlays.editOverlay(timer, {
x: relative.x - 10,
y: windowDimensions.y - relative.y - 35,
width: 0,
height: 0
});
}
function mousePressEvent(event) {
clickedOverlay = Overlays.getOverlayAtPoint({ x: event.x, y: event.y });
print("Status: isPlaying=" + MyAvatar.isPlaying() + ", isRecording=" + MyAvatar.isRecording());
if (recordIcon === toolBar.clicked(clickedOverlay) && !MyAvatar.isPlaying()) {
if (!MyAvatar.isRecording()) {
MyAvatar.startRecording();
toolBar.setBack(COLOR_ON, ALPHA_ON);
} else {
MyAvatar.stopRecording();
MyAvatar.loadLastRecording();
toolBar.setBack(COLOR_OFF, ALPHA_OFF);
}
} else if (playIcon === toolBar.clicked(clickedOverlay) && !MyAvatar.isRecording()) {
if (MyAvatar.isPlaying()) {
MyAvatar.stopPlaying();
} else {
MyAvatar.setPlayFromCurrentLocation(true);
MyAvatar.setPlayerLoop(true);
MyAvatar.startPlaying(true);
}
} else if (saveIcon === toolBar.clicked(clickedOverlay)) {
if (!MyAvatar.isRecording()) {
recordingFile = Window.save("Save recording to file", ".", "*.rec");
MyAvatar.saveRecording(recordingFile);
}
} else if (loadIcon === toolBar.clicked(clickedOverlay)) {
if (!MyAvatar.isRecording()) {
recordingFile = Window.browse("Load recorcding from file", ".", "*.rec");
MyAvatar.loadRecording(recordingFile);
}
} else {
}
}
function update() {
var newDimensions = Controller.getViewportDimensions();
if (windowDimensions.x != newDimensions.x ||
windowDimensions.y != newDimensions.y) {
windowDimensions = newDimensions;
moveUI();
}
updateTimer();
}
function scriptEnding() {
if (MyAvatar.isRecording()) {
MyAvatar.stopRecording();
}
if (MyAvatar.isPlaying()) {
MyAvatar.stopPlaying();
}
toolBar.cleanup();
Overlays.deleteOverlay(timer);
}
Controller.mousePressEvent.connect(mousePressEvent);
Script.update.connect(update);
Script.scriptEnding.connect(scriptEnding);
// Should be called last to put everything into position
moveUI();


@ -59,6 +59,13 @@ UnitTest.prototype.assertEquals = function(expected, actual, message) {
}
};
UnitTest.prototype.assertContains = function (expected, actual, message) {
this.numAssertions++;
if (actual.indexOf(expected) == -1) {
throw new AssertionException(expected, actual, message);
}
};
UnitTest.prototype.assertHasProperty = function(property, actual, message) {
this.numAssertions++;
if (actual[property] === undefined) {

File diff suppressed because it is too large.


@ -51,9 +51,6 @@ var lastVoxelScale = 0;
var dragStart = { x: 0, y: 0 };
var wheelPixelsMoved = 0;
var mouseX = 0;
var mouseY = 0;
// Create a table of the different colors you can choose
var colors = new Array();
colors[0] = { red: 120, green: 181, blue: 126 };
@ -1041,8 +1038,6 @@ function mousePressEvent(event) {
// TODO: does any of this stuff need to execute if we're panning or orbiting?
trackMouseEvent(event); // used by preview support
mouseX = event.x;
mouseY = event.y;
var pickRay = Camera.computePickRay(event.x, event.y);
var intersection = Voxels.findRayIntersection(pickRay);
audioOptions.position = Vec3.sum(pickRay.origin, pickRay.direction);
@ -1296,40 +1291,30 @@ function mouseMoveEvent(event) {
}
if (isAdding) {
// Watch the drag direction to tell which way to 'extrude' this voxel
var pickRay = Camera.computePickRay(event.x, event.y);
var distance = Vec3.length(Vec3.subtract(pickRay.origin, lastVoxelPosition));
var mouseSpot = Vec3.sum(Vec3.multiply(pickRay.direction, distance), pickRay.origin);
var delta = Vec3.subtract(mouseSpot, lastVoxelPosition);
if (!isExtruding) {
var pickRay = Camera.computePickRay(event.x, event.y);
var lastVoxelDistance = { x: pickRay.origin.x - lastVoxelPosition.x,
y: pickRay.origin.y - lastVoxelPosition.y,
z: pickRay.origin.z - lastVoxelPosition.z };
var distance = Vec3.length(lastVoxelDistance);
var mouseSpot = { x: pickRay.direction.x * distance, y: pickRay.direction.y * distance, z: pickRay.direction.z * distance };
mouseSpot.x += pickRay.origin.x;
mouseSpot.y += pickRay.origin.y;
mouseSpot.z += pickRay.origin.z;
var dx = mouseSpot.x - lastVoxelPosition.x;
var dy = mouseSpot.y - lastVoxelPosition.y;
var dz = mouseSpot.z - lastVoxelPosition.z;
// Use the drag direction to tell which way to 'extrude' this voxel
extrudeScale = lastVoxelScale;
extrudeDirection = { x: 0, y: 0, z: 0 };
isExtruding = true;
if (dx > lastVoxelScale) extrudeDirection.x = extrudeScale;
else if (dx < -lastVoxelScale) extrudeDirection.x = -extrudeScale;
else if (dy > lastVoxelScale) extrudeDirection.y = extrudeScale;
else if (dy < -lastVoxelScale) extrudeDirection.y = -extrudeScale;
else if (dz > lastVoxelScale) extrudeDirection.z = extrudeScale;
else if (dz < -lastVoxelScale) extrudeDirection.z = -extrudeScale;
if (delta.x > lastVoxelScale) extrudeDirection.x = 1;
else if (delta.x < -lastVoxelScale) extrudeDirection.x = -1;
else if (delta.y > lastVoxelScale) extrudeDirection.y = 1;
else if (delta.y < -lastVoxelScale) extrudeDirection.y = -1;
else if (delta.z > lastVoxelScale) extrudeDirection.z = 1;
else if (delta.z < -lastVoxelScale) extrudeDirection.z = -1;
else isExtruding = false;
} else {
// We have got an extrusion direction, now look for mouse move beyond threshold to add new voxel
var dx = event.x - mouseX;
var dy = event.y - mouseY;
if (Math.sqrt(dx*dx + dy*dy) > PIXELS_PER_EXTRUDE_VOXEL) {
lastVoxelPosition = Vec3.sum(lastVoxelPosition, extrudeDirection);
Voxels.setVoxel(lastVoxelPosition.x, lastVoxelPosition.y, lastVoxelPosition.z,
extrudeScale, lastVoxelColor.red, lastVoxelColor.green, lastVoxelColor.blue);
mouseX = event.x;
mouseY = event.y;
// Extrude if mouse has moved by a voxel in the extrude direction
var distanceInDirection = Vec3.dot(delta, extrudeDirection);
if (distanceInDirection > extrudeScale) {
lastVoxelPosition = Vec3.sum(lastVoxelPosition, Vec3.multiply(extrudeDirection, extrudeScale));
Voxels.setVoxel(lastVoxelPosition.x, lastVoxelPosition.y, lastVoxelPosition.z, extrudeScale,
lastVoxelColor.red, lastVoxelColor.green, lastVoxelColor.blue);
}
}
}
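// Note (illustrative, not in the commit): extrudeDirection is now a unit axis
// vector, so Vec3.dot(delta, extrudeDirection) measures how far the drag has
// moved along the chosen axis; each time that exceeds one voxel edge
// (extrudeScale), the next voxel is placed extrudeScale further along the axis.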

examples/frisbee.js Normal file

@ -0,0 +1,443 @@
//
// frisbee.js
// examples
//
// Created by Thijs Wenker on 7/5/14.
// Copyright 2014 High Fidelity, Inc.
//
// Requirements: Razer Hydras
//
// Fun game to throw frisbees to each other. Hold the trigger on either of the Hydras to create or catch a frisbee.
//
// Tip: use this together with the squeezeHands.js script to make it look nicer.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
Script.include("toolBars.js");
const LEFT_PALM = 0;
const LEFT_TIP = 1;
const LEFT_BUTTON_FWD = 5;
const LEFT_BUTTON_3 = 3;
const RIGHT_PALM = 2;
const RIGHT_TIP = 3;
const RIGHT_BUTTON_FWD = 11;
const RIGHT_BUTTON_3 = 9;
const FRISBEE_RADIUS = 0.08;
const GRAVITY_STRENGTH = 0.5;
const CATCH_RADIUS = 0.5;
const MIN_SIMULATION_SPEED = 0.15;
const THROWN_VELOCITY_SCALING = 1.5;
const SOUNDS_ENABLED = true;
const FRISBEE_BUTTON_URL = "http://test.thoys.nl/hifi/images/frisbee/frisbee_button_by_Judas.svg";
const FRISBEE_MODEL_SCALE = 275;
const FRISBEE_MENU = "Toys>Frisbee";
const FRISBEE_DESIGN_MENU = "Toys>Frisbee>Design";
const FRISBEE_ENABLED_SETTING = "Frisbee>Enabled";
const FRISBEE_CREATENEW_SETTING = "Frisbee>CreateNew";
const FRISBEE_DESIGN_SETTING = "Frisbee>Design";
const FRISBEE_FORCE_MOUSE_CONTROLS_SETTING = "Frisbee>ForceMouseControls";
// Add your own designs in FRISBEE_DESIGNS; be sure to put "frisbee" in the URL if you want others to be able to catch it without having a copy of your frisbee script.
const FRISBEE_DESIGNS = [
{"name":"Interface", "model":"http://test.thoys.nl/hifi/models/frisbee/frisbee.fbx"},
{"name":"Pizza", "model":"http://test.thoys.nl/hifi/models/frisbee/pizza.fbx"},
{"name":"Swirl", "model":"http://test.thoys.nl/hifi/models/frisbee/swirl.fbx"},
{"name":"Mayan", "model":"http://test.thoys.nl/hifi/models/frisbee/mayan.fbx"},
];
const FRISBEE_MENU_DESIGN_POSTFIX = " Design";
const FRISBEE_DESIGN_RANDOM = "Random";
const SPIN_MULTIPLIER = 1000;
const FRISBEE_LIFETIME = 300; // 5 minutes
var windowDimensions = Controller.getViewportDimensions();
var toolHeight = 50;
var toolWidth = 50;
var frisbeeToggle;
var toolBar;
var frisbeeEnabled = true;
var newfrisbeeEnabled = false;
var forceMouseControls = false;
var hydrasConnected = false;
var selectedDesign = FRISBEE_DESIGN_RANDOM;
function loadSettings() {
frisbeeEnabled = Settings.getValue(FRISBEE_ENABLED_SETTING, "true") == "true";
newfrisbeeEnabled = Settings.getValue(FRISBEE_CREATENEW_SETTING, "false") == "true";
forceMouseControls = Settings.getValue(FRISBEE_FORCE_MOUSE_CONTROLS_SETTING, "false") == "true";
selectedDesign = Settings.getValue(FRISBEE_DESIGN_SETTING, "Random");
}
function saveSettings() {
Settings.setValue(FRISBEE_ENABLED_SETTING, frisbeeEnabled ? "true" : "false");
Settings.setValue(FRISBEE_CREATENEW_SETTING, newfrisbeeEnabled ? "true" : "false");
Settings.setValue(FRISBEE_FORCE_MOUSE_CONTROLS_SETTING, forceMouseControls ? "true" : "false");
Settings.setValue(FRISBEE_DESIGN_SETTING, selectedDesign);
}
function moveOverlays() {
var newViewPort = Controller.getViewportDimensions();
if (typeof(toolBar) === 'undefined') {
initToolBar();
} else if (windowDimensions.x == newViewPort.x &&
windowDimensions.y == newViewPort.y) {
return;
}
windowDimensions = newViewPort;
var toolsX = windowDimensions.x - 8 - toolBar.width;
var toolsY = (windowDimensions.y - toolBar.height) / 2 + 80;
toolBar.move(toolsX, toolsY);
}
function frisbeeURL() {
return selectedDesign == FRISBEE_DESIGN_RANDOM ? FRISBEE_DESIGNS[Math.floor(Math.random() * FRISBEE_DESIGNS.length)].model : getFrisbee(selectedDesign).model;
}
// This function checks whether the modelURL is one of our designs or contains "frisbee" in it.
function validFrisbeeURL(frisbeeURL) {
for (var frisbee in FRISBEE_DESIGNS) {
if (FRISBEE_DESIGNS[frisbee].model == frisbeeURL) {
return true;
}
}
return frisbeeURL.toLowerCase().indexOf("frisbee") !== -1;
}
function getFrisbee(frisbeeName) {
for (var frisbee in FRISBEE_DESIGNS) {
if (FRISBEE_DESIGNS[frisbee].name == frisbeeName) {
return FRISBEE_DESIGNS[frisbee];
}
}
return undefined;
}
function Hand(name, palm, tip, forwardButton, button3, trigger) {
this.name = name;
this.palm = palm;
this.tip = tip;
this.forwardButton = forwardButton;
this.button3 = button3;
this.trigger = trigger;
this.holdingFrisbee = false;
this.particle = false;
this.palmPosition = function() { return Controller.getSpatialControlPosition(this.palm); }
this.grabButtonPressed = function() {
return (
Controller.isButtonPressed(this.forwardButton) ||
Controller.isButtonPressed(this.button3) ||
Controller.getTriggerValue(this.trigger) > 0.5
)
};
this.holdPosition = function() { return this.palm == LEFT_PALM ? MyAvatar.getLeftPalmPosition() : MyAvatar.getRightPalmPosition(); };
this.holdRotation = function() {
var q = Controller.getSpatialControlRawRotation(this.palm);
q = Quat.multiply(MyAvatar.orientation, q);
return {x: q.x, y: q.y, z: q.z, w: q.w};
};
this.tipVelocity = function() { return Controller.getSpatialControlVelocity(this.tip); };
}
function MouseControl(button) {
this.button = button;
}
var leftHand = new Hand("LEFT", LEFT_PALM, LEFT_TIP, LEFT_BUTTON_FWD, LEFT_BUTTON_3, 0);
var rightHand = new Hand("RIGHT", RIGHT_PALM, RIGHT_TIP, RIGHT_BUTTON_FWD, RIGHT_BUTTON_3, 1);
var leftMouseControl = new MouseControl("LEFT");
var middleMouseControl = new MouseControl("MIDDLE");
var rightMouseControl = new MouseControl("RIGHT");
var mouseControls = [leftMouseControl, middleMouseControl, rightMouseControl];
var currentMouseControl = false;
var newSound = new Sound("https://dl.dropboxusercontent.com/u/1864924/hifi-sounds/throw.raw");
var catchSound = new Sound("https://dl.dropboxusercontent.com/u/1864924/hifi-sounds/catch.raw");
var throwSound = new Sound("http://highfidelity-public.s3-us-west-1.amazonaws.com/sounds/Switches%20and%20sliders/slider%20-%20whoosh1.raw");
var simulatedFrisbees = [];
var wantDebugging = false;
function debugPrint(message) {
if (wantDebugging) {
print(message);
}
}
function playSound(sound, position) {
if (!SOUNDS_ENABLED) {
return;
}
var options = new AudioInjectionOptions();
options.position = position;
options.volume = 1.0;
Audio.playSound(sound, options);
}
function cleanupFrisbees() {
simulatedFrisbees = [];
var particles = Particles.findParticles(MyAvatar.position, 1000);
for (particle in particles) {
Particles.deleteParticle(particles[particle]);
}
}
function checkControllerSide(hand) {
// If I don't currently have a frisbee in my hand, then try to catch closest one
if (!hand.holdingFrisbee && hand.grabButtonPressed()) {
var closestParticle = Particles.findClosestParticle(hand.palmPosition(), CATCH_RADIUS);
var modelUrl = Particles.getParticleProperties(closestParticle).modelURL;
if (closestParticle.isKnownID && validFrisbeeURL(modelUrl)) {
Particles.editParticle(closestParticle, {modelScale: 1, inHand: true, position: hand.holdPosition(), shouldDie: true});
Particles.deleteParticle(closestParticle);
debugPrint(hand.name + " HAND - CAUGHT SOMETHING!!");
var properties = {
position: hand.holdPosition(),
velocity: { x: 0, y: 0, z: 0},
gravity: { x: 0, y: 0, z: 0},
inHand: true,
radius: FRISBEE_RADIUS,
damping: 0.999,
modelURL: modelUrl,
modelScale: FRISBEE_MODEL_SCALE,
modelRotation: hand.holdRotation(),
lifetime: FRISBEE_LIFETIME
};
newParticle = Particles.addParticle(properties);
hand.holdingFrisbee = true;
hand.particle = newParticle;
playSound(catchSound, hand.holdPosition());
return; // exit early
}
}
// If '3' is pressed, and not holding a frisbee, make a new one
if (hand.grabButtonPressed() && !hand.holdingFrisbee && newfrisbeeEnabled) {
var properties = {
position: hand.holdPosition(),
velocity: { x: 0, y: 0, z: 0},
gravity: { x: 0, y: 0, z: 0},
inHand: true,
radius: FRISBEE_RADIUS,
damping: 0.999,
modelURL: frisbeeURL(),
modelScale: FRISBEE_MODEL_SCALE,
modelRotation: hand.holdRotation(),
lifetime: FRISBEE_LIFETIME
};
newParticle = Particles.addParticle(properties);
hand.holdingFrisbee = true;
hand.particle = newParticle;
// Play a new frisbee sound
playSound(newSound, hand.holdPosition());
return; // exit early
}
if (hand.holdingFrisbee) {
// If holding the frisbee keep it in the palm
if (hand.grabButtonPressed()) {
debugPrint(">>>>> " + hand.name + "-FRISBEE IN HAND, grabbing, hold and move");
var properties = {
position: hand.holdPosition(),
modelRotation: hand.holdRotation()
};
Particles.editParticle(hand.particle, properties);
} else {
debugPrint(">>>>> " + hand.name + "-FRISBEE IN HAND, not grabbing, THROW!!!");
// If frisbee just released, add velocity to it!
var properties = {
velocity: Vec3.multiply(hand.tipVelocity(), THROWN_VELOCITY_SCALING),
inHand: false,
lifetime: FRISBEE_LIFETIME,
gravity: { x: 0, y: -GRAVITY_STRENGTH, z: 0},
modelRotation: hand.holdRotation()
};
Particles.editParticle(hand.particle, properties);
simulatedFrisbees.push(hand.particle);
hand.holdingFrisbee = false;
hand.particle = false;
playSound(throwSound, hand.holdPosition());
}
}
}
function initToolBar() {
toolBar = new ToolBar(0, 0, ToolBar.VERTICAL);
frisbeeToggle = toolBar.addTool({
imageURL: FRISBEE_BUTTON_URL,
subImage: { x: 0, y: Tool.IMAGE_WIDTH, width: Tool.IMAGE_WIDTH, height: Tool.IMAGE_HEIGHT },
width: toolWidth,
height: toolHeight,
visible: true,
alpha: 0.9
}, true);
enableNewFrisbee(newfrisbeeEnabled);
}
function hydraCheck() {
var numberOfButtons = Controller.getNumberOfButtons();
var numberOfTriggers = Controller.getNumberOfTriggers();
var numberOfSpatialControls = Controller.getNumberOfSpatialControls();
var controllersPerTrigger = numberOfSpatialControls / numberOfTriggers;
hydrasConnected = (numberOfButtons == 12 && numberOfTriggers == 2 && controllersPerTrigger == 2);
return hydrasConnected;
}
function checkController(deltaTime) {
moveOverlays();
if (!frisbeeEnabled) {
return;
}
// hydras drive both hand controls when connected
if (hydraCheck()) {
checkControllerSide(leftHand);
checkControllerSide(rightHand);
}
if (!hydrasConnected || forceMouseControls) {
//TODO: add mouse cursor control code here.
}
}
function controlFrisbees(deltaTime) {
var killSimulations = [];
for (var frisbee in simulatedFrisbees) {
var properties = Particles.getParticleProperties(simulatedFrisbees[frisbee]);
// project the velocity onto the horizontal (XZ) plane to get the ground speed
var speed = Vec3.length({x: properties.velocity.x, y: 0, z: properties.velocity.z});
if (speed < MIN_SIMULATION_SPEED) {
//kill the frisbee simulation when speed is low
killSimulations.push(frisbee);
continue;
}
Particles.editParticle(simulatedFrisbees[frisbee], {modelRotation: Quat.multiply(properties.modelRotation, Quat.fromPitchYawRollDegrees(0, speed * deltaTime * SPIN_MULTIPLIER, 0))});
}
for (var i = killSimulations.length - 1; i >= 0; i--) {
simulatedFrisbees.splice(killSimulations[i], 1);
}
}
// guards against hydra-cursor events that land outside the window bounds
function withinBounds(coords) {
return coords.x >= 0 && coords.x < windowDimensions.x && coords.y >= 0 && coords.y < windowDimensions.y;
}
function mouseMoveEvent(event) {
//TODO: mouse controls //print(withinBounds(event)); //print("move"+event.x);
}
function mousePressEvent(event) {
print(event.x);
var clickedOverlay = Overlays.getOverlayAtPoint({x: event.x, y: event.y});
if (frisbeeToggle == toolBar.clicked(clickedOverlay)) {
newfrisbeeEnabled = !newfrisbeeEnabled;
saveSettings();
enableNewFrisbee(newfrisbeeEnabled);
}
}
function enableNewFrisbee(enable) {
if (toolBar.numberOfTools() > 0) {
toolBar.tools[0].select(enable);
}
}
function mouseReleaseEvent(event) {
//TODO: mouse controls //print(JSON.stringify(event));
}
function setupMenus() {
Menu.addMenu(FRISBEE_MENU);
Menu.addMenuItem({
menuName: FRISBEE_MENU,
menuItemName: "Frisbee Enabled",
isCheckable: true,
isChecked: frisbeeEnabled
});
Menu.addMenuItem({
menuName: FRISBEE_MENU,
menuItemName: "Cleanup Frisbees"
});
Menu.addMenuItem({
menuName: FRISBEE_MENU,
menuItemName: "Force Mouse Controls",
isCheckable: true,
isChecked: forceMouseControls
});
Menu.addMenu(FRISBEE_DESIGN_MENU);
Menu.addMenuItem({
menuName: FRISBEE_DESIGN_MENU,
menuItemName: FRISBEE_DESIGN_RANDOM + FRISBEE_MENU_DESIGN_POSTFIX,
isCheckable: true,
isChecked: selectedDesign == FRISBEE_DESIGN_RANDOM
});
for (var frisbee in FRISBEE_DESIGNS) {
Menu.addMenuItem({
menuName: FRISBEE_DESIGN_MENU,
menuItemName: FRISBEE_DESIGNS[frisbee].name + FRISBEE_MENU_DESIGN_POSTFIX,
isCheckable: true,
isChecked: selectedDesign == FRISBEE_DESIGNS[frisbee].name
});
}
}
//startup calls:
loadSettings();
setupMenus();
function scriptEnding() {
toolBar.cleanup();
Menu.removeMenu(FRISBEE_MENU);
}
function menuItemEvent(menuItem) {
if (menuItem == "Cleanup Frisbees") {
cleanupFrisbees();
return;
} else if (menuItem == "Frisbee Enabled") {
frisbeeEnabled = Menu.isOptionChecked(menuItem);
saveSettings();
return;
} else if (menuItem == "Force Mouse Controls") {
forceMouseControls = Menu.isOptionChecked(menuItem);
saveSettings();
return;
}
if (menuItem.indexOf(FRISBEE_MENU_DESIGN_POSTFIX, menuItem.length - FRISBEE_MENU_DESIGN_POSTFIX.length) !== -1) {
var item_name = menuItem.substring(0, menuItem.length - FRISBEE_MENU_DESIGN_POSTFIX.length);
if (item_name == FRISBEE_DESIGN_RANDOM || getFrisbee(item_name) != undefined) {
Menu.setIsOptionChecked(selectedDesign + FRISBEE_MENU_DESIGN_POSTFIX, false);
selectedDesign = item_name;
saveSettings();
Menu.setIsOptionChecked(selectedDesign + FRISBEE_MENU_DESIGN_POSTFIX, true);
}
}
}
// register callbacks for mouse, menu, and update events
Controller.mouseMoveEvent.connect(mouseMoveEvent);
Controller.mousePressEvent.connect(mousePressEvent);
Controller.mouseReleaseEvent.connect(mouseReleaseEvent);
Menu.menuItemEvent.connect(menuItemEvent);
Script.scriptEnding.connect(scriptEnding);
Script.update.connect(checkController);
Script.update.connect(controlFrisbees);

View file

@ -21,10 +21,10 @@ var position = { x: MyAvatar.position.x, y: MyAvatar.position.y, z: MyAvatar.pos
var joysticksCaptured = false;
var THRUST_CONTROLLER = 0;
var VIEW_CONTROLLER = 1;
var INITIAL_THRUST_MULTPLIER = 1.0;
var INITIAL_THRUST_MULTIPLIER = 1.0;
var THRUST_INCREASE_RATE = 1.05;
var MAX_THRUST_MULTIPLIER = 75.0;
var thrustMultiplier = INITIAL_THRUST_MULTPLIER;
var thrustMultiplier = INITIAL_THRUST_MULTIPLIER;
var grabDelta = { x: 0, y: 0, z: 0};
var grabStartPosition = { x: 0, y: 0, z: 0};
var grabDeltaVelocity = { x: 0, y: 0, z: 0};
@ -34,6 +34,8 @@ var grabbingWithRightHand = false;
var wasGrabbingWithRightHand = false;
var grabbingWithLeftHand = false;
var wasGrabbingWithLeftHand = false;
var movingWithHead = false;
var headStartPosition, headStartDeltaPitch, headStartFinalPitch, headStartRoll, headStartYaw;
var EPSILON = 0.000001;
var velocity = { x: 0, y: 0, z: 0};
var THRUST_MAG_UP = 100.0;
@ -241,6 +243,47 @@ function handleGrabBehavior(deltaTime) {
wasGrabbingWithLeftHand = grabbingWithLeftHand;
}
var HEAD_MOVE_DEAD_ZONE = 0.0;
var HEAD_STRAFE_DEAD_ZONE = 0.0;
var HEAD_ROTATE_DEAD_ZONE = 0.0;
var HEAD_THRUST_FWD_SCALE = 12000.0;
var HEAD_THRUST_STRAFE_SCALE = 1000.0;
var HEAD_YAW_RATE = 2.0;
var HEAD_PITCH_RATE = 1.0;
var HEAD_ROLL_THRUST_SCALE = 75.0;
var HEAD_PITCH_LIFT_THRUST = 3.0;
function moveWithHead(deltaTime) {
if (movingWithHead) {
var deltaYaw = MyAvatar.getHeadFinalYaw() - headStartYaw;
var deltaPitch = MyAvatar.getHeadDeltaPitch() - headStartDeltaPitch;
var bodyLocalCurrentHeadVector = Vec3.subtract(MyAvatar.getHeadPosition(), MyAvatar.position);
bodyLocalCurrentHeadVector = Vec3.multiplyQbyV(Quat.angleAxis(-deltaYaw, {x:0, y: 1, z:0}), bodyLocalCurrentHeadVector);
var headDelta = Vec3.subtract(bodyLocalCurrentHeadVector, headStartPosition);
headDelta = Vec3.multiplyQbyV(Quat.inverse(Camera.getOrientation()), headDelta);
headDelta.y = 0.0; // Don't respond to any of the vertical component of head motion
// Thrust based on leaning forward and side-to-side
if (Math.abs(headDelta.z) > HEAD_MOVE_DEAD_ZONE) {
MyAvatar.addThrust(Vec3.multiply(Quat.getFront(Camera.getOrientation()), -headDelta.z * HEAD_THRUST_FWD_SCALE * deltaTime));
}
if (Math.abs(headDelta.x) > HEAD_STRAFE_DEAD_ZONE) {
MyAvatar.addThrust(Vec3.multiply(Quat.getRight(Camera.getOrientation()), headDelta.x * HEAD_THRUST_STRAFE_SCALE * deltaTime));
}
if (Math.abs(deltaYaw) > HEAD_ROTATE_DEAD_ZONE) {
var orientation = Quat.multiply(Quat.angleAxis(deltaYaw * HEAD_YAW_RATE * deltaTime, {x:0, y: 1, z:0}), MyAvatar.orientation);
MyAvatar.orientation = orientation;
}
// Thrust Up/Down based on head pitch
MyAvatar.addThrust(Vec3.multiply({ x:0, y:1, z:0 }, (MyAvatar.getHeadFinalPitch() - headStartFinalPitch) * HEAD_PITCH_LIFT_THRUST * deltaTime));
// For head trackers, adjust pitch by head pitch
MyAvatar.headPitch += deltaPitch * HEAD_PITCH_RATE * deltaTime;
// Thrust strafe based on roll angle
MyAvatar.addThrust(Vec3.multiply(Quat.getRight(Camera.getOrientation()), -(MyAvatar.getHeadFinalRoll() - headStartRoll) * HEAD_ROLL_THRUST_SCALE * deltaTime));
}
}
// Update for joysticks and move button
function flyWithHydra(deltaTime) {
var thrustJoystickPosition = Controller.getJoystickPosition(THRUST_CONTROLLER);
@ -262,7 +305,7 @@ function flyWithHydra(deltaTime) {
thrustJoystickPosition.x * thrustMultiplier * deltaTime);
MyAvatar.addThrust(thrustRight);
} else {
thrustMultiplier = INITIAL_THRUST_MULTPLIER;
thrustMultiplier = INITIAL_THRUST_MULTIPLIER;
}
// View Controller
@ -280,6 +323,7 @@ function flyWithHydra(deltaTime) {
MyAvatar.headPitch = newPitch;
}
handleGrabBehavior(deltaTime);
moveWithHead(deltaTime);
displayDebug();
}
@ -296,3 +340,19 @@ function scriptEnding() {
}
Script.scriptEnding.connect(scriptEnding);
Controller.keyPressEvent.connect(function(event) {
if (event.text == "SPACE" && !movingWithHead) {
movingWithHead = true;
headStartPosition = Vec3.subtract(MyAvatar.getHeadPosition(), MyAvatar.position);
headStartDeltaPitch = MyAvatar.getHeadDeltaPitch();
headStartFinalPitch = MyAvatar.getHeadFinalPitch();
headStartRoll = MyAvatar.getHeadFinalRoll();
headStartYaw = MyAvatar.getHeadFinalYaw();
}
});
Controller.keyReleaseEvent.connect(function(event) {
if (event.text == "SPACE") {
movingWithHead = false;
}
});
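The un-yaw step above is the heart of moveWithHead(): the current head offset is rotated back through the yaw accumulated since SPACE was pressed, so leaning always maps onto the avatar's local axes no matter which way the body has turned. A minimal sketch of that idea (the helper and its inputs are hypothetical, not part of the script):
// Sketch: rotate a world-space offset back through the accumulated yaw so that
// lean direction stays body-relative.
function removeYaw(worldOffset, deltaYawDegrees) {
    var unYaw = Quat.angleAxis(-deltaYawDegrees, { x: 0, y: 1, z: 0 });
    return Vec3.multiplyQbyV(unYaw, worldOffset);
}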

View file

@ -0,0 +1,42 @@
//
// playSoundPath.js
// examples
//
// Created by Craig Hansen-Sturm on 05/27/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
var soundClip = new Sound("https://s3-us-west-1.amazonaws.com/highfidelity-public/sounds/Voxels/voxel create 3.raw");
var currentTime = 1.570796; // pi/2
var deltaTime = 0.05;
var distance = 1;
var debug = 0;
function playSound() {
var options = new AudioInjectionOptions();
currentTime += deltaTime;
var s = distance * Math.sin(currentTime);
var c = distance * Math.cos(currentTime);
var soundOffset = { x:s, y:0, z:c };
if (debug) {
print("t=" + currentTime + "offset=" + soundOffset.x + "," + soundOffset.y + "," + soundOffset.z);
}
var avatarPosition = MyAvatar.position;
var soundPosition = Vec3.sum(avatarPosition,soundOffset);
options.position = soundPosition;
options.volume = 1.0;
Audio.playSound(soundClip, options);
}
Script.setInterval(playSound, 250);

View file

@ -269,8 +269,7 @@ function update(deltaTime){
}
var locationChanged = false;
if (location.hostname != oldHost) {
print("Changed domain");
if (location.hostname != oldHost || !location.isConnected) {
for (model in models) {
removeIndicators(models[model]);
}

examples/speechControl.js Normal file
View file

@ -0,0 +1,201 @@
//
// speechControl.js
// examples
//
// Created by Ryan Huffman on 07/31/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
var ACCELERATION = 80;
var STEP_DURATION = 1.0; // Duration of a step command in seconds
var TURN_DEGREES = 90;
var SLIGHT_TURN_DEGREES = 45;
var TURN_AROUND_DEGREES = 180;
var TURN_RATE = 90; // Turn rate in degrees per second
/*****************************************************************************/
/** COMMANDS *****************************************************************/
var CMD_MOVE_FORWARD = "Move forward";
var CMD_MOVE_BACKWARD = "Move backward";
var CMD_MOVE_UP = "Move up";
var CMD_MOVE_DOWN = "Move down";
var CMD_MOVE_LEFT = "Move left";
var CMD_MOVE_RIGHT = "Move right";
var CMD_STEP_FORWARD = "Step forward";
var CMD_STEP_BACKWARD = "Step backward";
var CMD_STEP_LEFT = "Step left";
var CMD_STEP_RIGHT = "Step right";
var CMD_STEP_UP = "Step up";
var CMD_STEP_DOWN = "Step down";
var CMD_TURN_LEFT = "Turn left";
var CMD_TURN_SLIGHT_LEFT = "Turn slight left";
var CMD_TURN_RIGHT = "Turn right";
var CMD_TURN_SLIGHT_RIGHT = "Turn slight right";
var CMD_TURN_AROUND = "Turn around";
var CMD_STOP = "Stop";
var CMD_SHOW_COMMANDS = "Show commands";
var MOVE_COMMANDS = [
CMD_MOVE_FORWARD,
CMD_MOVE_BACKWARD,
CMD_MOVE_UP,
CMD_MOVE_DOWN,
CMD_MOVE_LEFT,
CMD_MOVE_RIGHT,
];
var STEP_COMMANDS = [
CMD_STEP_FORWARD,
CMD_STEP_BACKWARD,
CMD_STEP_UP,
CMD_STEP_DOWN,
CMD_STEP_LEFT,
CMD_STEP_RIGHT,
];
var TURN_COMMANDS = [
CMD_TURN_LEFT,
CMD_TURN_SLIGHT_LEFT,
CMD_TURN_RIGHT,
CMD_TURN_SLIGHT_RIGHT,
CMD_TURN_AROUND,
];
var OTHER_COMMANDS = [
CMD_STOP,
CMD_SHOW_COMMANDS,
];
var ALL_COMMANDS = []
.concat(MOVE_COMMANDS)
.concat(STEP_COMMANDS)
.concat(TURN_COMMANDS)
.concat(OTHER_COMMANDS);
/** END OF COMMANDS **********************************************************/
/*****************************************************************************/
var currentCommandFunc = null;
function handleCommandRecognized(command) {
if (MOVE_COMMANDS.indexOf(command) > -1 || STEP_COMMANDS.indexOf(command) > -1) {
// If this is a STEP_* command, we will want to countdown the duration
// of time to move. MOVE_* commands don't stop.
var timeRemaining = MOVE_COMMANDS.indexOf(command) > -1 ? 0 : STEP_DURATION;
var accel = { x: 0, y: 0, z: 0 };
if (command == CMD_MOVE_FORWARD || command == CMD_STEP_FORWARD) {
accel = { x: 0, y: 0, z: 1 };
} else if (command == CMD_MOVE_BACKWARD || command == CMD_STEP_BACKWARD) {
accel = { x: 0, y: 0, z: -1 };
} else if (command == CMD_MOVE_UP || command == CMD_STEP_UP) {
accel = { x: 0, y: 1, z: 0 };
} else if (command == CMD_MOVE_DOWN || command == CMD_STEP_DOWN) {
accel = { x: 0, y: -1, z: 0 };
} else if (command == CMD_MOVE_LEFT || command == CMD_STEP_LEFT) {
accel = { x: -1, y: 0, z: 0 };
} else if (command == CMD_MOVE_RIGHT || command == CMD_STEP_RIGHT) {
accel = { x: 1, y: 0, z: 0 };
}
currentCommandFunc = function(dt) {
if (timeRemaining > 0 && dt >= timeRemaining) {
dt = timeRemaining;
}
var headOrientation = MyAvatar.headOrientation;
var front = Quat.getFront(headOrientation);
var right = Quat.getRight(headOrientation);
var up = Quat.getUp(headOrientation);
var thrust = Vec3.multiply(front, accel.z * ACCELERATION);
thrust = Vec3.sum(thrust, Vec3.multiply(right, accel.x * ACCELERATION));
thrust = Vec3.sum(thrust, Vec3.multiply(up, accel.y * ACCELERATION));
MyAvatar.addThrust(thrust);
if (timeRemaining > 0) {
timeRemaining -= dt;
return timeRemaining > 0;
}
return true;
};
} else if (TURN_COMMANDS.indexOf(command) > -1) {
var degreesRemaining;
var sign;
if (command == CMD_TURN_LEFT) {
sign = 1;
degreesRemaining = TURN_DEGREES;
} else if (command == CMD_TURN_RIGHT) {
sign = -1;
degreesRemaining = TURN_DEGREES;
} else if (command == CMD_TURN_SLIGHT_LEFT) {
sign = 1;
degreesRemaining = SLIGHT_TURN_DEGREES;
} else if (command == CMD_TURN_SLIGHT_RIGHT) {
sign = -1;
degreesRemaining = SLIGHT_TURN_DEGREES;
} else if (command == CMD_TURN_AROUND) {
sign = 1;
degreesRemaining = TURN_AROUND_DEGREES;
}
currentCommandFunc = function(dt) {
// Determine how much to turn by
var turnAmount = TURN_RATE * dt;
if (turnAmount > degreesRemaining) {
turnAmount = degreesRemaining;
}
// Apply turn
var orientation = MyAvatar.orientation;
var deltaOrientation = Quat.fromPitchYawRollDegrees(0, sign * turnAmount, 0);
MyAvatar.orientation = Quat.multiply(orientation, deltaOrientation);
degreesRemaining -= turnAmount;
return turnAmount > 0;
};
} else if (command == CMD_STOP) {
currentCommandFunc = null;
} else if (command == CMD_SHOW_COMMANDS) {
var msg = "";
for (var i = 0; i < ALL_COMMANDS.length; i++) {
msg += ALL_COMMANDS[i] + "\n";
}
Window.alert(msg);
}
}
function update(dt) {
if (currentCommandFunc) {
if (currentCommandFunc(dt) === false) {
currentCommandFunc = null;
}
}
}
function setup() {
for (var i = 0; i < ALL_COMMANDS.length; i++) {
SpeechRecognizer.addCommand(ALL_COMMANDS[i]);
}
}
function scriptEnding() {
for (var i = 0; i < ALL_COMMANDS.length; i++) {
SpeechRecognizer.removeCommand(ALL_COMMANDS[i]);
}
}
Script.scriptEnding.connect(scriptEnding);
Script.update.connect(update);
SpeechRecognizer.commandRecognized.connect(handleCommandRecognized);
setup();
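The same three SpeechRecognizer calls used above (addCommand, removeCommand, and the commandRecognized signal) should be all that is needed to wire up a custom phrase. A hedged sketch, where the "Jump" phrase and the thrust magnitude are invented for illustration:
var CMD_JUMP = "Jump";  // hypothetical extra command
var JUMP_THRUST = 500;  // assumed magnitude, tune to taste
SpeechRecognizer.addCommand(CMD_JUMP);
SpeechRecognizer.commandRecognized.connect(function(command) {
    if (command == CMD_JUMP) {
        MyAvatar.addThrust({ x: 0, y: JUMP_THRUST, z: 0 });
    }
});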

View file

@ -145,3 +145,98 @@ test("Test timeout", function() {
this.assertEquals(0, req.status, "status should be `0`");
this.assertEquals(4, req.errorCode, "4 is the timeout error code for QNetworkReply::NetworkError");
});
var localFile = Window.browse("Find defaultScripts.js file ...", "", "defaultScripts.js (defaultScripts.js)");
if (localFile !== null) {
localFile = "file:///" + localFile;
test("Test GET local file synchronously", function () {
var req = new XMLHttpRequest();
var statesVisited = [true, false, false, false, false];
req.onreadystatechange = function () {
statesVisited[req.readyState] = true;
};
req.open("GET", localFile, false);
req.send();
this.assertEquals(req.DONE, req.readyState, "readyState should be DONE");
this.assertEquals(200, req.status, "status should be `200`");
this.assertEquals("OK", req.statusText, "statusText should be `OK`");
this.assertEquals(0, req.errorCode);
this.assertNotEquals("", req.getAllResponseHeaders(), "headers should not be null");
this.assertContains("High Fidelity", req.response.substring(0, 100), "expected text not found in response");
for (var i = 0; i <= req.DONE; i++) {
this.assertEquals(true, statesVisited[i], i + " should be set");
}
});
test("Test GET nonexistent local file", function () {
var nonexistentFile = localFile.replace(".js", "NoExist.js");
var req = new XMLHttpRequest();
req.open("GET", nonexistentFile, false);
req.send();
this.assertEquals(req.DONE, req.readyState, "readyState should be DONE");
this.assertEquals(404, req.status, "status should be `404`");
this.assertEquals("Not Found", req.statusText, "statusText should be `Not Found`");
this.assertNotEquals(0, req.errorCode);
});
test("Test GET local file already open", function () {
// Can't open file exclusively in order to test.
});
test("Test GET local file with data not implemented", function () {
var req = new XMLHttpRequest();
req.open("GET", localFile, true);
req.send("data");
this.assertEquals(req.DONE, req.readyState, "readyState should be DONE");
this.assertEquals(501, req.status, "status should be `501`");
this.assertEquals("Not Implemented", req.statusText, "statusText should be `Not Implemented`");
this.assertNotEquals(0, req.errorCode);
});
test("Test GET local file asynchronously not implemented", function () {
var req = new XMLHttpRequest();
req.open("GET", localFile, true);
req.send();
this.assertEquals(req.DONE, req.readyState, "readyState should be DONE");
this.assertEquals(501, req.status, "status should be `501`");
this.assertEquals("Not Implemented", req.statusText, "statusText should be `Not Implemented`");
this.assertNotEquals(0, req.errorCode);
});
test("Test POST local file not implemented", function () {
var req = new XMLHttpRequest();
req.open("POST", localFile, false);
req.send();
this.assertEquals(req.DONE, req.readyState, "readyState should be DONE");
this.assertEquals(501, req.status, "status should be `501`");
this.assertEquals("Not Implemented", req.statusText, "statusText should be `Not Implemented`");
this.assertNotEquals(0, req.errorCode);
});
test("Test local file username and password not implemented", function () {
var req = new XMLHttpRequest();
req.open("GET", localFile, false, "username", "password");
req.send();
this.assertEquals(req.DONE, req.readyState, "readyState should be DONE");
this.assertEquals(501, req.status, "status should be `501`");
this.assertEquals("Not Implemented", req.statusText, "statusText should be `Not Implemented`");
this.assertNotEquals(0, req.errorCode);
});
} else {
print("Local file operation not tested");
}
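For reference, the minimal synchronous local-file fetch these tests exercise looks like the sketch below; the path is hypothetical, which is why the tests use Window.browse() to locate a real file:
var req = new XMLHttpRequest();
req.open("GET", "file:///C:/hifi/examples/defaultScripts.js", false); // hypothetical path
req.send();
if (req.status == 200) {
    print(req.response.substring(0, 80));
}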

View file

@ -132,20 +132,34 @@ ToolBar = function(x, y, direction) {
this.y = y;
this.width = 0;
this.height = 0;
this.back = Overlays.addOverlay("text", {
backgroundColor: { red: 255, green: 255, blue: 255 },
x: this.x,
y: this.y,
width: this.width,
height: this.height,
alpha: 1.0,
visible: false
});
this.addTool = function(properties, selectable, selected) {
if (direction == ToolBar.HORIZONTAL) {
properties.x = this.x + this.width;
properties.y = this.y;
this.width += properties.width + ToolBar.SPACING;
this.height += Math.max(properties.height, this.height);
this.height = Math.max(properties.height, this.height);
} else {
properties.x = this.x;
properties.y = this.y + this.height;
this.width = Math.max(properties.width, this.width);
this.height += properties.height + ToolBar.SPACING;
}
if (this.back != null) {
Overlays.editOverlay(this.back, {
width: this.width + 2 * ToolBar.SPACING,
height: this.height + 2 * ToolBar.SPACING
});
}
this.tools[this.tools.length] = new Tool(properties, selectable, selected);
return ((this.tools.length) - 1);
@ -159,18 +173,48 @@ ToolBar = function(x, y, direction) {
for(var tool in this.tools) {
this.tools[tool].move(this.tools[tool].x() + dx, this.tools[tool].y() + dy);
}
if (this.back != null) {
Overlays.editOverlay(this.back, {
x: x - ToolBar.SPACING,
y: y - ToolBar.SPACING
});
}
}
this.setAlpha = function(alpha) {
for(var tool in this.tools) {
this.setAlpha = function(alpha, tool) {
if(typeof(tool) === 'undefined') {
for(var tool in this.tools) {
this.tools[tool].setAlpha(alpha);
}
if (this.back != null) {
Overlays.editOverlay(this.back, { alpha: alpha});
}
} else {
this.tools[tool].setAlpha(alpha);
}
}
this.setBack = function(color, alpha) {
if (color == null) {
Overlays.editOverlay(this.back, {
visible: false
});
} else {
Overlays.editOverlay(this.back, {
visible: true,
backgroundColor: color,
alpha: alpha
});
}
}
this.show = function(doShow) {
for(var tool in this.tools) {
this.tools[tool].show(doShow);
}
if (this.back != null) {
Overlays.editOverlay(this.back, { visible: doShow});
}
}
this.clicked = function(clickedOverlay) {
@ -186,12 +230,25 @@ ToolBar = function(x, y, direction) {
return this.tools.length;
}
this.selectTool = function (tool, select) {
this.tools[tool].select(select);
}
this.toolSelected = function (tool) {
return this.tools[tool].selected();
}
this.cleanup = function() {
for(var tool in this.tools) {
this.tools[tool].cleanup();
delete this.tools[tool];
}
if (this.back != null) {
Overlays.deleteOverlay(this.back);
this.back = null;
}
this.tools = [];
this.x = x;
this.y = y;
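A short usage sketch of the background and selection helpers added to ToolBar above; the image URL and sizes are placeholders:
var bar = new ToolBar(10, 10, ToolBar.VERTICAL);
var tool = bar.addTool({ imageURL: "http://example.com/tool.svg", // placeholder image
    width: 50, height: 50, visible: true, alpha: 0.9 }, true);
bar.setBack({ red: 0, green: 0, blue: 0 }, 0.5); // translucent backdrop behind all tools
bar.selectTool(tool, true);    // select a tool programmatically
print(bar.toolSelected(tool)); // query its selection state
bar.setAlpha(0.5, tool);       // fade just this one tool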

View file

@ -46,6 +46,15 @@ foreach(SUBDIR avatar devices renderer ui starfield location scripting voxels pa
set(INTERFACE_SRCS ${INTERFACE_SRCS} "${SUBDIR_SRCS}")
endforeach(SUBDIR)
# Add SpeechRecognizer if on OS X, otherwise remove
if (APPLE)
file(GLOB INTERFACE_OBJCPP_SRCS "src/SpeechRecognizer.mm")
set(INTERFACE_SRCS ${INTERFACE_SRCS} ${INTERFACE_OBJCPP_SRCS})
else ()
get_filename_component(SPEECHRECOGNIZER_H "src/SpeechRecognizer.h" ABSOLUTE)
list(REMOVE_ITEM INTERFACE_SRCS ${SPEECHRECOGNIZER_H})
endif ()
find_package(Qt5 COMPONENTS Gui Multimedia Network OpenGL Script Svg WebKitWidgets)
# grab the ui files in resources/ui
@ -165,8 +174,9 @@ if (APPLE)
find_library(CoreFoundation CoreFoundation)
find_library(GLUT GLUT)
find_library(OpenGL OpenGL)
find_library(AppKit AppKit)
target_link_libraries(${TARGET_NAME} ${CoreAudio} ${CoreFoundation} ${GLUT} ${OpenGL})
target_link_libraries(${TARGET_NAME} ${CoreAudio} ${CoreFoundation} ${GLUT} ${OpenGL} ${AppKit})
# install command for OS X bundle
INSTALL(TARGETS ${TARGET_NAME}

View file

@ -2,15 +2,15 @@
Instructions for adding the Oculus library (LibOVR) to Interface
Stephen Birarda, March 6, 2014
You can download the Oculus SDK from https://developer.oculusvr.com/ (account creation required). Interface has been tested with SDK version 0.3.2.
You can download the Oculus SDK from https://developer.oculusvr.com/ (account creation required). Interface has been tested with SDK version 0.4.1.
1. Copy the Oculus SDK folders from the LibOVR directory (Lib, Include, Src) into the interface/externals/oculus folder.
1. Copy the Oculus SDK folders from the LibOVR directory (Lib, Include, Src) into the interface/externals/libovr folder.
This readme.txt should be there as well.
You may optionally choose to copy the SDK folders to a location outside the repository (so you can reuse them across different checkouts and projects).
If so, our CMake find module expects you to set the ENV variable 'HIFI_LIB_DIR' to a directory containing a subfolder 'libovr' that contains the three folders mentioned above.
NOTE: For Windows users, you should copy libovr.lib and libovrd.lib from the \oculus\Lib\Win32\VS2010 directory to the \oculus\Lib\Win32\ directory.
NOTE: For Windows users, you should copy libovr.lib and libovrd.lib from the \libovr\Lib\Win32\VS2010 directory to the \libovr\Lib\Win32\ directory.
2. Clear your build directory, run cmake and build, and you should be all set.

View file

@ -0,0 +1,20 @@
#version 120
//
// metavoxel_heightfield_base.frag
// fragment shader
//
// Created by Andrzej Kapolka on 8/20/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// the diffuse texture
uniform sampler2D diffuseMap;
void main(void) {
// compute the base color based on OpenGL lighting model
gl_FragColor = gl_Color * texture2D(diffuseMap, gl_TexCoord[0].st);
}

View file

@ -0,0 +1,33 @@
#version 120
//
// metavoxel_heightfield_base.vert
// vertex shader
//
// Created by Andrzej Kapolka on 8/20/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// the height texture
uniform sampler2D heightMap;
// the distance between height points in texture space
uniform float heightScale;
// the scale between height and color textures
uniform float colorScale;
void main(void) {
// add the height to the position
float height = texture2D(heightMap, gl_MultiTexCoord0.st).r;
gl_Position = gl_ModelViewProjectionMatrix * (gl_Vertex + vec4(0.0, height, 0.0, 0.0));
// the zero height should be invisible
gl_FrontColor = vec4(1.0, 1.0, 1.0, step(height, 0.0));
// pass along the scaled/offset texture coordinates
gl_TexCoord[0] = (gl_MultiTexCoord0 - vec4(heightScale, heightScale, 0.0, 0.0)) * colorScale;
}

View file

@ -0,0 +1,21 @@
#version 120
//
// metavoxel_heightfield_light.frag
// fragment shader
//
// Created by Andrzej Kapolka on 8/20/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// the interpolated normal
varying vec4 normal;
void main(void) {
// compute the base color based on OpenGL lighting model
gl_FragColor = gl_Color * (gl_FrontLightModelProduct.sceneColor + gl_FrontLightProduct[0].ambient +
gl_FrontLightProduct[0].diffuse * max(0.0, dot(normalize(normal), gl_LightSource[0].position)));
}

View file

@ -0,0 +1,45 @@
#version 120
//
// metavoxel_heightfield_light.vert
// vertex shader
//
// Created by Andrzej Kapolka on 8/20/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// the height texture
uniform sampler2D heightMap;
// the distance between height points in texture space
uniform float heightScale;
// the interpolated position
varying vec4 position;
// the interpolated normal
varying vec4 normal;
void main(void) {
// transform and store the normal for interpolation
vec2 heightCoord = gl_MultiTexCoord0.st;
float deltaX = texture2D(heightMap, heightCoord - vec2(heightScale, 0.0)).r -
texture2D(heightMap, heightCoord + vec2(heightScale, 0.0)).r;
float deltaZ = texture2D(heightMap, heightCoord - vec2(0.0, heightScale)).r -
texture2D(heightMap, heightCoord + vec2(0.0, heightScale)).r;
normal = normalize(gl_ModelViewMatrix * vec4(deltaX, heightScale, deltaZ, 0.0));
// add the height to the position
float height = texture2D(heightMap, heightCoord).r;
position = gl_ModelViewMatrix * (gl_Vertex + vec4(0.0, height, 0.0, 0.0));
gl_Position = gl_ProjectionMatrix * position;
// the zero height should be invisible
gl_FrontColor = vec4(1.0, 1.0, 1.0, step(height, 0.0));
// and the shadow texture coordinates
gl_TexCoord[1] = vec4(dot(gl_EyePlaneS[0], position), dot(gl_EyePlaneT[0], position), dot(gl_EyePlaneR[0], position), 1.0);
}

View file

@ -0,0 +1,44 @@
#version 120
//
// metavoxel_heightfield_light_cascaded_shadow_map.frag
// fragment shader
//
// Created by Andrzej Kapolka on 8/20/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// the shadow texture
uniform sampler2DShadow shadowMap;
// the distances to the cascade sections
uniform vec3 shadowDistances;
// the inverse of the size of the shadow map
const float shadowScale = 1.0 / 2048.0;
// the interpolated position
varying vec4 position;
// the interpolated normal
varying vec4 normal;
void main(void) {
// compute the index of the cascade to use and the corresponding texture coordinates
int shadowIndex = int(dot(step(vec3(position.z), shadowDistances), vec3(1.0, 1.0, 1.0)));
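// (each step() term is 1.0 for every cascade split the fragment lies beyond,
// so the dot product counts the crossed splits, selecting the cascade index
// without branching)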
vec3 shadowTexCoord = vec3(dot(gl_EyePlaneS[shadowIndex], position), dot(gl_EyePlaneT[shadowIndex], position),
dot(gl_EyePlaneR[shadowIndex], position));
// compute the base color based on OpenGL lighting model
float diffuse = dot(normalize(normal), gl_LightSource[0].position);
float facingLight = step(0.0, diffuse) * 0.25 *
(shadow2D(shadowMap, shadowTexCoord + vec3(-shadowScale, -shadowScale, 0.0)).r +
shadow2D(shadowMap, shadowTexCoord + vec3(-shadowScale, shadowScale, 0.0)).r +
shadow2D(shadowMap, shadowTexCoord + vec3(shadowScale, -shadowScale, 0.0)).r +
shadow2D(shadowMap, shadowTexCoord + vec3(shadowScale, shadowScale, 0.0)).r);
gl_FragColor = gl_Color * (gl_FrontLightModelProduct.sceneColor + gl_FrontLightProduct[0].ambient +
gl_FrontLightProduct[0].diffuse * (diffuse * facingLight));
}

View file

@ -0,0 +1,33 @@
#version 120
//
// metavoxel_heightfield_light_shadow_map.frag
// fragment shader
//
// Created by Andrzej Kapolka on 8/20/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// the shadow texture
uniform sampler2DShadow shadowMap;
// the inverse of the size of the shadow map
const float shadowScale = 1.0 / 2048.0;
// the interpolated normal
varying vec4 normal;
void main(void) {
// compute the base color based on OpenGL lighting model
float diffuse = dot(normalize(normal), gl_LightSource[0].position);
float facingLight = step(0.0, diffuse) * 0.25 *
(shadow2D(shadowMap, gl_TexCoord[1].stp + vec3(-shadowScale, -shadowScale, 0.0)).r +
shadow2D(shadowMap, gl_TexCoord[1].stp + vec3(-shadowScale, shadowScale, 0.0)).r +
shadow2D(shadowMap, gl_TexCoord[1].stp + vec3(shadowScale, -shadowScale, 0.0)).r +
shadow2D(shadowMap, gl_TexCoord[1].stp + vec3(shadowScale, shadowScale, 0.0)).r);
gl_FragColor = gl_Color * (gl_FrontLightModelProduct.sceneColor + gl_FrontLightProduct[0].ambient +
gl_FrontLightProduct[0].diffuse * (diffuse * facingLight));
}

View file

@ -0,0 +1,29 @@
#version 120
//
// metavoxel_heightfield_splat.frag
// fragment shader
//
// Created by Andrzej Kapolka on 8/20/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// the number of splats per pass
const int SPLAT_COUNT = 4;
// the splat textures
uniform sampler2D diffuseMaps[SPLAT_COUNT];
// alpha values for the four splat textures
varying vec4 alphaValues;
void main(void) {
// blend the splat textures
gl_FragColor = gl_Color * (texture2D(diffuseMaps[0], gl_TexCoord[0].st) * alphaValues.x +
texture2D(diffuseMaps[1], gl_TexCoord[1].st) * alphaValues.y +
texture2D(diffuseMaps[2], gl_TexCoord[2].st) * alphaValues.z +
texture2D(diffuseMaps[3], gl_TexCoord[3].st) * alphaValues.w);
}

View file

@ -0,0 +1,64 @@
#version 120
//
// metavoxel_heightfield_splat.vert
// vertex shader
//
// Created by Andrzej Kapolka on 8/20/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// the height texture
uniform sampler2D heightMap;
// the texture that contains the texture indices
uniform sampler2D textureMap;
// the distance between height points in texture space
uniform float heightScale;
// the scale between the height and texture-index textures
uniform float textureScale;
// the splat texture offset
uniform vec2 splatTextureOffset;
// the splat textures scales on the S axis
uniform vec4 splatTextureScalesS;
// the splat texture scales on the T axis
uniform vec4 splatTextureScalesT;
// the lower bounds of the values corresponding to the splat textures
uniform vec4 textureValueMinima;
// the upper bounds of the values corresponding to the splat textures
uniform vec4 textureValueMaxima;
// alpha values for the four splat textures
varying vec4 alphaValues;
void main(void) {
// add the height to the position
float height = texture2D(heightMap, gl_MultiTexCoord0.st).r;
vec4 modelSpacePosition = gl_Vertex + vec4(0.0, height, 0.0, 0.0);
gl_Position = gl_ModelViewProjectionMatrix * modelSpacePosition;
// the zero height should be invisible
gl_FrontColor = vec4(1.0, 1.0, 1.0, 1.0 - step(height, 0.0));
// pass along the scaled/offset texture coordinates
vec4 textureSpacePosition = vec4(modelSpacePosition.xz, 0.0, 1.0) + vec4(splatTextureOffset, 0.0, 0.0);
gl_TexCoord[0] = textureSpacePosition * vec4(splatTextureScalesS[0], splatTextureScalesT[0], 0.0, 1.0);
gl_TexCoord[1] = textureSpacePosition * vec4(splatTextureScalesS[1], splatTextureScalesT[1], 0.0, 1.0);
gl_TexCoord[2] = textureSpacePosition * vec4(splatTextureScalesS[2], splatTextureScalesT[2], 0.0, 1.0);
gl_TexCoord[3] = textureSpacePosition * vec4(splatTextureScalesS[3], splatTextureScalesT[3], 0.0, 1.0);
// compute the alpha values for each texture
float value = texture2D(textureMap, (gl_MultiTexCoord0.st - vec2(heightScale, heightScale)) * textureScale).r;
vec4 valueVector = vec4(value, value, value, value);
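// each component of step(minima, v) * step(v, maxima) is 1.0 only where
// textureValueMinima <= value <= textureValueMaxima, a branchless in-range
// mask that enables exactly the matching splat texture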
alphaValues = step(textureValueMinima, valueVector) * step(valueVector, textureValueMaxima);
}

View file

@ -246,7 +246,8 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
connect(&domainHandler, SIGNAL(connectedToDomain(const QString&)), SLOT(updateWindowTitle()));
connect(&domainHandler, SIGNAL(disconnectedFromDomain()), SLOT(updateWindowTitle()));
connect(&domainHandler, &DomainHandler::settingsReceived, this, &Application::domainSettingsReceived);
connect(&domainHandler, &DomainHandler::hostnameChanged, Menu::getInstance(), &Menu::clearLoginDialogDisplayedFlag);
// hookup VoxelEditSender to PaymentManager so we can pay for octree edits
const PaymentManager& paymentManager = PaymentManager::getInstance();
connect(&_voxelEditSender, &VoxelEditPacketSender::octreePaymentRequired,
@ -894,7 +895,7 @@ void Application::keyPressEvent(QKeyEvent* event) {
}
break;
case Qt::Key_Space:
case Qt::Key_Apostrophe:
resetSensors();
break;
@ -1053,7 +1054,6 @@ void Application::keyPressEvent(QKeyEvent* event) {
Menu::getInstance()->triggerOption(MenuOption::FrustumRenderMode);
}
break;
break;
case Qt::Key_Percent:
Menu::getInstance()->triggerOption(MenuOption::Stats);
break;
@ -1772,14 +1772,7 @@ void Application::init() {
_lastTimeUpdated.start();
Menu::getInstance()->loadSettings();
if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) {
_audio.setDynamicJitterBuffers(false);
_audio.setStaticDesiredJitterBufferFrames(Menu::getInstance()->getAudioJitterBufferFrames());
} else {
_audio.setDynamicJitterBuffers(true);
}
_audio.setMaxFramesOverDesired(Menu::getInstance()->getMaxFramesOverDesired());
_audio.setReceivedAudioStreamSettings(Menu::getInstance()->getReceivedAudioStreamSettings());
qDebug("Loaded settings");
@ -3747,6 +3740,10 @@ ScriptEngine* Application::loadScript(const QString& scriptName, bool loadScript
scriptEngine->registerGlobalObject("Camera", cameraScriptable);
connect(scriptEngine, SIGNAL(finished(const QString&)), cameraScriptable, SLOT(deleteLater()));
#ifdef Q_OS_MAC
scriptEngine->registerGlobalObject("SpeechRecognizer", Menu::getInstance()->getSpeechRecognizer());
#endif
ClipboardScriptingInterface* clipboardScriptable = new ClipboardScriptingInterface();
scriptEngine->registerGlobalObject("Clipboard", clipboardScriptable);
connect(scriptEngine, SIGNAL(finished(const QString&)), clipboardScriptable, SLOT(deleteLater()));
@ -3829,6 +3826,10 @@ void Application::stopAllScripts(bool restart) {
it.value()->stop();
qDebug() << "stopping script..." << it.key();
}
// HACK: ATM scripts cannot set/get their animation priorities, so we clear priorities
// whenever a script stops in case it happened to have been setting joint rotations.
// TODO: expose animation priorities and provide a layered animation control system.
_myAvatar->clearJointAnimationPriorities();
}
void Application::stopScript(const QString &scriptName) {
@ -3836,6 +3837,10 @@ void Application::stopScript(const QString &scriptName) {
if (_scriptEnginesHash.contains(scriptURLString)) {
_scriptEnginesHash.value(scriptURLString)->stop();
qDebug() << "stopping script..." << scriptName;
// HACK: ATM scripts cannot set/get their animation priorities, so we clear priorities
// whenever a script stops in case it happened to have been setting joint rotations.
// TODO: expose animation priorities and provide a layered animation control system.
_myAvatar->clearJointAnimationPriorities();
}
}

View file

@ -72,7 +72,7 @@ Audio::Audio(QObject* parent) :
_proceduralAudioOutput(NULL),
_proceduralOutputDevice(NULL),
_inputRingBuffer(0),
_receivedAudioStream(0, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, true, 0, 0, true),
_receivedAudioStream(0, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, InboundAudioStream::Settings()),
_isStereoInput(false),
_averagedLatency(0.0),
_lastInputLoudness(0),
@ -82,7 +82,6 @@ Audio::Audio(QObject* parent) :
_noiseGateSampleCounter(0),
_noiseGateOpen(false),
_noiseGateEnabled(true),
_peqEnabled(false),
_toneInjectionEnabled(false),
_noiseGateFramesToClose(0),
_totalInputAudioSamples(0),
@ -102,9 +101,11 @@ Audio::Audio(QObject* parent) :
_scopeOutputOffset(0),
_framesPerScope(DEFAULT_FRAMES_PER_SCOPE),
_samplesPerScope(NETWORK_SAMPLES_PER_FRAME * _framesPerScope),
_peqEnabled(false),
_scopeInput(0),
_scopeOutputLeft(0),
_scopeOutputRight(0),
_scopeLastFrame(),
_statsEnabled(false),
_statsShowInjectedStreams(false),
_outgoingAvatarAudioSequenceNumber(0),
@ -113,14 +114,17 @@ Audio::Audio(QObject* parent) :
_audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
_lastSentAudioPacket(0),
_packetSentTimeGaps(1, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS),
_audioOutputIODevice(*this)
_audioOutputIODevice(_receivedAudioStream)
{
// clear the array of locally injected samples
memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
// Create the noise sample array
_noiseSampleFrames = new float[NUMBER_OF_NOISE_SAMPLE_FRAMES];
connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples, this, &Audio::processReceivedAudioStreamSamples, Qt::DirectConnection);
connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedSilence, this, &Audio::addStereoSilenceToScope, Qt::DirectConnection);
connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedLastFrameRepeatedWithFade, this, &Audio::addLastFrameRepeatedWithFadeToScope, Qt::DirectConnection);
connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedStereoSamples, this, &Audio::addStereoSamplesToScope, Qt::DirectConnection);
connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples, this, &Audio::processReceivedSamples, Qt::DirectConnection);
}
void Audio::init(QGLWidget *parent) {
@ -460,9 +464,12 @@ void Audio::handleAudioInput() {
static char audioDataPacket[MAX_PACKET_SIZE];
static int numBytesPacketHeader = numBytesForPacketHeaderGivenPacketType(PacketTypeMicrophoneAudioNoEcho);
static int leadingBytes = numBytesPacketHeader + sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
static int16_t* networkAudioSamples = (int16_t*) (audioDataPacket + leadingBytes);
// NOTE: we assume PacketTypeMicrophoneAudioWithEcho has same size headers as
// PacketTypeMicrophoneAudioNoEcho. If not, then networkAudioSamples will be pointing to the wrong place for writing
// audio samples with echo.
static int leadingBytes = numBytesPacketHeader + sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
static int16_t* networkAudioSamples = (int16_t*)(audioDataPacket + leadingBytes);
float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio(_numInputCallbackBytes);
@ -475,7 +482,7 @@ void Audio::handleAudioInput() {
int16_t* ioBuffer = (int16_t*)inputByteArray.data();
_peq.render( ioBuffer, ioBuffer, inputByteArray.size() / sizeof(int16_t) );
_peq.render(ioBuffer, ioBuffer, inputByteArray.size() / sizeof(int16_t));
}
if (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio) && !_muted && _audioOutput) {
@ -668,32 +675,27 @@ void Audio::handleAudioInput() {
if (!_isStereoInput && _scopeEnabled && !_scopeEnabledPause) {
unsigned int numMonoAudioChannels = 1;
unsigned int monoAudioChannel = 0;
addBufferToScope(_scopeInput, _scopeInputOffset, networkAudioSamples, monoAudioChannel, numMonoAudioChannels);
_scopeInputOffset += NETWORK_SAMPLES_PER_FRAME;
_scopeInputOffset %= _samplesPerScope;
_scopeInputOffset = addBufferToScope(_scopeInput, _scopeInputOffset, networkAudioSamples, NETWORK_SAMPLES_PER_FRAME, monoAudioChannel, numMonoAudioChannels);
}
NodeList* nodeList = NodeList::getInstance();
SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
if (_recorder && _recorder.data()->isRecording()) {
_recorder.data()->record(reinterpret_cast<char*>(networkAudioSamples), numNetworkBytes);
}
if (audioMixer && audioMixer->getActiveSocket()) {
MyAvatar* interfaceAvatar = Application::getInstance()->getAvatar();
glm::vec3 headPosition = interfaceAvatar->getHead()->getPosition();
glm::quat headOrientation = interfaceAvatar->getHead()->getFinalOrientationInWorldFrame();
quint8 isStereo = _isStereoInput ? 1 : 0;
int numAudioBytes = 0;
PacketType packetType;
if (_lastInputLoudness == 0) {
packetType = PacketTypeSilentAudioFrame;
// we need to indicate how many silent samples this is to the audio mixer
networkAudioSamples[0] = numNetworkSamples;
numAudioBytes = sizeof(int16_t);
} else {
numAudioBytes = numNetworkBytes;
if (Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)) {
packetType = PacketTypeMicrophoneAudioWithEcho;
} else {
@ -702,21 +704,31 @@ void Audio::handleAudioInput() {
}
char* currentPacketPtr = audioDataPacket + populatePacketHeader(audioDataPacket, packetType);
// pack sequence number
memcpy(currentPacketPtr, &_outgoingAvatarAudioSequenceNumber, sizeof(quint16));
currentPacketPtr += sizeof(quint16);
// set the mono/stereo byte
*currentPacketPtr++ = isStereo;
if (packetType == PacketTypeSilentAudioFrame) {
// pack num silent samples
quint16 numSilentSamples = numNetworkSamples;
memcpy(currentPacketPtr, &numSilentSamples, sizeof(quint16));
currentPacketPtr += sizeof(quint16);
} else {
// set the mono/stereo byte
*currentPacketPtr++ = isStereo;
// memcpy the three float positions
memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
currentPacketPtr += (sizeof(headPosition));
// memcpy the three float positions
memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
currentPacketPtr += (sizeof(headPosition));
// memcpy our orientation
memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
currentPacketPtr += sizeof(headOrientation);
// memcpy our orientation
memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
currentPacketPtr += sizeof(headOrientation);
// audio samples have already been packed (written to networkAudioSamples)
currentPacketPtr += numNetworkBytes;
}
// first time this is 0
if (_lastSentAudioPacket == 0) {
@ -728,18 +740,58 @@ void Audio::handleAudioInput() {
_lastSentAudioPacket = now;
}
nodeList->writeDatagram(audioDataPacket, numAudioBytes + leadingBytes, audioMixer);
int packetBytes = currentPacketPtr - audioDataPacket;
nodeList->writeDatagram(audioDataPacket, packetBytes, audioMixer);
_outgoingAvatarAudioSequenceNumber++;
Application::getInstance()->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO)
.updateValue(numAudioBytes + leadingBytes);
.updateValue(packetBytes);
}
delete[] inputAudioSamples;
}
}
void Audio::processReceivedAudioStreamSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
const int STEREO_FACTOR = 2;
void Audio::addStereoSilenceToScope(int silentSamplesPerChannel) {
if (!_scopeEnabled || _scopeEnabledPause) {
return;
}
addSilenceToScope(_scopeOutputLeft, _scopeOutputOffset, silentSamplesPerChannel);
_scopeOutputOffset = addSilenceToScope(_scopeOutputRight, _scopeOutputOffset, silentSamplesPerChannel);
}
void Audio::addStereoSamplesToScope(const QByteArray& samples) {
if (!_scopeEnabled || _scopeEnabledPause) {
return;
}
const int16_t* samplesData = reinterpret_cast<const int16_t*>(samples.data());
int samplesPerChannel = samples.size() / sizeof(int16_t) / STEREO_FACTOR;
addBufferToScope(_scopeOutputLeft, _scopeOutputOffset, samplesData, samplesPerChannel, 0, STEREO_FACTOR);
_scopeOutputOffset = addBufferToScope(_scopeOutputRight, _scopeOutputOffset, samplesData, samplesPerChannel, 1, STEREO_FACTOR);
_scopeLastFrame = samples.right(NETWORK_BUFFER_LENGTH_BYTES_STEREO);
}
void Audio::addLastFrameRepeatedWithFadeToScope(int samplesPerChannel) {
const int16_t* lastFrameData = reinterpret_cast<const int16_t*>(_scopeLastFrame.data());
int samplesRemaining = samplesPerChannel;
int indexOfRepeat = 0;
do {
int samplesToWriteThisIteration = std::min(samplesRemaining, (int)NETWORK_SAMPLES_PER_FRAME);
float fade = calculateRepeatedFrameFadeFactor(indexOfRepeat);
addBufferToScope(_scopeOutputLeft, _scopeOutputOffset, lastFrameData, samplesToWriteThisIteration, 0, STEREO_FACTOR, fade);
_scopeOutputOffset = addBufferToScope(_scopeOutputRight, _scopeOutputOffset, lastFrameData, samplesToWriteThisIteration, 1, STEREO_FACTOR, fade);
samplesRemaining -= samplesToWriteThisIteration;
indexOfRepeat++;
} while (samplesRemaining > 0);
}
void Audio::processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
const int numNetworkOutputSamples = inputBuffer.size() / sizeof(int16_t);
const int numDeviceOutputSamples = numNetworkOutputSamples * (_outputFormat.sampleRate() * _outputFormat.channelCount())
@ -784,30 +836,6 @@ void Audio::processReceivedAudioStreamSamples(const QByteArray& inputBuffer, QBy
numNetworkOutputSamples,
numDeviceOutputSamples,
_desiredOutputFormat, _outputFormat);
if (_scopeEnabled && !_scopeEnabledPause) {
unsigned int numAudioChannels = _desiredOutputFormat.channelCount();
const int16_t* samples = receivedSamples;
for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) {
unsigned int audioChannel = 0;
addBufferToScope(
_scopeOutputLeft,
_scopeOutputOffset,
samples, audioChannel, numAudioChannels);
audioChannel = 1;
addBufferToScope(
_scopeOutputRight,
_scopeOutputOffset,
samples, audioChannel, numAudioChannels);
_scopeOutputOffset += NETWORK_SAMPLES_PER_FRAME;
_scopeOutputOffset %= _samplesPerScope;
samples += NETWORK_SAMPLES_PER_FRAME * numAudioChannels;
}
}
}
void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
@ -820,9 +848,6 @@ void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size());
}
void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
int numBytesPacketHeader = numBytesForPacketHeader(packet);
@ -855,12 +880,13 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
void Audio::sendDownstreamAudioStatsPacket() {
// since this function is called every second, we'll sample some of our stats here
// since this function is called every second, we'll sample for some of our stats here
_inputRingBufferMsecsAvailableStats.update(getInputRingBufferMsecsAvailable());
_audioOutputMsecsUnplayedStats.update(getAudioOutputMsecsUnplayed());
// also, call _receivedAudioStream's per-second callback
_receivedAudioStream.perSecondCallbackForUpdatingStats();
char packet[MAX_PACKET_SIZE];
// pack header
@ -878,7 +904,7 @@ void Audio::sendDownstreamAudioStatsPacket() {
dataAt += sizeof(quint16);
// pack downstream audio stream stats
AudioStreamStats stats = _receivedAudioStream.updateSeqHistoryAndGetAudioStreamStats();
AudioStreamStats stats = _receivedAudioStream.getAudioStreamStats();
memcpy(dataAt, &stats, sizeof(AudioStreamStats));
dataAt += sizeof(AudioStreamStats);
@ -911,7 +937,7 @@ void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& s
unsigned int delayCount = delay * _desiredOutputFormat.channelCount();
unsigned int silentCount = (remaining < delayCount) ? remaining : delayCount;
if (silentCount) {
_spatialAudioRingBuffer.addSilentFrame(silentCount);
_spatialAudioRingBuffer.addSilentSamples(silentCount);
}
// Recalculate the number of remaining samples
@ -1215,8 +1241,6 @@ void Audio::selectAudioFilterSmiley() {
void Audio::toggleScope() {
_scopeEnabled = !_scopeEnabled;
if (_scopeEnabled) {
_scopeInputOffset = 0;
_scopeOutputOffset = 0;
allocateScope();
} else {
freeScope();
@ -1254,6 +1278,8 @@ void Audio::selectAudioScopeFiftyFrames() {
}
void Audio::allocateScope() {
_scopeInputOffset = 0;
_scopeOutputOffset = 0;
int num = _samplesPerScope * sizeof(int16_t);
_scopeInput = new QByteArray(num, 0);
_scopeOutputLeft = new QByteArray(num, 0);
@ -1285,12 +1311,18 @@ void Audio::freeScope() {
}
}
void Audio::addBufferToScope(
QByteArray* byteArray, unsigned int frameOffset, const int16_t* source, unsigned int sourceChannel, unsigned int sourceNumberOfChannels) {
int Audio::addBufferToScope(QByteArray* byteArray, int frameOffset, const int16_t* source, int sourceSamplesPerChannel,
unsigned int sourceChannel, unsigned int sourceNumberOfChannels, float fade) {
if (!_scopeEnabled || _scopeEnabledPause) {
return 0;
}
// Constant multiplier to map sample value to vertical size of scope
float multiplier = (float)MULTIPLIER_SCOPE_HEIGHT / logf(2.0f);
// Used to scale each sample. (logf(sample) + fadeOffset) is same as logf(sample * fade).
float fadeOffset = logf(fade);
// Temporary variable receives sample value
float sample;
@ -1301,17 +1333,41 @@ void Audio::addBufferToScope(
// Short int pointer to mapped samples in byte array
int16_t* destination = (int16_t*) byteArray->data();
for (unsigned int i = 0; i < NETWORK_SAMPLES_PER_FRAME; i++) {
for (int i = 0; i < sourceSamplesPerChannel; i++) {
sample = (float)source[i * sourceNumberOfChannels + sourceChannel];
if (sample > 0) {
value = (int16_t)(multiplier * logf(sample));
} else if (sample < 0) {
value = (int16_t)(-multiplier * logf(-sample));
if (sample > 1) {
value = (int16_t)(multiplier * (logf(sample) + fadeOffset));
} else if (sample < -1) {
value = (int16_t)(-multiplier * (logf(-sample) + fadeOffset));
} else {
value = 0;
}
destination[i + frameOffset] = value;
destination[frameOffset] = value;
frameOffset = (frameOffset == _samplesPerScope - 1) ? 0 : frameOffset + 1;
}
return frameOffset;
}
int Audio::addSilenceToScope(QByteArray* byteArray, int frameOffset, int silentSamples) {
QMutexLocker lock(&_guard);
// Short int pointer to mapped samples in byte array
int16_t* destination = (int16_t*)byteArray->data();
if (silentSamples >= _samplesPerScope) {
memset(destination, 0, byteArray->size());
return frameOffset;
}
int samplesToBufferEnd = _samplesPerScope - frameOffset;
if (silentSamples > samplesToBufferEnd) {
memset(destination + frameOffset, 0, samplesToBufferEnd * sizeof(int16_t));
memset(destination, 0, (silentSamples - samplesToBufferEnd) * sizeof(int16_t));
} else {
memset(destination + frameOffset, 0, silentSamples * sizeof(int16_t));
}
return (frameOffset + silentSamples) % _samplesPerScope;
}
void Audio::renderStats(const float* color, int width, int height) {
@ -1512,17 +1568,17 @@ void Audio::renderScope(int width, int height) {
return;
static const float backgroundColor[4] = { 0.4f, 0.4f, 0.4f, 0.6f };
static const float gridColor[4] = { 0.3f, 0.3f, 0.3f, 0.6f };
static const float gridColor[4] = { 0.7f, 0.7f, 0.7f, 1.0f };
static const float inputColor[4] = { 0.3f, 1.0f, 0.3f, 1.0f };
static const float outputLeftColor[4] = { 1.0f, 0.3f, 0.3f, 1.0f };
static const float outputRightColor[4] = { 0.3f, 0.3f, 1.0f, 1.0f };
static const int gridRows = 2;
int gridCols = _framesPerScope;
int x = (width - SCOPE_WIDTH) / 2;
int y = (height - SCOPE_HEIGHT) / 2;
int w = SCOPE_WIDTH;
int h = SCOPE_HEIGHT;
int x = (width - (int)SCOPE_WIDTH) / 2;
int y = (height - (int)SCOPE_HEIGHT) / 2;
int w = (int)SCOPE_WIDTH;
int h = (int)SCOPE_HEIGHT;
renderBackground(backgroundColor, x, y, w, h);
renderGrid(gridColor, x, y, w, h, gridRows, gridCols);
@ -1712,7 +1768,7 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo)
// setup our general output device for audio-mixer audio
_audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
_audioOutput->setBufferSize(AUDIO_OUTPUT_BUFFER_SIZE_FRAMES * _outputFrameSize * sizeof(int16_t));
qDebug() << "Ring Buffer capacity in frames: " << _audioOutput->bufferSize() / sizeof(int16_t) / (float)_outputFrameSize;
qDebug() << "Output Buffer capacity in frames: " << _audioOutput->bufferSize() / sizeof(int16_t) / (float)_outputFrameSize;
_audioOutputIODevice.start();
_audioOutput->start(&_audioOutputIODevice);
@ -1787,13 +1843,11 @@ float Audio::getInputRingBufferMsecsAvailable() const {
}
qint64 Audio::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
MixedProcessedAudioStream& receivedAUdioStream = _parent._receivedAudioStream;
int samplesRequested = maxSize / sizeof(int16_t);
int samplesPopped;
int bytesWritten;
if ((samplesPopped = receivedAUdioStream.popSamples(samplesRequested, false)) > 0) {
AudioRingBuffer::ConstIterator lastPopOutput = receivedAUdioStream.getLastPopOutput();
if ((samplesPopped = _receivedAudioStream.popSamples(samplesRequested, false)) > 0) {
AudioRingBuffer::ConstIterator lastPopOutput = _receivedAudioStream.getLastPopOutput();
lastPopOutput.readSamples((int16_t*)data, samplesPopped);
bytesWritten = samplesPopped * sizeof(int16_t);
} else {

View file

@ -17,9 +17,11 @@
#include "InterfaceConfig.h"
#include "AudioStreamStats.h"
#include "Recorder.h"
#include "RingBufferHistory.h"
#include "MovingMinMaxAvg.h"
#include "AudioFilter.h"
#include "AudioFilterBank.h"
#include <QAudio>
#include <QAudioInput>
@ -49,14 +51,14 @@ public:
class AudioOutputIODevice : public QIODevice {
public:
AudioOutputIODevice(Audio& parent) : _parent(parent) {};
AudioOutputIODevice(MixedProcessedAudioStream& receivedAudioStream) : _receivedAudioStream(receivedAudioStream) {};
void start() { open(QIODevice::ReadOnly); }
void stop() { close(); }
qint64 readData(char * data, qint64 maxSize);
qint64 writeData(const char * data, qint64 maxSize) { return 0; }
private:
Audio& _parent;
MixedProcessedAudioStream& _receivedAudioStream;
};
@ -72,10 +74,7 @@ public:
virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen);
virtual void startDrumSound(float volume, float frequency, float duration, float decay);
void setDynamicJitterBuffers(bool dynamicJitterBuffers) { _receivedAudioStream.setDynamicJitterBuffers(dynamicJitterBuffers); }
void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames) { _receivedAudioStream.setStaticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames); }
void setMaxFramesOverDesired(int maxFramesOverDesired) { _receivedAudioStream.setMaxFramesOverDesired(maxFramesOverDesired); }
void setReceivedAudioStreamSettings(const InboundAudioStream::Settings& settings) { _receivedAudioStream.setSettings(settings); }
int getDesiredJitterBufferFrames() const { return _receivedAudioStream.getDesiredJitterBufferFrames(); }
@ -102,6 +101,8 @@ public:
float getAudioOutputMsecsUnplayed() const;
float getAudioOutputAverageMsecsUnplayed() const { return (float)_audioOutputMsecsUnplayedStats.getWindowAverage(); }
void setRecorder(RecorderPointer recorder) { _recorder = recorder; }
public slots:
void start();
@ -109,7 +110,6 @@ public slots:
void addReceivedAudioToStream(const QByteArray& audioByteArray);
void parseAudioStreamStatsPacket(const QByteArray& packet);
void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
void processReceivedAudioStreamSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
void handleAudioInput();
void reset();
void resetStats();
@ -126,6 +126,10 @@ public slots:
void selectAudioScopeFiveFrames();
void selectAudioScopeTwentyFrames();
void selectAudioScopeFiftyFrames();
void addStereoSilenceToScope(int silentSamplesPerChannel);
void addLastFrameRepeatedWithFadeToScope(int samplesPerChannel);
void addStereoSamplesToScope(const QByteArray& samples);
void processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
void toggleAudioFilter();
void selectAudioFilterFlat();
void selectAudioFilterTrebleCut();
@ -252,8 +256,9 @@ private:
void reallocateScope(int frames);
// Audio scope methods for data acquisition
void addBufferToScope(
QByteArray* byteArray, unsigned int frameOffset, const int16_t* source, unsigned int sourceChannel, unsigned int sourceNumberOfChannels);
int addBufferToScope(QByteArray* byteArray, int frameOffset, const int16_t* source, int sourceSamples,
unsigned int sourceChannel, unsigned int sourceNumberOfChannels, float fade = 1.0f);
int addSilenceToScope(QByteArray* byteArray, int frameOffset, int silentSamples);
// Audio scope methods for rendering
void renderBackground(const float* color, int x, int y, int width, int height);
@ -278,13 +283,14 @@ private:
int _samplesPerScope;
// Multi-band parametric EQ
bool _peqEnabled;
AudioFilterPEQ3 _peq;
bool _peqEnabled;
AudioFilterPEQ3m _peq;
QMutex _guard;
QByteArray* _scopeInput;
QByteArray* _scopeOutputLeft;
QByteArray* _scopeOutputRight;
QByteArray _scopeLastFrame;
#ifdef _WIN32
static const unsigned int STATS_WIDTH = 1500;
#else
@ -308,6 +314,8 @@ private:
MovingMinMaxAvg<quint64> _packetSentTimeGaps;
AudioOutputIODevice _audioOutputIODevice;
WeakRecorderPointer _recorder;
};

View file

@ -48,6 +48,7 @@ void DatagramProcessor::processDatagrams() {
// only process this packet if we have a match on the packet version
switch (packetTypeForPacket(incomingPacket)) {
case PacketTypeMixedAudio:
case PacketTypeSilentAudioFrame:
QMetaObject::invokeMethod(&application->_audio, "addReceivedAudioToStream", Qt::QueuedConnection,
Q_ARG(QByteArray, incomingPacket));
break;
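Editor's note: folding PacketTypeSilentAudioFrame into the same case routes silent frames through the identical queued hand-off. A runnable sketch of that hand-off using the lambda overload of invokeMethod (Qt 5.10+ assumed; the real code uses the string-based slot form shown above):

    #include <QByteArray>
    #include <QCoreApplication>
    #include <QDebug>

    int main(int argc, char** argv) {
        QCoreApplication app(argc, argv);
        QByteArray incomingPacket("silent-audio-frame-bytes"); // placeholder payload
        // Queued: the byte array is copied and processed from the event loop,
        // so the network reader never blocks on audio parsing.
        QMetaObject::invokeMethod(&app, [incomingPacket, &app]() {
            qDebug() << "parsing" << incomingPacket.size() << "bytes on the receiver's thread";
            app.quit();
        }, Qt::QueuedConnection);
        return app.exec();
    }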

View file

@ -82,8 +82,7 @@ const int CONSOLE_HEIGHT = 200;
Menu::Menu() :
_actionHash(),
_audioJitterBufferFrames(0),
_maxFramesOverDesired(0),
_receivedAudioStreamSettings(),
_bandwidthDialog(NULL),
_fieldOfView(DEFAULT_FIELD_OF_VIEW_DEGREES),
_realWorldFieldOfView(DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES),
@ -94,6 +93,9 @@ Menu::Menu() :
_octreeStatsDialog(NULL),
_lodToolsDialog(NULL),
_userLocationsDialog(NULL),
#ifdef Q_OS_MAC
_speechRecognizer(),
#endif
_maxVoxels(DEFAULT_MAX_VOXELS_PER_SYSTEM),
_voxelSizeScale(DEFAULT_OCTREE_SIZE_SCALE),
_oculusUIAngularSize(DEFAULT_OCULUS_UI_ANGULAR_SIZE),
@ -112,6 +114,7 @@ Menu::Menu() :
_loginAction(NULL),
_preferencesDialog(NULL),
_loginDialog(NULL),
_hasLoginDialogDisplayed(false),
_snapshotsLocation(),
_scriptsLocation(),
_walletPrivateKey()
@ -221,19 +224,16 @@ Menu::Menu() :
addActionToQMenuAndActionHash(editMenu, MenuOption::Attachments, 0, this, SLOT(editAttachments()));
addActionToQMenuAndActionHash(editMenu, MenuOption::Animations, 0, this, SLOT(editAnimations()));
addDisabledActionAndSeparator(editMenu, "Physics");
QObject* avatar = appInstance->getAvatar();
addCheckableActionToQMenuAndActionHash(editMenu, MenuOption::ObeyEnvironmentalGravity, Qt::SHIFT | Qt::Key_G, false,
avatar, SLOT(updateMotionBehaviorsFromMenu()));
addCheckableActionToQMenuAndActionHash(editMenu, MenuOption::StandOnNearbyFloors, 0, true,
avatar, SLOT(updateMotionBehaviorsFromMenu()));
addAvatarCollisionSubMenu(editMenu);
QMenu* toolsMenu = addMenu("Tools");
addActionToQMenuAndActionHash(toolsMenu, MenuOption::MetavoxelEditor, 0, this, SLOT(showMetavoxelEditor()));
addActionToQMenuAndActionHash(toolsMenu, MenuOption::ScriptEditor, Qt::ALT | Qt::Key_S, this, SLOT(showScriptEditor()));
#ifdef Q_OS_MAC
QAction* speechRecognizerAction = addCheckableActionToQMenuAndActionHash(toolsMenu, MenuOption::ControlWithSpeech,
Qt::CTRL | Qt::SHIFT | Qt::Key_C, _speechRecognizer.getEnabled(), &_speechRecognizer, SLOT(setEnabled(bool)));
connect(&_speechRecognizer, SIGNAL(enabledUpdated(bool)), speechRecognizerAction, SLOT(setChecked(bool)));
#endif
#ifdef HAVE_QXMPP
_chatAction = addActionToQMenuAndActionHash(toolsMenu,
MenuOption::Chat,
@ -257,6 +257,45 @@ Menu::Menu() :
this,
SLOT(toggleConsole()));
QMenu* avatarMenu = addMenu("Avatar");
QMenu* avatarSizeMenu = avatarMenu->addMenu("Size");
addActionToQMenuAndActionHash(avatarSizeMenu,
MenuOption::IncreaseAvatarSize,
Qt::Key_Plus,
appInstance->getAvatar(),
SLOT(increaseSize()));
addActionToQMenuAndActionHash(avatarSizeMenu,
MenuOption::DecreaseAvatarSize,
Qt::Key_Minus,
appInstance->getAvatar(),
SLOT(decreaseSize()));
addActionToQMenuAndActionHash(avatarSizeMenu,
MenuOption::ResetAvatarSize,
Qt::Key_Equal,
appInstance->getAvatar(),
SLOT(resetSize()));
QObject* avatar = appInstance->getAvatar();
addCheckableActionToQMenuAndActionHash(avatarMenu, MenuOption::ChatCircling, 0, false);
addCheckableActionToQMenuAndActionHash(avatarMenu, MenuOption::GlowWhenSpeaking, 0, true);
addCheckableActionToQMenuAndActionHash(avatarMenu, MenuOption::BlueSpeechSphere, 0, true);
addCheckableActionToQMenuAndActionHash(avatarMenu, MenuOption::ObeyEnvironmentalGravity, Qt::SHIFT | Qt::Key_G, false,
avatar, SLOT(updateMotionBehaviorsFromMenu()));
addCheckableActionToQMenuAndActionHash(avatarMenu, MenuOption::StandOnNearbyFloors, 0, true,
avatar, SLOT(updateMotionBehaviorsFromMenu()));
QMenu* collisionsMenu = avatarMenu->addMenu("Collide With...");
addCheckableActionToQMenuAndActionHash(collisionsMenu, MenuOption::CollideAsRagdoll);
addCheckableActionToQMenuAndActionHash(collisionsMenu, MenuOption::CollideWithAvatars,
0, true, avatar, SLOT(updateCollisionGroups()));
addCheckableActionToQMenuAndActionHash(collisionsMenu, MenuOption::CollideWithVoxels,
0, false, avatar, SLOT(updateCollisionGroups()));
addCheckableActionToQMenuAndActionHash(collisionsMenu, MenuOption::CollideWithParticles,
0, true, avatar, SLOT(updateCollisionGroups()));
addCheckableActionToQMenuAndActionHash(collisionsMenu, MenuOption::CollideWithEnvironment,
0, false, avatar, SLOT(updateCollisionGroups()));
QMenu* viewMenu = addMenu("View");
#ifdef Q_OS_MAC
@ -304,25 +343,6 @@ Menu::Menu() :
Qt::CTRL | Qt::SHIFT | Qt::Key_3, false,
&nodeBounds, SLOT(setShowParticleNodes(bool)));
QMenu* avatarSizeMenu = viewMenu->addMenu("Avatar Size");
addActionToQMenuAndActionHash(avatarSizeMenu,
MenuOption::IncreaseAvatarSize,
Qt::Key_Plus,
appInstance->getAvatar(),
SLOT(increaseSize()));
addActionToQMenuAndActionHash(avatarSizeMenu,
MenuOption::DecreaseAvatarSize,
Qt::Key_Minus,
appInstance->getAvatar(),
SLOT(decreaseSize()));
addActionToQMenuAndActionHash(avatarSizeMenu,
MenuOption::ResetAvatarSize,
Qt::Key_Equal,
appInstance->getAvatar(),
SLOT(resetSize()));
addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::OffAxisProjection, 0, false);
addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::TurnWithHead, 0, false);
addCheckableActionToQMenuAndActionHash(viewMenu, MenuOption::MoveWithLean, 0, false);
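Editor's note: menu construction throughout this file leans on addCheckableActionToQMenuAndActionHash(). A hedged sketch of what such a helper plausibly does; the real implementation lives elsewhere in Menu.cpp and may differ:

    #include <QAction>
    #include <QHash>
    #include <QKeySequence>
    #include <QMenu>
    #include <QObject>
    #include <QString>

    QAction* addCheckableAction(QMenu* menu, QHash<QString, QAction*>& actionHash,
                                const QString& name, const QKeySequence& shortcut,
                                bool checked, QObject* receiver, const char* slot) {
        // create a checkable action, register it so menuItemExists() and
        // getActionForOption() can find it, and wire it to the receiver's slot
        QAction* action = menu->addAction(name);
        action->setShortcut(shortcut);
        action->setCheckable(true);
        action->setChecked(checked);
        if (receiver && slot) {
            QObject::connect(action, SIGNAL(triggered(bool)), receiver, slot);
        }
        actionHash.insert(name, action);
        return action;
    }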
@ -338,111 +358,97 @@ Menu::Menu() :
QMenu* developerMenu = addMenu("Developer");
QMenu* renderOptionsMenu = developerMenu->addMenu("Rendering Options");
addCheckableActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::Stars, Qt::Key_Asterisk, true);
QMenu* renderOptionsMenu = developerMenu->addMenu("Render");
addCheckableActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::Atmosphere, Qt::SHIFT | Qt::Key_A, true);
addCheckableActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::Avatars, 0, true);
addCheckableActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::Metavoxels, 0, true);
addCheckableActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::Models, 0, true);
addCheckableActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::Particles, 0, true);
QMenu* shadowMenu = renderOptionsMenu->addMenu("Shadows");
QActionGroup* shadowGroup = new QActionGroup(shadowMenu);
shadowGroup->addAction(addCheckableActionToQMenuAndActionHash(shadowMenu, "None", 0, true));
shadowGroup->addAction(addCheckableActionToQMenuAndActionHash(shadowMenu, MenuOption::SimpleShadows, 0, false));
shadowGroup->addAction(addCheckableActionToQMenuAndActionHash(shadowMenu, MenuOption::CascadedShadows, 0, false));
shadowGroup->addAction(addCheckableActionToQMenuAndActionHash(shadowMenu, MenuOption::AvatarsReceiveShadows, 0, true));
addCheckableActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::Stars, Qt::Key_Asterisk, true);
addCheckableActionToQMenuAndActionHash(renderOptionsMenu,
MenuOption::Voxels,
Qt::SHIFT | Qt::Key_V,
true,
appInstance,
SLOT(setRenderVoxels(bool)));
addCheckableActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::EnableGlowEffect, 0, true);
addActionToQMenuAndActionHash(renderOptionsMenu,
MenuOption::GlowMode,
0,
appInstance->getGlowEffect(),
SLOT(cycleRenderMode()));
QMenu* shadowMenu = renderOptionsMenu->addMenu("Shadows");
QActionGroup* shadowGroup = new QActionGroup(shadowMenu);
shadowGroup->addAction(addCheckableActionToQMenuAndActionHash(shadowMenu, "None", 0, true));
shadowGroup->addAction(addCheckableActionToQMenuAndActionHash(shadowMenu, MenuOption::SimpleShadows, 0, false));
shadowGroup->addAction(addCheckableActionToQMenuAndActionHash(shadowMenu, MenuOption::CascadedShadows, 0, false));
addCheckableActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::Metavoxels, 0, true);
addCheckableActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::BuckyBalls, 0, false);
addCheckableActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::StringHair, 0, false);
addCheckableActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::Particles, 0, true);
addActionToQMenuAndActionHash(renderOptionsMenu, MenuOption::LodTools, Qt::SHIFT | Qt::Key_L, this, SLOT(lodTools()));
QMenu* voxelOptionsMenu = developerMenu->addMenu("Voxel Options");
addCheckableActionToQMenuAndActionHash(voxelOptionsMenu,
MenuOption::Voxels,
Qt::SHIFT | Qt::Key_V,
true,
appInstance,
SLOT(setRenderVoxels(bool)));
addCheckableActionToQMenuAndActionHash(voxelOptionsMenu, MenuOption::VoxelTextures);
addCheckableActionToQMenuAndActionHash(voxelOptionsMenu, MenuOption::AmbientOcclusion);
addCheckableActionToQMenuAndActionHash(voxelOptionsMenu, MenuOption::DontFadeOnVoxelServerChanges);
addCheckableActionToQMenuAndActionHash(voxelOptionsMenu, MenuOption::DisableAutoAdjustLOD);
QMenu* modelOptionsMenu = developerMenu->addMenu("Model Options");
addCheckableActionToQMenuAndActionHash(modelOptionsMenu, MenuOption::Models, 0, true);
addCheckableActionToQMenuAndActionHash(modelOptionsMenu, MenuOption::DisplayModelBounds, 0, false);
addCheckableActionToQMenuAndActionHash(modelOptionsMenu, MenuOption::DisplayModelElementProxy, 0, false);
addCheckableActionToQMenuAndActionHash(modelOptionsMenu, MenuOption::DisplayModelElementChildProxies, 0, false);
QMenu* avatarOptionsMenu = developerMenu->addMenu("Avatar Options");
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::Avatars, 0, true);
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::AvatarsReceiveShadows, 0, true);
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::RenderSkeletonCollisionShapes);
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::RenderHeadCollisionShapes);
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::RenderBoundingCollisionShapes);
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::CollideAsRagdoll);
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::LookAtVectors, 0, false);
QMenu* avatarDebugMenu = developerMenu->addMenu("Avatar");
#ifdef HAVE_FACESHIFT
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu,
addCheckableActionToQMenuAndActionHash(avatarDebugMenu,
MenuOption::Faceshift,
0,
true,
appInstance->getFaceshift(),
SLOT(setTCPEnabled(bool)));
#endif
#ifdef HAVE_FACEPLUS
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::Faceplus, 0, true,
appInstance->getFaceplus(), SLOT(updateEnabled()));
addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::Faceplus, 0, true,
appInstance->getFaceplus(), SLOT(updateEnabled()));
#endif
#ifdef HAVE_VISAGE
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::Visage, 0, false,
appInstance->getVisage(), SLOT(updateEnabled()));
addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::Visage, 0, false,
appInstance->getVisage(), SLOT(updateEnabled()));
#endif
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::GlowWhenSpeaking, 0, true);
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::ChatCircling, 0, false);
addCheckableActionToQMenuAndActionHash(avatarOptionsMenu, MenuOption::FocusIndicators, 0, false);
QMenu* sixenseOptionsMenu = developerMenu->addMenu("Sixense Options");
addCheckableActionToQMenuAndActionHash(sixenseOptionsMenu, MenuOption::SixenseMouseInput, 0, true);
addCheckableActionToQMenuAndActionHash(sixenseOptionsMenu, MenuOption::SixenseLasers, 0, true);
QMenu* handOptionsMenu = developerMenu->addMenu("Hand Options");
addCheckableActionToQMenuAndActionHash(handOptionsMenu,
addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::RenderSkeletonCollisionShapes);
addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::RenderHeadCollisionShapes);
addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::RenderBoundingCollisionShapes);
addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::RenderLookAtVectors, 0, false);
addCheckableActionToQMenuAndActionHash(avatarDebugMenu, MenuOption::RenderFocusIndicator, 0, false);
QMenu* modelDebugMenu = developerMenu->addMenu("Models");
addCheckableActionToQMenuAndActionHash(modelDebugMenu, MenuOption::DisplayModelBounds, 0, false);
addCheckableActionToQMenuAndActionHash(modelDebugMenu, MenuOption::DisplayModelElementProxy, 0, false);
addCheckableActionToQMenuAndActionHash(modelDebugMenu, MenuOption::DisplayModelElementChildProxies, 0, false);
QMenu* voxelOptionsMenu = developerMenu->addMenu("Voxels");
addCheckableActionToQMenuAndActionHash(voxelOptionsMenu, MenuOption::VoxelTextures);
addCheckableActionToQMenuAndActionHash(voxelOptionsMenu, MenuOption::AmbientOcclusion);
addCheckableActionToQMenuAndActionHash(voxelOptionsMenu, MenuOption::DontFadeOnVoxelServerChanges);
addCheckableActionToQMenuAndActionHash(voxelOptionsMenu, MenuOption::DisableAutoAdjustLOD);
QMenu* handOptionsMenu = developerMenu->addMenu("Hands");
addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::AlignForearmsWithWrists, 0, false);
addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::AlternateIK, 0, false);
addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::DisplayHands, 0, true);
addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::DisplayHandTargets, 0, false);
addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::ShowIKConstraints, 0, false);
QMenu* sixenseOptionsMenu = handOptionsMenu->addMenu("Sixense");
addCheckableActionToQMenuAndActionHash(sixenseOptionsMenu,
MenuOption::FilterSixense,
0,
true,
appInstance->getSixenseManager(),
SLOT(setFilter(bool)));
addCheckableActionToQMenuAndActionHash(handOptionsMenu,
addCheckableActionToQMenuAndActionHash(sixenseOptionsMenu,
MenuOption::LowVelocityFilter,
0,
true,
appInstance,
SLOT(setLowVelocityFilter(bool)));
addCheckableActionToQMenuAndActionHash(sixenseOptionsMenu, MenuOption::SixenseMouseInput, 0, true);
addCheckableActionToQMenuAndActionHash(sixenseOptionsMenu, MenuOption::SixenseLasers, 0, false);
addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::DisplayHands, 0, true);
addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::DisplayHandTargets, 0, false);
addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::HandsCollideWithSelf, 0, false);
addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::ShowIKConstraints, 0, false);
addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::AlignForearmsWithWrists, 0, true);
addCheckableActionToQMenuAndActionHash(handOptionsMenu, MenuOption::AlternateIK, 0, false);
addCheckableActionToQMenuAndActionHash(developerMenu, MenuOption::DisableNackPackets, 0, false);
addCheckableActionToQMenuAndActionHash(developerMenu,
QMenu* networkMenu = developerMenu->addMenu("Network");
addCheckableActionToQMenuAndActionHash(networkMenu, MenuOption::DisableNackPackets, 0, false);
addCheckableActionToQMenuAndActionHash(networkMenu,
MenuOption::DisableActivityLogger,
0,
false,
@ -451,9 +457,7 @@ Menu::Menu() :
addActionToQMenuAndActionHash(developerMenu, MenuOption::WalletPrivateKey, 0, this, SLOT(changePrivateKey()));
addDisabledActionAndSeparator(developerMenu, "Testing");
QMenu* timingMenu = developerMenu->addMenu("Timing and Statistics Tools");
QMenu* timingMenu = developerMenu->addMenu("Timing and Stats");
QMenu* perfTimerMenu = timingMenu->addMenu("Performance Timer");
addCheckableActionToQMenuAndActionHash(perfTimerMenu, MenuOption::DisplayTimingDetails, 0, true);
addCheckableActionToQMenuAndActionHash(perfTimerMenu, MenuOption::ExpandUpdateTiming, 0, false);
@ -465,8 +469,10 @@ Menu::Menu() :
addCheckableActionToQMenuAndActionHash(timingMenu, MenuOption::TestPing, 0, true);
addCheckableActionToQMenuAndActionHash(timingMenu, MenuOption::FrameTimer);
addActionToQMenuAndActionHash(timingMenu, MenuOption::RunTimingTests, 0, this, SLOT(runTests()));
addCheckableActionToQMenuAndActionHash(timingMenu, MenuOption::PipelineWarnings);
addCheckableActionToQMenuAndActionHash(timingMenu, MenuOption::SuppressShortTimings);
QMenu* frustumMenu = developerMenu->addMenu("View Frustum Debugging Tools");
QMenu* frustumMenu = developerMenu->addMenu("View Frustum");
addCheckableActionToQMenuAndActionHash(frustumMenu, MenuOption::DisplayFrustum, Qt::SHIFT | Qt::Key_F);
addActionToQMenuAndActionHash(frustumMenu,
MenuOption::FrustumRenderMode,
@ -476,11 +482,7 @@ Menu::Menu() :
updateFrustumRenderModeAction();
QMenu* renderDebugMenu = developerMenu->addMenu("Render Debugging Tools");
addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::PipelineWarnings);
addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::SuppressShortTimings);
QMenu* audioDebugMenu = developerMenu->addMenu("Audio Debugging Tools");
QMenu* audioDebugMenu = developerMenu->addMenu("Audio");
addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioNoiseReduction,
0,
true,
@ -493,7 +495,7 @@ Menu::Menu() :
appInstance->getAudio(),
SLOT(toggleAudioFilter()));
QMenu* audioFilterMenu = audioDebugMenu->addMenu("Audio Filter Options");
QMenu* audioFilterMenu = audioDebugMenu->addMenu("Audio Filter");
addDisabledActionAndSeparator(audioFilterMenu, "Filter Response");
{
QAction *flat = addCheckableActionToQMenuAndActionHash(audioFilterMenu, MenuOption::AudioFilterFlat,
@ -557,7 +559,7 @@ Menu::Menu() :
appInstance->getAudio(),
SLOT(toggleScopePause()));
QMenu* audioScopeMenu = audioDebugMenu->addMenu("Audio Scope Options");
QMenu* audioScopeMenu = audioDebugMenu->addMenu("Audio Scope");
addDisabledActionAndSeparator(audioScopeMenu, "Display Frames");
{
QAction *fiveFrames = addCheckableActionToQMenuAndActionHash(audioScopeMenu, MenuOption::AudioScopeFiveFrames,
@ -648,13 +650,17 @@ Menu::Menu() :
appInstance->getAudio(),
SLOT(toggleStatsShowInjectedStreams()));
connect(appInstance->getAudio(), SIGNAL(muteToggled()), this, SLOT(audioMuteToggled()));
QMenu* experimentalOptionsMenu = developerMenu->addMenu("Experimental");
addCheckableActionToQMenuAndActionHash(experimentalOptionsMenu, MenuOption::BuckyBalls, 0, false);
addCheckableActionToQMenuAndActionHash(experimentalOptionsMenu, MenuOption::StringHair, 0, false);
addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel,
Qt::CTRL | Qt::SHIFT | Qt::Key_V,
this,
SLOT(pasteToVoxel()));
connect(appInstance->getAudio(), SIGNAL(muteToggled()), this, SLOT(audioMuteToggled()));
#ifndef Q_OS_MAC
QMenu* helpMenu = addMenu("Help");
QAction* helpAction = helpMenu->addAction(MenuOption::AboutApp);
@ -674,8 +680,15 @@ void Menu::loadSettings(QSettings* settings) {
lockedSettings = true;
}
_audioJitterBufferFrames = loadSetting(settings, "audioJitterBufferFrames", 0);
_maxFramesOverDesired = loadSetting(settings, "maxFramesOverDesired", DEFAULT_MAX_FRAMES_OVER_DESIRED);
_receivedAudioStreamSettings._dynamicJitterBuffers = settings->value("dynamicJitterBuffers", DEFAULT_DYNAMIC_JITTER_BUFFERS).toBool();
_receivedAudioStreamSettings._maxFramesOverDesired = settings->value("maxFramesOverDesired", DEFAULT_MAX_FRAMES_OVER_DESIRED).toInt();
_receivedAudioStreamSettings._staticDesiredJitterBufferFrames = settings->value("staticDesiredJitterBufferFrames", DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES).toInt();
_receivedAudioStreamSettings._useStDevForJitterCalc = settings->value("useStDevForJitterCalc", DEFAULT_USE_STDEV_FOR_JITTER_CALC).toBool();
_receivedAudioStreamSettings._windowStarveThreshold = settings->value("windowStarveThreshold", DEFAULT_WINDOW_STARVE_THRESHOLD).toInt();
_receivedAudioStreamSettings._windowSecondsForDesiredCalcOnTooManyStarves = settings->value("windowSecondsForDesiredCalcOnTooManyStarves", DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES).toInt();
_receivedAudioStreamSettings._windowSecondsForDesiredReduction = settings->value("windowSecondsForDesiredReduction", DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION).toInt();
_receivedAudioStreamSettings._repetitionWithFade = settings->value("repetitionWithFade", DEFAULT_REPETITION_WITH_FADE).toBool();
_fieldOfView = loadSetting(settings, "fieldOfView", DEFAULT_FIELD_OF_VIEW_DEGREES);
_realWorldFieldOfView = loadSetting(settings, "realWorldFieldOfView", DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES);
_faceshiftEyeDeflection = loadSetting(settings, "faceshiftEyeDeflection", DEFAULT_FACESHIFT_EYE_DEFLECTION);
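Editor's note: each jitter-buffer field now round-trips through QSettings with an explicit default. A compact sketch of that load/save symmetry (two fields shown; the keys mirror the diff, the struct and defaults are illustrative stand-ins):

    #include <QSettings>

    struct StreamSettingsSketch {
        bool dynamicJitterBuffers;
        int maxFramesOverDesired;
    };

    StreamSettingsSketch loadStreamSettings(QSettings& settings) {
        StreamSettingsSketch s;
        // value() falls back to the supplied default when the key is absent
        s.dynamicJitterBuffers = settings.value("dynamicJitterBuffers", true).toBool();
        s.maxFramesOverDesired = settings.value("maxFramesOverDesired", 10).toInt();
        return s;
    }

    void saveStreamSettings(QSettings& settings, const StreamSettingsSketch& s) {
        // mirror of the loader: one setValue() per key
        settings.setValue("dynamicJitterBuffers", s.dynamicJitterBuffers);
        settings.setValue("maxFramesOverDesired", s.maxFramesOverDesired);
    }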
@ -692,6 +705,10 @@ void Menu::loadSettings(QSettings* settings) {
QStandardPaths::writableLocation(QStandardPaths::DesktopLocation)).toString();
setScriptsLocation(settings->value("scriptsLocation", QString()).toString());
#ifdef Q_OS_MAC
_speechRecognizer.setEnabled(settings->value("speechRecognitionEnabled", false).toBool());
#endif
settings->beginGroup("View Frustum Offset Camera");
// in case settings is corrupt or missing, loadSetting() will check for NaN
_viewFrustumOffset.yaw = loadSetting(settings, "viewFrustumOffsetYaw", 0.0f);
@ -725,8 +742,15 @@ void Menu::saveSettings(QSettings* settings) {
lockedSettings = true;
}
settings->setValue("audioJitterBufferFrames", _audioJitterBufferFrames);
settings->setValue("maxFramesOverDesired", _maxFramesOverDesired);
settings->setValue("dynamicJitterBuffers", _receivedAudioStreamSettings._dynamicJitterBuffers);
settings->setValue("maxFramesOverDesired", _receivedAudioStreamSettings._maxFramesOverDesired);
settings->setValue("staticDesiredJitterBufferFrames", _receivedAudioStreamSettings._staticDesiredJitterBufferFrames);
settings->setValue("useStDevForJitterCalc", _receivedAudioStreamSettings._useStDevForJitterCalc);
settings->setValue("windowStarveThreshold", _receivedAudioStreamSettings._windowStarveThreshold);
settings->setValue("windowSecondsForDesiredCalcOnTooManyStarves", _receivedAudioStreamSettings._windowSecondsForDesiredCalcOnTooManyStarves);
settings->setValue("windowSecondsForDesiredReduction", _receivedAudioStreamSettings._windowSecondsForDesiredReduction);
settings->setValue("repetitionWithFade", _receivedAudioStreamSettings._repetitionWithFade);
settings->setValue("fieldOfView", _fieldOfView);
settings->setValue("faceshiftEyeDeflection", _faceshiftEyeDeflection);
settings->setValue("maxVoxels", _maxVoxels);
@ -739,6 +763,9 @@ void Menu::saveSettings(QSettings* settings) {
settings->setValue("boundaryLevelAdjust", _boundaryLevelAdjust);
settings->setValue("snapshotsLocation", _snapshotsLocation);
settings->setValue("scriptsLocation", _scriptsLocation);
#ifdef Q_OS_MAC
settings->setValue("speechRecognitionEnabled", _speechRecognizer.getEnabled());
#endif
settings->beginGroup("View Frustum Offset Camera");
settings->setValue("viewFrustumOffsetYaw", _viewFrustumOffset.yaw);
settings->setValue("viewFrustumOffsetPitch", _viewFrustumOffset.pitch);
@ -1026,12 +1053,24 @@ void sendFakeEnterEvent() {
const float DIALOG_RATIO_OF_WINDOW = 0.30f;
void Menu::clearLoginDialogDisplayedFlag() {
// Needed for domains that don't require login.
_hasLoginDialogDisplayed = false;
}
void Menu::loginForCurrentDomain() {
if (!_loginDialog) {
if (!_loginDialog && !_hasLoginDialogDisplayed) {
_loginDialog = new LoginDialog(Application::getInstance()->getWindow());
_loginDialog->show();
_loginDialog->resizeAndPosition(false);
}
_hasLoginDialogDisplayed = true;
}
void Menu::showLoginForCurrentDomain() {
_hasLoginDialogDisplayed = false;
loginForCurrentDomain();
}
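Editor's note: the new _hasLoginDialogDisplayed flag makes the automatic prompt fire at most once per domain, while the explicit menu action still always shows it. A minimal sketch of the guard with stand-in types:

    class MenuSketch {
    public:
        void loginForCurrentDomain() {
            if (!_dialogVisible && !_hasLoginDialogDisplayed) {
                _dialogVisible = true;     // stands in for creating the LoginDialog
            }
            _hasLoginDialogDisplayed = true;
        }
        void showLoginForCurrentDomain() {
            _hasLoginDialogDisplayed = false;   // explicit menu action always shows
            loginForCurrentDomain();
        }
        void clearLoginDialogDisplayedFlag() { _hasLoginDialogDisplayed = false; }
    private:
        bool _dialogVisible = false;
        bool _hasLoginDialogDisplayed = false;
    };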
void Menu::editPreferences() {
@ -1378,7 +1417,7 @@ void Menu::toggleLoginMenuItem() {
// change the menu item to login
_loginAction->setText("Login");
connect(_loginAction, &QAction::triggered, this, &Menu::loginForCurrentDomain);
connect(_loginAction, &QAction::triggered, this, &Menu::showLoginForCurrentDomain);
}
}
@ -1457,7 +1496,9 @@ void Menu::toggleConsole() {
void Menu::audioMuteToggled() {
QAction *muteAction = _actionHash.value(MenuOption::MuteAudio);
muteAction->setChecked(Application::getInstance()->getAudio()->getMuted());
if (muteAction) {
muteAction->setChecked(Application::getInstance()->getAudio()->getMuted());
}
}
void Menu::bandwidthDetailsClosed() {
@ -1629,45 +1670,31 @@ void Menu::runTests() {
void Menu::updateFrustumRenderModeAction() {
QAction* frustumRenderModeAction = _actionHash.value(MenuOption::FrustumRenderMode);
switch (_frustumDrawMode) {
default:
case FRUSTUM_DRAW_MODE_ALL:
frustumRenderModeAction->setText("Render Mode - All");
break;
case FRUSTUM_DRAW_MODE_VECTORS:
frustumRenderModeAction->setText("Render Mode - Vectors");
break;
case FRUSTUM_DRAW_MODE_PLANES:
frustumRenderModeAction->setText("Render Mode - Planes");
break;
case FRUSTUM_DRAW_MODE_NEAR_PLANE:
frustumRenderModeAction->setText("Render Mode - Near");
break;
case FRUSTUM_DRAW_MODE_FAR_PLANE:
frustumRenderModeAction->setText("Render Mode - Far");
break;
case FRUSTUM_DRAW_MODE_KEYHOLE:
frustumRenderModeAction->setText("Render Mode - Keyhole");
break;
if (frustumRenderModeAction) {
switch (_frustumDrawMode) {
default:
case FRUSTUM_DRAW_MODE_ALL:
frustumRenderModeAction->setText("Render Mode - All");
break;
case FRUSTUM_DRAW_MODE_VECTORS:
frustumRenderModeAction->setText("Render Mode - Vectors");
break;
case FRUSTUM_DRAW_MODE_PLANES:
frustumRenderModeAction->setText("Render Mode - Planes");
break;
case FRUSTUM_DRAW_MODE_NEAR_PLANE:
frustumRenderModeAction->setText("Render Mode - Near");
break;
case FRUSTUM_DRAW_MODE_FAR_PLANE:
frustumRenderModeAction->setText("Render Mode - Far");
break;
case FRUSTUM_DRAW_MODE_KEYHOLE:
frustumRenderModeAction->setText("Render Mode - Keyhole");
break;
}
}
}
void Menu::addAvatarCollisionSubMenu(QMenu* overMenu) {
// add avatar collisions subMenu to overMenu
QMenu* subMenu = overMenu->addMenu("Collision Options");
Application* appInstance = Application::getInstance();
QObject* avatar = appInstance->getAvatar();
addCheckableActionToQMenuAndActionHash(subMenu, MenuOption::CollideWithEnvironment,
0, false, avatar, SLOT(updateCollisionGroups()));
addCheckableActionToQMenuAndActionHash(subMenu, MenuOption::CollideWithAvatars,
0, true, avatar, SLOT(updateCollisionGroups()));
addCheckableActionToQMenuAndActionHash(subMenu, MenuOption::CollideWithVoxels,
0, false, avatar, SLOT(updateCollisionGroups()));
addCheckableActionToQMenuAndActionHash(subMenu, MenuOption::CollideWithParticles,
0, true, avatar, SLOT(updateCollisionGroups()));
}
QAction* Menu::getActionFromName(const QString& menuName, QMenu* menu) {
QList<QAction*> menuActions;
if (menu) {
@ -1887,10 +1914,9 @@ void Menu::removeMenuItem(const QString& menu, const QString& menuitem) {
};
bool Menu::menuItemExists(const QString& menu, const QString& menuitem) {
QMenu* menuObj = getMenu(menu);
QAction* menuItemAction = _actionHash.value(menuitem);
if (menuObj && menuItemAction) {
return true;
if (menuItemAction) {
return (getMenu(menu) != NULL);
}
return false;
};

View file

@ -23,6 +23,10 @@
#include <MenuItemProperties.h>
#include <OctreeConstants.h>
#ifdef Q_OS_MAC
#include "SpeechRecognizer.h"
#endif
#include "location/LocationManager.h"
#include "ui/PreferencesDialog.h"
#include "ui/ChatWindow.h"
@ -85,10 +89,8 @@ public:
void triggerOption(const QString& menuOption);
QAction* getActionForOption(const QString& menuOption);
float getAudioJitterBufferFrames() const { return _audioJitterBufferFrames; }
void setAudioJitterBufferFrames(float audioJitterBufferSamples) { _audioJitterBufferFrames = audioJitterBufferSamples; }
int getMaxFramesOverDesired() const { return _maxFramesOverDesired; }
void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; }
const InboundAudioStream::Settings& getReceivedAudioStreamSettings() const { return _receivedAudioStreamSettings; }
void setReceivedAudioStreamSettings(const InboundAudioStream::Settings& receivedAudioStreamSettings) { _receivedAudioStreamSettings = receivedAudioStreamSettings; }
float getFieldOfView() const { return _fieldOfView; }
void setFieldOfView(float fieldOfView) { _fieldOfView = fieldOfView; }
float getRealWorldFieldOfView() const { return _realWorldFieldOfView; }
@ -137,6 +139,10 @@ public:
void setBoundaryLevelAdjust(int boundaryLevelAdjust);
int getBoundaryLevelAdjust() const { return _boundaryLevelAdjust; }
#ifdef Q_OS_MAC
SpeechRecognizer* getSpeechRecognizer() { return &_speechRecognizer; }
#endif
// User Tweakable PPS from Voxel Server
int getMaxVoxelPacketsPerSecond() const { return _maxVoxelPacketsPerSecond; }
void setMaxVoxelPacketsPerSecond(int maxVoxelPacketsPerSecond) { _maxVoxelPacketsPerSecond = maxVoxelPacketsPerSecond; }
@ -169,7 +175,9 @@ signals:
public slots:
void clearLoginDialogDisplayedFlag();
void loginForCurrentDomain();
void showLoginForCurrentDomain();
void bandwidthDetails();
void octreeStatsDetails();
void lodTools();
@ -246,8 +254,6 @@ private:
void updateFrustumRenderModeAction();
void addAvatarCollisionSubMenu(QMenu* overMenu);
QAction* getActionFromName(const QString& menuName, QMenu* menu);
QMenu* getSubMenuFromName(const QString& menuName, QMenu* menu);
QMenu* getMenuParent(const QString& menuName, QString& finalMenuPart);
@ -259,8 +265,7 @@ private:
QHash<QString, QAction*> _actionHash;
int _audioJitterBufferFrames; /// number of extra frames to wait before starting audio playback
int _maxFramesOverDesired;
InboundAudioStream::Settings _receivedAudioStreamSettings;
BandwidthDialog* _bandwidthDialog;
float _fieldOfView; /// in Degrees, doesn't apply to HMD like Oculus
float _realWorldFieldOfView; // The actual FOV set by the user's monitor size and view distance
@ -274,6 +279,9 @@ private:
OctreeStatsDialog* _octreeStatsDialog;
LodToolsDialog* _lodToolsDialog;
UserLocationsDialog* _userLocationsDialog;
#ifdef Q_OS_MAC
SpeechRecognizer _speechRecognizer;
#endif
int _maxVoxels;
float _voxelSizeScale;
float _oculusUIAngularSize;
@ -296,6 +304,7 @@ private:
QPointer<AttachmentsDialog> _attachmentsDialog;
QPointer<AnimationsDialog> _animationsDialog;
QPointer<LoginDialog> _loginDialog;
bool _hasLoginDialogDisplayed;
QAction* _chatAction;
QString _snapshotsLocation;
QString _scriptsLocation;
@ -342,25 +351,27 @@ namespace MenuOption {
const QString AvatarsReceiveShadows = "Avatars Receive Shadows";
const QString Bandwidth = "Bandwidth Display";
const QString BandwidthDetails = "Bandwidth Details";
const QString BlueSpeechSphere = "Blue Sphere While Speaking";
const QString BuckyBalls = "Bucky Balls";
const QString CascadedShadows = "Cascaded";
const QString Chat = "Chat...";
const QString ChatCircling = "Chat Circling";
const QString CollideAsRagdoll = "Collide As Ragdoll";
const QString CollideWithAvatars = "Collide With Avatars";
const QString CollideAsRagdoll = "Collide With Self (Ragdoll)";
const QString CollideWithAvatars = "Collide With Other Avatars";
const QString CollideWithEnvironment = "Collide With World Boundaries";
const QString CollideWithParticles = "Collide With Particles";
const QString CollideWithVoxels = "Collide With Voxels";
const QString Collisions = "Collisions";
const QString Console = "Console...";
const QString ControlWithSpeech = "Control With Speech";
const QString DecreaseAvatarSize = "Decrease Avatar Size";
const QString DecreaseVoxelSize = "Decrease Voxel Size";
const QString DisableActivityLogger = "Disable Activity Logger";
const QString DisableAutoAdjustLOD = "Disable Automatically Adjusting LOD";
const QString DisableNackPackets = "Disable NACK Packets";
const QString DisplayFrustum = "Display Frustum";
const QString DisplayHands = "Display Hands";
const QString DisplayHandTargets = "Display Hand Targets";
const QString DisplayHands = "Show Hand Info";
const QString DisplayHandTargets = "Show Hand Targets";
const QString DisplayModelBounds = "Display Model Bounds";
const QString DisplayModelElementChildProxies = "Display Model Element Children";
const QString DisplayModelElementProxy = "Display Model Element Bounds";
@ -380,7 +391,6 @@ namespace MenuOption {
const QString Faceshift = "Faceshift";
const QString FilterSixense = "Smooth Sixense Movement";
const QString FirstPerson = "First Person";
const QString FocusIndicators = "Focus Indicators";
const QString FrameTimer = "Show Timer";
const QString FrustumRenderMode = "Render Mode";
const QString Fullscreen = "Fullscreen";
@ -391,7 +401,6 @@ namespace MenuOption {
const QString GoToDomain = "Go To Domain...";
const QString GoTo = "Go To...";
const QString GoToLocation = "Go To Location...";
const QString HandsCollideWithSelf = "Collide With Self";
const QString HeadMouse = "Head Mouse";
const QString IncreaseAvatarSize = "Increase Avatar Size";
const QString IncreaseVoxelSize = "Increase Voxel Size";
@ -401,7 +410,6 @@ namespace MenuOption {
const QString Login = "Login";
const QString Log = "Log";
const QString Logout = "Logout";
const QString LookAtVectors = "Look-at Vectors";
const QString LowVelocityFilter = "Low Velocity Filter";
const QString MetavoxelEditor = "Metavoxel Editor...";
const QString Metavoxels = "Metavoxels";
@ -421,13 +429,15 @@ namespace MenuOption {
const QString Pair = "Pair";
const QString Particles = "Particles";
const QString PasteToVoxel = "Paste to Voxel...";
const QString PipelineWarnings = "Show Render Pipeline Warnings";
const QString PipelineWarnings = "Log Render Pipeline Warnings";
const QString Preferences = "Preferences...";
const QString Quit = "Quit";
const QString ReloadAllScripts = "Reload All Scripts";
const QString RenderBoundingCollisionShapes = "Bounding Collision Shapes";
const QString RenderHeadCollisionShapes = "Head Collision Shapes";
const QString RenderSkeletonCollisionShapes = "Skeleton Collision Shapes";
const QString RenderBoundingCollisionShapes = "Show Bounding Collision Shapes";
const QString RenderFocusIndicator = "Show Eye Focus";
const QString RenderHeadCollisionShapes = "Show Head Collision Shapes";
const QString RenderLookAtVectors = "Show Look-at Vectors";
const QString RenderSkeletonCollisionShapes = "Show Skeleton Collision Shapes";
const QString ResetAvatarSize = "Reset Avatar Size";
const QString RunningScripts = "Running Scripts";
const QString RunTimingTests = "Run Timing Tests";
@ -459,7 +469,7 @@ namespace MenuOption {
const QString VoxelMode = "Cycle Voxel Mode";
const QString Voxels = "Voxels";
const QString VoxelTextures = "Voxel Textures";
const QString WalletPrivateKey = "Wallet Private Key";
const QString WalletPrivateKey = "Wallet Private Key...";
}
void sendFakeEnterEvent();

View file

@ -118,7 +118,7 @@ void MetavoxelSystem::render() {
viewFrustum->getNearBottomLeft(), viewFrustum->getNearBottomRight());
RenderVisitor renderVisitor(getLOD());
guideToAugmented(renderVisitor);
guideToAugmented(renderVisitor, true);
}
class RayHeightfieldIntersectionVisitor : public RayIntersectionVisitor {
@ -449,22 +449,29 @@ void MetavoxelSystem::renderHeightfieldCursor(const glm::vec3& position, float r
glDepthFunc(GL_LESS);
}
void MetavoxelSystem::deleteTextures(int heightID, int colorID) {
void MetavoxelSystem::deleteTextures(int heightID, int colorID, int textureID) {
glDeleteTextures(1, (GLuint*)&heightID);
glDeleteTextures(1, (GLuint*)&colorID);
glDeleteTextures(1, (GLuint*)&textureID);
}
MetavoxelClient* MetavoxelSystem::createClient(const SharedNodePointer& node) {
return new MetavoxelSystemClient(node, _updater);
}
void MetavoxelSystem::guideToAugmented(MetavoxelVisitor& visitor) {
void MetavoxelSystem::guideToAugmented(MetavoxelVisitor& visitor, bool render) {
foreach (const SharedNodePointer& node, NodeList::getInstance()->getNodeHash()) {
if (node->getType() == NodeType::MetavoxelServer) {
QMutexLocker locker(&node->getMutex());
MetavoxelSystemClient* client = static_cast<MetavoxelSystemClient*>(node->getLinkedData());
if (client) {
client->getAugmentedData().guide(visitor);
MetavoxelData data = client->getAugmentedData();
data.guide(visitor);
if (render) {
// save the rendered augmented data so that its cached texture references, etc., don't
// get collected when we replace it with more recent versions
client->setRenderedAugmentedData(data);
}
}
}
}
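Editor's note: copying the augmented data before guiding, and stashing it via setRenderedAugmentedData(), is what keeps cached texture references alive across updates. A sketch of the keep-alive idea with illustrative shared-ownership types:

    #include <memory>

    struct Texture { /* GPU handle, freed by the destructor */ };

    struct DataSketch {
        std::shared_ptr<Texture> cachedTexture;   // shared between copies
    };

    class ClientSketch {
    public:
        DataSketch getAugmentedData() const { return _augmented; }          // cheap shared copy
        void setRenderedAugmentedData(const DataSketch& d) { _rendered = d; }
        // _rendered still pins the old textures until the next render swaps it
        void replaceAugmented(const DataSketch& fresh) { _augmented = fresh; }
    private:
        DataSketch _augmented;
        DataSketch _rendered;   // last data actually drawn
    };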
@ -601,15 +608,19 @@ const int HeightfieldBuffer::SHARED_EDGE = 1;
const int HeightfieldBuffer::HEIGHT_EXTENSION = 2 * HeightfieldBuffer::HEIGHT_BORDER + HeightfieldBuffer::SHARED_EDGE;
HeightfieldBuffer::HeightfieldBuffer(const glm::vec3& translation, float scale,
const QByteArray& height, const QByteArray& color) :
const QByteArray& height, const QByteArray& color, const QByteArray& texture,
const QVector<SharedObjectPointer>& textures) :
_translation(translation),
_scale(scale),
_heightBounds(translation, translation + glm::vec3(scale, scale, scale)),
_colorBounds(_heightBounds),
_height(height),
_color(color),
_texture(texture),
_textures(textures),
_heightTextureID(0),
_colorTextureID(0),
_textureTextureID(0),
_heightSize(glm::sqrt(height.size())),
_heightIncrement(scale / (_heightSize - HEIGHT_EXTENSION)),
_colorSize(glm::sqrt(color.size() / HeightfieldData::COLOR_BYTES)),
@ -628,10 +639,11 @@ HeightfieldBuffer::~HeightfieldBuffer() {
// the textures have to be deleted on the main thread (for its OpenGL context)
if (QThread::currentThread() != Application::getInstance()->thread()) {
QMetaObject::invokeMethod(Application::getInstance()->getMetavoxels(), "deleteTextures",
Q_ARG(int, _heightTextureID), Q_ARG(int, _colorTextureID));
Q_ARG(int, _heightTextureID), Q_ARG(int, _colorTextureID), Q_ARG(int, _textureTextureID));
} else {
glDeleteTextures(1, &_heightTextureID);
glDeleteTextures(1, &_colorTextureID);
glDeleteTextures(1, &_textureTextureID);
}
}
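Editor's note: the destructor pattern above is the standard way to release GL objects owned by another thread's context: forward the IDs to the context thread instead of calling glDeleteTextures() off-thread. A minimal sketch; the string-based invokeMethod assumes a deleteTextures slot like the one this diff extends:

    #include <QObject>
    #include <QThread>

    void deleteTextureSafely(QObject* glThreadObject, int textureID) {
        if (QThread::currentThread() != glThreadObject->thread()) {
            // queued call; runs later on the thread that owns the GL context
            QMetaObject::invokeMethod(glThreadObject, "deleteTextures",
                                      Q_ARG(int, textureID));
        } else {
            // already on the GL thread: delete immediately
            // glDeleteTextures(1, (GLuint*)&textureID);
        }
    }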
@ -667,13 +679,17 @@ public:
glm::vec3 vertex;
};
const int SPLAT_COUNT = 4;
const GLint SPLAT_TEXTURE_UNITS[] = { 3, 4, 5, 6 };
void HeightfieldBuffer::render(bool cursor) {
// initialize textures, etc. on first render
if (_heightTextureID == 0) {
glGenTextures(1, &_heightTextureID);
glBindTexture(GL_TEXTURE_2D, _heightTextureID);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, _heightSize, _heightSize, 0,
@ -692,6 +708,27 @@ void HeightfieldBuffer::render(bool cursor) {
int colorSize = glm::sqrt(_color.size() / HeightfieldData::COLOR_BYTES);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, colorSize, colorSize, 0, GL_RGB, GL_UNSIGNED_BYTE, _color.constData());
}
if (!_texture.isEmpty()) {
glGenTextures(1, &_textureTextureID);
glBindTexture(GL_TEXTURE_2D, _textureTextureID);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
int textureSize = glm::sqrt(_texture.size());
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, textureSize, textureSize, 0,
GL_LUMINANCE, GL_UNSIGNED_BYTE, _texture.constData());
_networkTextures.resize(_textures.size());
for (int i = 0; i < _textures.size(); i++) {
const SharedObjectPointer texture = _textures.at(i);
if (texture) {
_networkTextures[i] = Application::getInstance()->getTextureCache()->getTexture(
static_cast<HeightfieldTexture*>(texture.data())->getURL(), SPLAT_TEXTURE);
}
}
}
}
// create the buffer objects lazily
int innerSize = _heightSize - 2 * HeightfieldBuffer::HEIGHT_BORDER;
@ -759,7 +796,115 @@ void HeightfieldBuffer::render(bool cursor) {
glBindTexture(GL_TEXTURE_2D, _heightTextureID);
if (!cursor) {
if (cursor) {
glDrawRangeElements(GL_TRIANGLES, 0, vertexCount - 1, indexCount, GL_UNSIGNED_INT, 0);
} else if (!_textures.isEmpty()) {
DefaultMetavoxelRendererImplementation::getBaseHeightfieldProgram().bind();
DefaultMetavoxelRendererImplementation::getBaseHeightfieldProgram().setUniformValue(
DefaultMetavoxelRendererImplementation::getBaseHeightScaleLocation(), 1.0f / _heightSize);
DefaultMetavoxelRendererImplementation::getBaseHeightfieldProgram().setUniformValue(
DefaultMetavoxelRendererImplementation::getBaseColorScaleLocation(), (float)_heightSize / innerSize);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, _colorTextureID);
glDrawRangeElements(GL_TRIANGLES, 0, vertexCount - 1, indexCount, GL_UNSIGNED_INT, 0);
glDepthFunc(GL_LEQUAL);
glDepthMask(false);
glEnable(GL_BLEND);
glDisable(GL_ALPHA_TEST);
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonOffset(-1.0f, -1.0f);
DefaultMetavoxelRendererImplementation::getSplatHeightfieldProgram().bind();
DefaultMetavoxelRendererImplementation::getSplatHeightfieldProgram().setUniformValue(
DefaultMetavoxelRendererImplementation::getSplatHeightScaleLocation(), 1.0f / _heightSize);
DefaultMetavoxelRendererImplementation::getSplatHeightfieldProgram().setUniformValue(
DefaultMetavoxelRendererImplementation::getSplatTextureScaleLocation(), (float)_heightSize / innerSize);
DefaultMetavoxelRendererImplementation::getSplatHeightfieldProgram().setUniformValue(
DefaultMetavoxelRendererImplementation::getSplatTextureOffsetLocation(),
_translation.x / _scale, _translation.z / _scale);
glBindTexture(GL_TEXTURE_2D, _textureTextureID);
const int TEXTURES_PER_SPLAT = 4;
for (int i = 0; i < _textures.size(); i += TEXTURES_PER_SPLAT) {
QVector4D scalesS, scalesT;
for (int j = 0; j < SPLAT_COUNT; j++) {
glActiveTexture(GL_TEXTURE0 + SPLAT_TEXTURE_UNITS[j]);
int index = i + j;
if (index < _networkTextures.size()) {
const NetworkTexturePointer& texture = _networkTextures.at(index);
if (texture) {
HeightfieldTexture* heightfieldTexture = static_cast<HeightfieldTexture*>(_textures.at(index).data());
scalesS[j] = _scale / heightfieldTexture->getScaleS();
scalesT[j] = _scale / heightfieldTexture->getScaleT();
glBindTexture(GL_TEXTURE_2D, texture->getID());
} else {
glBindTexture(GL_TEXTURE_2D, 0);
}
} else {
glBindTexture(GL_TEXTURE_2D, 0);
}
}
const float QUARTER_STEP = 0.25f * EIGHT_BIT_MAXIMUM_RECIPROCAL;
DefaultMetavoxelRendererImplementation::getSplatHeightfieldProgram().setUniformValue(
DefaultMetavoxelRendererImplementation::getSplatTextureScalesSLocation(), scalesS);
DefaultMetavoxelRendererImplementation::getSplatHeightfieldProgram().setUniformValue(
DefaultMetavoxelRendererImplementation::getSplatTextureScalesTLocation(), scalesT);
DefaultMetavoxelRendererImplementation::getSplatHeightfieldProgram().setUniformValue(
DefaultMetavoxelRendererImplementation::getSplatTextureValueMinimaLocation(),
(i + 1) * EIGHT_BIT_MAXIMUM_RECIPROCAL - QUARTER_STEP, (i + 2) * EIGHT_BIT_MAXIMUM_RECIPROCAL - QUARTER_STEP,
(i + 3) * EIGHT_BIT_MAXIMUM_RECIPROCAL - QUARTER_STEP, (i + 4) * EIGHT_BIT_MAXIMUM_RECIPROCAL - QUARTER_STEP);
DefaultMetavoxelRendererImplementation::getSplatHeightfieldProgram().setUniformValue(
DefaultMetavoxelRendererImplementation::getSplatTextureValueMaximaLocation(),
(i + 1) * EIGHT_BIT_MAXIMUM_RECIPROCAL + QUARTER_STEP, (i + 2) * EIGHT_BIT_MAXIMUM_RECIPROCAL + QUARTER_STEP,
(i + 3) * EIGHT_BIT_MAXIMUM_RECIPROCAL + QUARTER_STEP, (i + 4) * EIGHT_BIT_MAXIMUM_RECIPROCAL + QUARTER_STEP);
glDrawRangeElements(GL_TRIANGLES, 0, vertexCount - 1, indexCount, GL_UNSIGNED_INT, 0);
}
glEnable(GL_ALPHA_TEST);
glBlendFunc(GL_DST_COLOR, GL_ZERO);
for (int i = 0; i < SPLAT_COUNT; i++) {
glActiveTexture(GL_TEXTURE0 + SPLAT_TEXTURE_UNITS[i]);
glBindTexture(GL_TEXTURE_2D, 0);
}
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, 0);
if (Menu::getInstance()->isOptionChecked(MenuOption::SimpleShadows)) {
DefaultMetavoxelRendererImplementation::getShadowLightHeightfieldProgram().bind();
DefaultMetavoxelRendererImplementation::getShadowLightHeightfieldProgram().setUniformValue(
DefaultMetavoxelRendererImplementation::getShadowLightHeightScaleLocation(), 1.0f / _heightSize);
} else if (Menu::getInstance()->isOptionChecked(MenuOption::CascadedShadows)) {
DefaultMetavoxelRendererImplementation::getCascadedShadowLightHeightfieldProgram().bind();
DefaultMetavoxelRendererImplementation::getCascadedShadowLightHeightfieldProgram().setUniformValue(
DefaultMetavoxelRendererImplementation::getCascadedShadowLightHeightScaleLocation(), 1.0f / _heightSize);
} else {
DefaultMetavoxelRendererImplementation::getLightHeightfieldProgram().bind();
DefaultMetavoxelRendererImplementation::getLightHeightfieldProgram().setUniformValue(
DefaultMetavoxelRendererImplementation::getBaseHeightScaleLocation(), 1.0f / _heightSize);
}
glDrawRangeElements(GL_TRIANGLES, 0, vertexCount - 1, indexCount, GL_UNSIGNED_INT, 0);
DefaultMetavoxelRendererImplementation::getHeightfieldProgram().bind();
glDisable(GL_POLYGON_OFFSET_FILL);
glDisable(GL_BLEND);
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_CONSTANT_ALPHA, GL_ONE);
glDepthFunc(GL_LESS);
glDepthMask(true);
glActiveTexture(GL_TEXTURE0);
} else {
int heightScaleLocation = DefaultMetavoxelRendererImplementation::getHeightScaleLocation();
int colorScaleLocation = DefaultMetavoxelRendererImplementation::getColorScaleLocation();
ProgramObject* program = &DefaultMetavoxelRendererImplementation::getHeightfieldProgram();
@ -777,11 +922,9 @@ void HeightfieldBuffer::render(bool cursor) {
program->setUniformValue(colorScaleLocation, (float)_heightSize / innerSize);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, _colorTextureID);
}
glDrawRangeElements(GL_TRIANGLES, 0, vertexCount - 1, indexCount, GL_UNSIGNED_INT, 0);
if (!cursor) {
glDrawRangeElements(GL_TRIANGLES, 0, vertexCount - 1, indexCount, GL_UNSIGNED_INT, 0);
glBindTexture(GL_TEXTURE_2D, 0);
glActiveTexture(GL_TEXTURE0);
}
@ -898,6 +1041,74 @@ void DefaultMetavoxelRendererImplementation::init() {
_shadowDistancesLocation = _cascadedShadowMapHeightfieldProgram.uniformLocation("shadowDistances");
_cascadedShadowMapHeightfieldProgram.release();
_baseHeightfieldProgram.addShaderFromSourceFile(QGLShader::Vertex, Application::resourcesPath() +
"shaders/metavoxel_heightfield_base.vert");
_baseHeightfieldProgram.addShaderFromSourceFile(QGLShader::Fragment, Application::resourcesPath() +
"shaders/metavoxel_heightfield_base.frag");
_baseHeightfieldProgram.link();
_baseHeightfieldProgram.bind();
_baseHeightfieldProgram.setUniformValue("heightMap", 0);
_baseHeightfieldProgram.setUniformValue("diffuseMap", 1);
_baseHeightScaleLocation = _heightfieldProgram.uniformLocation("heightScale");
_baseColorScaleLocation = _heightfieldProgram.uniformLocation("colorScale");
_baseHeightfieldProgram.release();
_splatHeightfieldProgram.addShaderFromSourceFile(QGLShader::Vertex, Application::resourcesPath() +
"shaders/metavoxel_heightfield_splat.vert");
_splatHeightfieldProgram.addShaderFromSourceFile(QGLShader::Fragment, Application::resourcesPath() +
"shaders/metavoxel_heightfield_splat.frag");
_splatHeightfieldProgram.link();
_splatHeightfieldProgram.bind();
_splatHeightfieldProgram.setUniformValue("heightMap", 0);
_splatHeightfieldProgram.setUniformValue("textureMap", 1);
_splatHeightfieldProgram.setUniformValueArray("diffuseMaps", SPLAT_TEXTURE_UNITS, SPLAT_COUNT);
_splatHeightScaleLocation = _splatHeightfieldProgram.uniformLocation("heightScale");
_splatTextureScaleLocation = _splatHeightfieldProgram.uniformLocation("textureScale");
_splatTextureOffsetLocation = _splatHeightfieldProgram.uniformLocation("splatTextureOffset");
_splatTextureScalesSLocation = _splatHeightfieldProgram.uniformLocation("splatTextureScalesS");
_splatTextureScalesTLocation = _splatHeightfieldProgram.uniformLocation("splatTextureScalesT");
_splatTextureValueMinimaLocation = _splatHeightfieldProgram.uniformLocation("textureValueMinima");
_splatTextureValueMaximaLocation = _splatHeightfieldProgram.uniformLocation("textureValueMaxima");
_splatHeightfieldProgram.release();
_lightHeightfieldProgram.addShaderFromSourceFile(QGLShader::Vertex, Application::resourcesPath() +
"shaders/metavoxel_heightfield_light.vert");
_lightHeightfieldProgram.addShaderFromSourceFile(QGLShader::Fragment, Application::resourcesPath() +
"shaders/metavoxel_heightfield_light.frag");
_lightHeightfieldProgram.link();
_lightHeightfieldProgram.bind();
_lightHeightfieldProgram.setUniformValue("heightMap", 0);
_lightHeightScaleLocation = _lightHeightfieldProgram.uniformLocation("heightScale");
_lightHeightfieldProgram.release();
_shadowLightHeightfieldProgram.addShaderFromSourceFile(QGLShader::Vertex, Application::resourcesPath() +
"shaders/metavoxel_heightfield_light.vert");
_shadowLightHeightfieldProgram.addShaderFromSourceFile(QGLShader::Fragment, Application::resourcesPath() +
"shaders/metavoxel_heightfield_light_shadow_map.frag");
_shadowLightHeightfieldProgram.link();
_shadowLightHeightfieldProgram.bind();
_shadowLightHeightfieldProgram.setUniformValue("heightMap", 0);
_shadowLightHeightfieldProgram.setUniformValue("shadowMap", 2);
_shadowLightHeightScaleLocation = _shadowLightHeightfieldProgram.uniformLocation("heightScale");
_shadowLightHeightfieldProgram.release();
_cascadedShadowLightHeightfieldProgram.addShaderFromSourceFile(QGLShader::Vertex, Application::resourcesPath() +
"shaders/metavoxel_heightfield_light.vert");
_cascadedShadowLightHeightfieldProgram.addShaderFromSourceFile(QGLShader::Fragment, Application::resourcesPath() +
"shaders/metavoxel_heightfield_light_cascaded_shadow_map.frag");
_cascadedShadowLightHeightfieldProgram.link();
_cascadedShadowLightHeightfieldProgram.bind();
_cascadedShadowLightHeightfieldProgram.setUniformValue("heightMap", 0);
_cascadedShadowLightHeightfieldProgram.setUniformValue("shadowMap", 2);
_cascadedShadowLightHeightScaleLocation = _cascadedShadowLightHeightfieldProgram.uniformLocation("heightScale");
_shadowLightDistancesLocation = _cascadedShadowLightHeightfieldProgram.uniformLocation("shadowDistances");
_cascadedShadowLightHeightfieldProgram.release();
_heightfieldCursorProgram.addShaderFromSourceFile(QGLShader::Vertex, Application::resourcesPath() +
"shaders/metavoxel_heightfield_cursor.vert");
_heightfieldCursorProgram.addShaderFromSourceFile(QGLShader::Fragment, Application::resourcesPath() +
@ -1011,7 +1222,7 @@ int HeightfieldFetchVisitor::visit(MetavoxelInfo& info) {
if (!info.isLeaf && info.size > _buffer->getScale()) {
return DEFAULT_ORDER;
}
HeightfieldDataPointer height = info.inputValues.at(0).getInlineValue<HeightfieldDataPointer>();
HeightfieldHeightDataPointer height = info.inputValues.at(0).getInlineValue<HeightfieldHeightDataPointer>();
if (!height) {
return STOP_RECURSION;
}
@ -1065,11 +1276,11 @@ int HeightfieldFetchVisitor::visit(MetavoxelInfo& info) {
int colorSize = _buffer->getColorSize();
if (colorSize == 0) {
return STOP_RECURSION;
continue;
}
HeightfieldDataPointer color = info.inputValues.at(1).getInlineValue<HeightfieldDataPointer>();
HeightfieldColorDataPointer color = info.inputValues.at(1).getInlineValue<HeightfieldColorDataPointer>();
if (!color) {
return STOP_RECURSION;
continue;
}
const Box& colorBounds = _buffer->getColorBounds();
overlap = colorBounds.getIntersection(overlap);
@ -1138,6 +1349,7 @@ private:
HeightfieldRegionVisitor::HeightfieldRegionVisitor(const MetavoxelLOD& lod) :
MetavoxelVisitor(QVector<AttributePointer>() << AttributeRegistry::getInstance()->getHeightfieldAttribute() <<
AttributeRegistry::getInstance()->getHeightfieldColorAttribute() <<
AttributeRegistry::getInstance()->getHeightfieldTextureAttribute() <<
Application::getInstance()->getMetavoxels()->getHeightfieldBufferAttribute(), QVector<AttributePointer>() <<
Application::getInstance()->getMetavoxels()->getHeightfieldBufferAttribute(), lod),
regionBounds(glm::vec3(FLT_MAX, FLT_MAX, FLT_MAX), glm::vec3(-FLT_MAX, -FLT_MAX, -FLT_MAX)),
@ -1149,14 +1361,14 @@ int HeightfieldRegionVisitor::visit(MetavoxelInfo& info) {
return DEFAULT_ORDER;
}
HeightfieldBuffer* buffer = NULL;
HeightfieldDataPointer height = info.inputValues.at(0).getInlineValue<HeightfieldDataPointer>();
HeightfieldHeightDataPointer height = info.inputValues.at(0).getInlineValue<HeightfieldHeightDataPointer>();
if (height) {
const QByteArray& heightContents = height->getContents();
int size = glm::sqrt(heightContents.size());
int extendedSize = size + HeightfieldBuffer::HEIGHT_EXTENSION;
int heightContentsSize = extendedSize * extendedSize;
HeightfieldDataPointer color = info.inputValues.at(1).getInlineValue<HeightfieldDataPointer>();
HeightfieldColorDataPointer color = info.inputValues.at(1).getInlineValue<HeightfieldColorDataPointer>();
int colorContentsSize = 0;
if (color) {
const QByteArray& colorContents = color->getContents();
@ -1165,33 +1377,44 @@ int HeightfieldRegionVisitor::visit(MetavoxelInfo& info) {
colorContentsSize = extendedColorSize * extendedColorSize * HeightfieldData::COLOR_BYTES;
}
HeightfieldTextureDataPointer texture = info.inputValues.at(2).getInlineValue<HeightfieldTextureDataPointer>();
QByteArray textureContents;
QVector<SharedObjectPointer> textures;
if (texture) {
textureContents = texture->getContents();
textures = texture->getTextures();
}
const HeightfieldBuffer* existingBuffer = static_cast<const HeightfieldBuffer*>(
info.inputValues.at(2).getInlineValue<BufferDataPointer>().data());
info.inputValues.at(3).getInlineValue<BufferDataPointer>().data());
Box bounds = info.getBounds();
if (existingBuffer && existingBuffer->getHeight().size() == heightContentsSize &&
existingBuffer->getColor().size() == colorContentsSize) {
// we already have a buffer of the correct resolution
addRegion(bounds, existingBuffer->getHeightBounds());
return STOP_RECURSION;
buffer = new HeightfieldBuffer(info.minimum, info.size, existingBuffer->getHeight(),
existingBuffer->getColor(), textureContents, textures);
} else {
// we must create a new buffer and update its borders
buffer = new HeightfieldBuffer(info.minimum, info.size, QByteArray(heightContentsSize, 0),
QByteArray(colorContentsSize, 0), textureContents, textures);
const Box& heightBounds = buffer->getHeightBounds();
addRegion(bounds, heightBounds);
_intersections.clear();
_intersections.append(Box(heightBounds.minimum,
glm::vec3(bounds.maximum.x, heightBounds.maximum.y, bounds.minimum.z)));
_intersections.append(Box(glm::vec3(bounds.maximum.x, heightBounds.minimum.y, heightBounds.minimum.z),
glm::vec3(heightBounds.maximum.x, heightBounds.maximum.y, bounds.maximum.z)));
_intersections.append(Box(glm::vec3(bounds.minimum.x, heightBounds.minimum.y, bounds.maximum.z),
heightBounds.maximum));
_intersections.append(Box(glm::vec3(heightBounds.minimum.x, heightBounds.minimum.y, bounds.minimum.z),
glm::vec3(bounds.minimum.x, heightBounds.maximum.y, heightBounds.maximum.z)));
_fetchVisitor.init(buffer);
_data->guide(_fetchVisitor);
}
// we must create a new buffer and update its borders
buffer = new HeightfieldBuffer(info.minimum, info.size, QByteArray(heightContentsSize, 0),
QByteArray(colorContentsSize, 0));
const Box& heightBounds = buffer->getHeightBounds();
addRegion(bounds, heightBounds);
_intersections.clear();
_intersections.append(Box(heightBounds.minimum,
glm::vec3(bounds.maximum.x, heightBounds.maximum.y, bounds.minimum.z)));
_intersections.append(Box(glm::vec3(bounds.maximum.x, heightBounds.minimum.y, heightBounds.minimum.z),
glm::vec3(heightBounds.maximum.x, heightBounds.maximum.y, bounds.maximum.z)));
_intersections.append(Box(glm::vec3(bounds.minimum.x, heightBounds.minimum.y, bounds.maximum.z),
heightBounds.maximum));
_intersections.append(Box(glm::vec3(heightBounds.minimum.x, heightBounds.minimum.y, bounds.minimum.z),
glm::vec3(bounds.minimum.x, heightBounds.maximum.y, heightBounds.maximum.z)));
_fetchVisitor.init(buffer);
_data->guide(_fetchVisitor);
}
info.outputValues[0] = AttributeValue(_outputs.at(0), encodeInline(BufferDataPointer(buffer)));
return STOP_RECURSION;
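The four _intersections boxes above tile the border ring between the node's bounds and the extended height bounds: each strip spans the full height range and claims exactly one corner, pinwheel-style, so the ring is covered with no overlap. A 2-D sketch of the same decomposition follows; the Rect type and function are hypothetical, not from the source.

#include <vector>

struct Rect { float minX, minZ, maxX, maxZ; }; // hypothetical helper type

// Pinwheel decomposition of the ring between an outer and an inner rectangle
// into four non-overlapping strips, mirroring the _intersections construction.
std::vector<Rect> borderRing(const Rect& outer, const Rect& inner) {
    return {
        { outer.minX, outer.minZ, inner.maxX, inner.minZ }, // -z strip, takes the (-x,-z) corner
        { inner.maxX, outer.minZ, outer.maxX, inner.maxZ }, // +x strip, takes the (+x,-z) corner
        { inner.minX, inner.maxZ, outer.maxX, outer.maxZ }, // +z strip, takes the (+x,+z) corner
        { outer.minX, inner.minZ, inner.minX, outer.maxZ }, // -x strip, takes the (-x,+z) corner
    };
}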
@ -1249,7 +1472,7 @@ int HeightfieldUpdateVisitor::visit(MetavoxelInfo& info) {
return STOP_RECURSION;
}
HeightfieldBuffer* newBuffer = new HeightfieldBuffer(info.minimum, info.size,
buffer->getHeight(), buffer->getColor());
buffer->getHeight(), buffer->getColor(), buffer->getTexture(), buffer->getTextures());
_fetchVisitor.init(newBuffer);
_data->guide(_fetchVisitor);
info.outputValues[0] = AttributeValue(_outputs.at(0), encodeInline(BufferDataPointer(newBuffer)));
@ -1437,6 +1660,9 @@ void DefaultMetavoxelRendererImplementation::render(MetavoxelData& data, Metavox
ProgramObject* program = &_heightfieldProgram;
if (Menu::getInstance()->getShadowsEnabled()) {
if (Menu::getInstance()->isOptionChecked(MenuOption::CascadedShadows)) {
_cascadedShadowLightHeightfieldProgram.bind();
_cascadedShadowLightHeightfieldProgram.setUniform(_shadowLightDistancesLocation,
Application::getInstance()->getShadowDistances());
program = &_cascadedShadowMapHeightfieldProgram;
program->bind();
program->setUniform(_shadowDistancesLocation, Application::getInstance()->getShadowDistances());
@ -1482,6 +1708,24 @@ ProgramObject DefaultMetavoxelRendererImplementation::_cascadedShadowMapHeightfi
int DefaultMetavoxelRendererImplementation::_cascadedShadowMapHeightScaleLocation;
int DefaultMetavoxelRendererImplementation::_cascadedShadowMapColorScaleLocation;
int DefaultMetavoxelRendererImplementation::_shadowDistancesLocation;
ProgramObject DefaultMetavoxelRendererImplementation::_baseHeightfieldProgram;
int DefaultMetavoxelRendererImplementation::_baseHeightScaleLocation;
int DefaultMetavoxelRendererImplementation::_baseColorScaleLocation;
ProgramObject DefaultMetavoxelRendererImplementation::_splatHeightfieldProgram;
int DefaultMetavoxelRendererImplementation::_splatHeightScaleLocation;
int DefaultMetavoxelRendererImplementation::_splatTextureScaleLocation;
int DefaultMetavoxelRendererImplementation::_splatTextureOffsetLocation;
int DefaultMetavoxelRendererImplementation::_splatTextureScalesSLocation;
int DefaultMetavoxelRendererImplementation::_splatTextureScalesTLocation;
int DefaultMetavoxelRendererImplementation::_splatTextureValueMinimaLocation;
int DefaultMetavoxelRendererImplementation::_splatTextureValueMaximaLocation;
ProgramObject DefaultMetavoxelRendererImplementation::_lightHeightfieldProgram;
int DefaultMetavoxelRendererImplementation::_lightHeightScaleLocation;
ProgramObject DefaultMetavoxelRendererImplementation::_shadowLightHeightfieldProgram;
int DefaultMetavoxelRendererImplementation::_shadowLightHeightScaleLocation;
ProgramObject DefaultMetavoxelRendererImplementation::_cascadedShadowLightHeightfieldProgram;
int DefaultMetavoxelRendererImplementation::_cascadedShadowLightHeightScaleLocation;
int DefaultMetavoxelRendererImplementation::_shadowLightDistancesLocation;
ProgramObject DefaultMetavoxelRendererImplementation::_heightfieldCursorProgram;
static void enableClipPlane(GLenum plane, float x, float y, float z, float w) {

View file

@ -49,7 +49,7 @@ public:
Q_INVOKABLE float getHeightfieldHeight(const glm::vec3& location);
Q_INVOKABLE void deleteTextures(int heightID, int colorID);
Q_INVOKABLE void deleteTextures(int heightID, int colorID, int textureID);
protected:
@ -57,7 +57,7 @@ protected:
private:
void guideToAugmented(MetavoxelVisitor& visitor);
void guideToAugmented(MetavoxelVisitor& visitor, bool render = false);
AttributePointer _pointBufferAttribute;
AttributePointer _heightfieldBufferAttribute;
@ -92,6 +92,8 @@ public:
/// Returns a copy of the augmented data. This function is thread-safe.
MetavoxelData getAugmentedData();
void setRenderedAugmentedData(const MetavoxelData& data) { _renderedAugmentedData = data; }
virtual int parseData(const QByteArray& packet);
protected:
@ -102,6 +104,7 @@ protected:
private:
MetavoxelData _augmentedData;
MetavoxelData _renderedAugmentedData;
QReadWriteLock _augmentedDataLock;
};
@ -139,7 +142,9 @@ public:
static const int SHARED_EDGE;
static const int HEIGHT_EXTENSION;
HeightfieldBuffer(const glm::vec3& translation, float scale, const QByteArray& height, const QByteArray& color);
HeightfieldBuffer(const glm::vec3& translation, float scale, const QByteArray& height,
const QByteArray& color, const QByteArray& texture = QByteArray(),
const QVector<SharedObjectPointer>& textures = QVector<SharedObjectPointer>());
~HeightfieldBuffer();
const glm::vec3& getTranslation() const { return _translation; }
@ -154,6 +159,11 @@ public:
QByteArray& getColor() { return _color; }
const QByteArray& getColor() const { return _color; }
QByteArray& getTexture() { return _texture; }
const QByteArray& getTexture() const { return _texture; }
const QVector<SharedObjectPointer>& getTextures() const { return _textures; }
QByteArray getUnextendedHeight() const;
QByteArray getUnextendedColor() const;
@ -173,13 +183,17 @@ private:
Box _colorBounds;
QByteArray _height;
QByteArray _color;
QByteArray _texture;
QVector<SharedObjectPointer> _textures;
GLuint _heightTextureID;
GLuint _colorTextureID;
GLuint _textureTextureID;
QVector<NetworkTexturePointer> _networkTextures;
int _heightSize;
float _heightIncrement;
int _colorSize;
float _colorIncrement;
typedef QPair<QOpenGLBuffer, QOpenGLBuffer> BufferPair;
static QHash<int, BufferPair> _bufferPairs;
};
@ -231,6 +245,28 @@ public:
static int getCascadedShadowMapHeightScaleLocation() { return _cascadedShadowMapHeightScaleLocation; }
static int getCascadedShadowMapColorScaleLocation() { return _cascadedShadowMapColorScaleLocation; }
static ProgramObject& getBaseHeightfieldProgram() { return _baseHeightfieldProgram; }
static int getBaseHeightScaleLocation() { return _baseHeightScaleLocation; }
static int getBaseColorScaleLocation() { return _baseColorScaleLocation; }
static ProgramObject& getSplatHeightfieldProgram() { return _splatHeightfieldProgram; }
static int getSplatHeightScaleLocation() { return _splatHeightScaleLocation; }
static int getSplatTextureScaleLocation() { return _splatTextureScaleLocation; }
static int getSplatTextureOffsetLocation() { return _splatTextureOffsetLocation; }
static int getSplatTextureScalesSLocation() { return _splatTextureScalesSLocation; }
static int getSplatTextureScalesTLocation() { return _splatTextureScalesTLocation; }
static int getSplatTextureValueMinimaLocation() { return _splatTextureValueMinimaLocation; }
static int getSplatTextureValueMaximaLocation() { return _splatTextureValueMaximaLocation; }
static ProgramObject& getLightHeightfieldProgram() { return _lightHeightfieldProgram; }
static int getLightHeightScaleLocation() { return _lightHeightScaleLocation; }
static ProgramObject& getShadowLightHeightfieldProgram() { return _shadowLightHeightfieldProgram; }
static int getShadowLightHeightScaleLocation() { return _shadowLightHeightScaleLocation; }
static ProgramObject& getCascadedShadowLightHeightfieldProgram() { return _cascadedShadowLightHeightfieldProgram; }
static int getCascadedShadowLightHeightScaleLocation() { return _cascadedShadowLightHeightScaleLocation; }
static ProgramObject& getHeightfieldCursorProgram() { return _heightfieldCursorProgram; }
Q_INVOKABLE DefaultMetavoxelRendererImplementation();
@ -257,6 +293,29 @@ private:
static int _cascadedShadowMapColorScaleLocation;
static int _shadowDistancesLocation;
static ProgramObject _baseHeightfieldProgram;
static int _baseHeightScaleLocation;
static int _baseColorScaleLocation;
static ProgramObject _splatHeightfieldProgram;
static int _splatHeightScaleLocation;
static int _splatTextureScaleLocation;
static int _splatTextureOffsetLocation;
static int _splatTextureScalesSLocation;
static int _splatTextureScalesTLocation;
static int _splatTextureValueMinimaLocation;
static int _splatTextureValueMaximaLocation;
static ProgramObject _lightHeightfieldProgram;
static int _lightHeightScaleLocation;
static ProgramObject _shadowLightHeightfieldProgram;
static int _shadowLightHeightScaleLocation;
static ProgramObject _cascadedShadowLightHeightfieldProgram;
static int _cascadedShadowLightHeightScaleLocation;
static int _shadowLightDistancesLocation;
static ProgramObject _heightfieldCursorProgram;
};

View file

@ -0,0 +1,47 @@
//
// SpeechRecognizer.h
// interface/src
//
// Created by Ryan Huffman on 07/31/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_SpeechRecognizer_h
#define hifi_SpeechRecognizer_h
#include <QObject>
#include <QSet>
#include <QString>
class SpeechRecognizer : public QObject {
Q_OBJECT
public:
SpeechRecognizer();
~SpeechRecognizer();
void handleCommandRecognized(const char* command);
bool getEnabled() const { return _enabled; }
public slots:
void setEnabled(bool enabled);
void addCommand(const QString& command);
void removeCommand(const QString& command);
signals:
void commandRecognized(const QString& command);
void enabledUpdated(bool enabled);
protected:
void reloadCommands();
private:
bool _enabled;
QSet<QString> _commands;
void* _speechRecognizerDelegate;
void* _speechRecognizer;
};
#endif // hifi_SpeechRecognizer_h
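A minimal usage sketch for the class above, assuming a running Qt event loop; the command strings and the lambda are illustrative, not from the source.

#include <QCoreApplication>
#include <QDebug>
#include "SpeechRecognizer.h"

int main(int argc, char** argv) {
    QCoreApplication app(argc, argv);

    SpeechRecognizer recognizer;
    recognizer.addCommand("start recording"); // hypothetical commands
    recognizer.addCommand("stop recording");

    // commandRecognized is emitted from the platform callback via
    // handleCommandRecognized().
    QObject::connect(&recognizer, &SpeechRecognizer::commandRecognized,
                     [](const QString& command) { qDebug() << "heard:" << command; });

    recognizer.setEnabled(true); // allocates the recognizer and starts listening
    return app.exec();
}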

View file

@ -0,0 +1,109 @@
//
// SpeechRecognizer.mm
// interface/src
//
// Created by Ryan Huffman on 07/31/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <QtGlobal>
#ifdef Q_OS_MAC
#import <Foundation/Foundation.h>
#import <AppKit/NSSpeechRecognizer.h>
#import <AppKit/NSWorkspace.h>
#include <QDebug>
#include "SpeechRecognizer.h"
@interface SpeechRecognizerDelegate : NSObject <NSSpeechRecognizerDelegate> {
SpeechRecognizer* _listener;
}
- (void)setListener:(SpeechRecognizer*)listener;
- (void)speechRecognizer:(NSSpeechRecognizer*)sender didRecognizeCommand:(id)command;
@end
@implementation SpeechRecognizerDelegate
- (void)setListener:(SpeechRecognizer*)listener {
_listener = listener;
}
- (void)speechRecognizer:(NSSpeechRecognizer*)sender didRecognizeCommand:(id)command {
_listener->handleCommandRecognized(((NSString*)command).UTF8String);
}
@end
SpeechRecognizer::SpeechRecognizer() :
QObject(),
_enabled(false),
_commands(),
_speechRecognizerDelegate([[SpeechRecognizerDelegate alloc] init]),
_speechRecognizer(NULL) {
[(id)_speechRecognizerDelegate setListener:this];
}
SpeechRecognizer::~SpeechRecognizer() {
if (_speechRecognizer) {
[(id)_speechRecognizer dealloc];
}
if (_speechRecognizerDelegate) {
[(id)_speechRecognizerDelegate dealloc];
}
}
void SpeechRecognizer::handleCommandRecognized(const char* command) {
emit commandRecognized(QString(command));
}
void SpeechRecognizer::setEnabled(bool enabled) {
if (enabled == _enabled) {
return;
}
_enabled = enabled;
if (_enabled) {
_speechRecognizer = [[NSSpeechRecognizer alloc] init];
reloadCommands();
[(id)_speechRecognizer setDelegate:(id)_speechRecognizerDelegate];
[(id)_speechRecognizer startListening];
} else {
[(id)_speechRecognizer stopListening];
[(id)_speechRecognizer dealloc];
_speechRecognizer = NULL;
}
emit enabledUpdated(_enabled);
}
void SpeechRecognizer::reloadCommands() {
if (_speechRecognizer) {
NSMutableArray* cmds = [NSMutableArray array];
for (QSet<QString>::const_iterator iter = _commands.constBegin(); iter != _commands.constEnd(); iter++) {
[cmds addObject:[NSString stringWithUTF8String:(*iter).toLocal8Bit().data()]];
}
[(id)_speechRecognizer setCommands:cmds];
}
}
void SpeechRecognizer::addCommand(const QString& command) {
_commands.insert(command);
reloadCommands();
}
void SpeechRecognizer::removeCommand(const QString& command) {
_commands.remove(command);
reloadCommands();
}
#endif // Q_OS_MAC

View file

@ -29,6 +29,7 @@
#include "Menu.h"
#include "ModelReferential.h"
#include "Physics.h"
#include "Recorder.h"
#include "world.h"
#include "devices/OculusManager.h"
#include "renderer/TextureCache.h"
@ -301,7 +302,7 @@ void Avatar::render(const glm::vec3& cameraPosition, RenderMode renderMode) {
return;
}
glm::vec3 toTarget = cameraPosition - Application::getInstance()->getAvatar()->getPosition();
glm::vec3 toTarget = cameraPosition - getPosition();
float distanceToTarget = glm::length(toTarget);
{
@ -348,7 +349,7 @@ void Avatar::render(const glm::vec3& cameraPosition, RenderMode renderMode) {
}
// If this is the avatar being looked at, render a little ball above their head
if (_isLookAtTarget && Menu::getInstance()->isOptionChecked(MenuOption::FocusIndicators)) {
if (_isLookAtTarget && Menu::getInstance()->isOptionChecked(MenuOption::RenderFocusIndicator)) {
const float LOOK_AT_INDICATOR_RADIUS = 0.03f;
const float LOOK_AT_INDICATOR_OFFSET = 0.22f;
const float LOOK_AT_INDICATOR_COLOR[] = { 0.8f, 0.0f, 0.0f, 0.75f };
@ -367,10 +368,12 @@ void Avatar::render(const glm::vec3& cameraPosition, RenderMode renderMode) {
// quick check before falling into the code below:
// (a 10 degree breadth of an almost 2 meter avatar kicks in at about 12m)
const float MIN_VOICE_SPHERE_DISTANCE = 12.0f;
if (distanceToTarget > MIN_VOICE_SPHERE_DISTANCE) {
if (Menu::getInstance()->isOptionChecked(MenuOption::BlueSpeechSphere)
&& distanceToTarget > MIN_VOICE_SPHERE_DISTANCE) {
// render voice intensity sphere for avatars that are farther away
const float MAX_SPHERE_ANGLE = 10.0f * RADIANS_PER_DEGREE;
const float MIN_SPHERE_ANGLE = 1.0f * RADIANS_PER_DEGREE;
const float MIN_SPHERE_ANGLE = 0.5f * RADIANS_PER_DEGREE;
const float MIN_SPHERE_SIZE = 0.01f;
const float SPHERE_LOUDNESS_SCALING = 0.0005f;
const float SPHERE_COLOR[] = { 0.5f, 0.8f, 0.8f };
@ -391,7 +394,7 @@ void Avatar::render(const glm::vec3& cameraPosition, RenderMode renderMode) {
}
}
const float DISPLAYNAME_DISTANCE = 10.0f;
const float DISPLAYNAME_DISTANCE = 20.0f;
setShowDisplayName(renderMode == NORMAL_RENDER_MODE && distanceToTarget < DISPLAYNAME_DISTANCE);
if (renderMode != NORMAL_RENDER_MODE || (isMyAvatar() &&
Application::getInstance()->getCamera()->getMode() == CAMERA_MODE_FIRST_PERSON)) {
@ -725,6 +728,17 @@ bool Avatar::findCollisions(const QVector<const Shape*>& shapes, CollisionList&
return collided;
}
QVector<glm::quat> Avatar::getJointRotations() const {
if (QThread::currentThread() != thread()) {
return AvatarData::getJointRotations();
}
QVector<glm::quat> jointRotations(_skeletonModel.getJointStateCount());
for (int i = 0; i < _skeletonModel.getJointStateCount(); ++i) {
_skeletonModel.getJointState(i, jointRotations[i]);
}
return jointRotations;
}
glm::quat Avatar::getJointRotation(int index) const {
if (QThread::currentThread() != thread()) {
return AvatarData::getJointRotation(index);

View file

@ -23,6 +23,7 @@
#include "Hand.h"
#include "Head.h"
#include "InterfaceConfig.h"
#include "Recorder.h"
#include "SkeletonModel.h"
#include "world.h"
@ -90,7 +91,7 @@ public:
const QVector<Model*>& getAttachmentModels() const { return _attachmentModels; }
glm::vec3 getChestPosition() const;
float getScale() const { return _scale; }
const glm::vec3& getVelocity() const { return _velocity; }
Q_INVOKABLE const glm::vec3& getVelocity() const { return _velocity; }
const Head* getHead() const { return static_cast<const Head*>(_headData); }
Head* getHead() { return static_cast<Head*>(_headData); }
Hand* getHand() { return static_cast<Hand*>(_handData); }
@ -121,6 +122,7 @@ public:
virtual bool isMyAvatar() { return false; }
virtual QVector<glm::quat> getJointRotations() const;
virtual glm::quat getJointRotation(int index) const;
virtual int getJointIndex(const QString& name) const;
virtual QStringList getJointNames() const;
@ -150,9 +152,9 @@ public:
Q_INVOKABLE glm::quat getJointCombinedRotation(int index) const;
Q_INVOKABLE glm::quat getJointCombinedRotation(const QString& name) const;
glm::vec3 getAcceleration() const { return _acceleration; }
glm::vec3 getAngularVelocity() const { return _angularVelocity; }
glm::vec3 getAngularAcceleration() const { return _angularAcceleration; }
Q_INVOKABLE glm::vec3 getAcceleration() const { return _acceleration; }
Q_INVOKABLE glm::vec3 getAngularVelocity() const { return _angularVelocity; }
Q_INVOKABLE glm::vec3 getAngularAcceleration() const { return _angularAcceleration; }
/// Scales a world space position vector relative to the avatar position and scale
@ -220,8 +222,6 @@ private:
void renderBillboard();
float getBillboardSize() const;
};
#endif // hifi_Avatar_h

View file

@ -82,7 +82,7 @@ void AvatarManager::updateOtherAvatars(float deltaTime) {
void AvatarManager::renderAvatars(Avatar::RenderMode renderMode, bool selfAvatarOnly) {
PerformanceWarning warn(Menu::getInstance()->isOptionChecked(MenuOption::PipelineWarnings),
"Application::renderAvatars()");
bool renderLookAtVectors = Menu::getInstance()->isOptionChecked(MenuOption::LookAtVectors);
bool renderLookAtVectors = Menu::getInstance()->isOptionChecked(MenuOption::RenderLookAtVectors);
glm::vec3 cameraPosition = Application::getInstance()->getCamera()->getPosition();

View file

@ -54,7 +54,7 @@ void FaceModel::maybeUpdateNeckRotation(const JointState& parentState, const FBX
state.setRotationInConstrainedFrame(glm::angleAxis(- RADIANS_PER_DEGREE * _owningHead->getFinalRoll(), glm::normalize(inverse * axes[2]))
* glm::angleAxis(RADIANS_PER_DEGREE * _owningHead->getFinalYaw(), glm::normalize(inverse * axes[1]))
* glm::angleAxis(- RADIANS_PER_DEGREE * _owningHead->getFinalPitch(), glm::normalize(inverse * axes[0]))
* joint.rotation);
* joint.rotation, DEFAULT_PRIORITY);
}
void FaceModel::maybeUpdateEyeRotation(const JointState& parentState, const FBXJoint& joint, JointState& state) {
@ -69,7 +69,7 @@ void FaceModel::maybeUpdateEyeRotation(const JointState& parentState, const FBXJ
glm::quat between = rotationBetween(front, lookAt);
const float MAX_ANGLE = 30.0f * RADIANS_PER_DEGREE;
state.setRotationInConstrainedFrame(glm::angleAxis(glm::clamp(glm::angle(between), -MAX_ANGLE, MAX_ANGLE), glm::axis(between)) *
joint.rotation);
joint.rotation, DEFAULT_PRIORITY);
}
void FaceModel::updateJointState(int index) {

View file

@ -64,13 +64,18 @@ void Head::reset() {
void Head::simulate(float deltaTime, bool isMine, bool billboard) {
// Update audio trailing average for rendering facial animations
if (isMine) {
FaceTracker* faceTracker = Application::getInstance()->getActiveFaceTracker();
if ((_isFaceshiftConnected = faceTracker)) {
_blendshapeCoefficients = faceTracker->getBlendshapeCoefficients();
_isFaceshiftConnected = true;
} else if (Application::getInstance()->getDDE()->isActive()) {
faceTracker = Application::getInstance()->getDDE();
_blendshapeCoefficients = faceTracker->getBlendshapeCoefficients();
MyAvatar* myAvatar = static_cast<MyAvatar*>(_owningAvatar);
// Only use face trackers when not playing back a recording.
if (!myAvatar->isPlaying()) {
FaceTracker* faceTracker = Application::getInstance()->getActiveFaceTracker();
if ((_isFaceshiftConnected = faceTracker)) {
_blendshapeCoefficients = faceTracker->getBlendshapeCoefficients();
_isFaceshiftConnected = true;
} else if (Application::getInstance()->getDDE()->isActive()) {
faceTracker = Application::getInstance()->getDDE();
_blendshapeCoefficients = faceTracker->getBlendshapeCoefficients();
}
}
}
@ -217,6 +222,18 @@ glm::vec3 Head::getScalePivot() const {
return _faceModel.isActive() ? _faceModel.getTranslation() : _position;
}
void Head::setFinalPitch(float finalPitch) {
_deltaPitch = glm::clamp(finalPitch, MIN_HEAD_PITCH, MAX_HEAD_PITCH) - _basePitch;
}
void Head::setFinalYaw(float finalYaw) {
_deltaYaw = glm::clamp(finalYaw, MIN_HEAD_YAW, MAX_HEAD_YAW) - _baseYaw;
}
void Head::setFinalRoll(float finalRoll) {
_deltaRoll = glm::clamp(finalRoll, MIN_HEAD_ROLL, MAX_HEAD_ROLL) - _baseRoll;
}
float Head::getFinalYaw() const {
return glm::clamp(_baseYaw + _deltaYaw, MIN_HEAD_YAW, MAX_HEAD_YAW);
}

View file

@ -48,8 +48,6 @@ public:
void setAverageLoudness(float averageLoudness) { _averageLoudness = averageLoudness; }
void setReturnToCenter (bool returnHeadToCenter) { _returnHeadToCenter = returnHeadToCenter; }
void setRenderLookatVectors(bool onOff) { _renderLookatVectors = onOff; }
void setLeanSideways(float leanSideways) { _leanSideways = leanSideways; }
void setLeanForward(float leanForward) { _leanForward = leanForward; }
/// \return orientationBase+Delta
glm::quat getFinalOrientationInLocalFrame() const;
@ -57,7 +55,6 @@ public:
/// \return orientationBody * (orientationBase+Delta)
glm::quat getFinalOrientationInWorldFrame() const;
/// \return orientationBody * orientationBasePitch
glm::quat getCameraOrientation () const;
@ -71,8 +68,6 @@ public:
glm::vec3 getRightDirection() const { return getOrientation() * IDENTITY_RIGHT; }
glm::vec3 getUpDirection() const { return getOrientation() * IDENTITY_UP; }
glm::vec3 getFrontDirection() const { return getOrientation() * IDENTITY_FRONT; }
float getLeanSideways() const { return _leanSideways; }
float getLeanForward() const { return _leanForward; }
float getFinalLeanSideways() const { return _leanSideways + _deltaLeanSideways; }
float getFinalLeanForward() const { return _leanForward + _deltaLeanForward; }
@ -100,6 +95,9 @@ public:
void setDeltaRoll(float roll) { _deltaRoll = roll; }
float getDeltaRoll() const { return _deltaRoll; }
virtual void setFinalYaw(float finalYaw);
virtual void setFinalPitch(float finalPitch);
virtual void setFinalRoll(float finalRoll);
virtual float getFinalPitch() const;
virtual float getFinalYaw() const;
virtual float getFinalRoll() const;

View file

@ -35,6 +35,7 @@
#include "ModelReferential.h"
#include "MyAvatar.h"
#include "Physics.h"
#include "Recorder.h"
#include "devices/Faceshift.h"
#include "devices/OculusManager.h"
#include "ui/TextRenderer.h"
@ -80,6 +81,7 @@ MyAvatar::MyAvatar() :
_billboardValid(false),
_physicsSimulation()
{
ShapeCollider::initDispatchTable();
for (int i = 0; i < MAX_DRIVE_KEYS; i++) {
_driveKeys[i] = 0.0f;
}
@ -136,6 +138,12 @@ void MyAvatar::update(float deltaTime) {
void MyAvatar::simulate(float deltaTime) {
PerformanceTimer perfTimer("simulate");
// Play back recording
if (_player && _player->isPlaying()) {
_player->play();
}
if (_scale != _targetScale) {
float scale = (1.0f - SMOOTHING_RATIO) * _scale + SMOOTHING_RATIO * _targetScale;
setScale(scale);
@ -148,7 +156,7 @@ void MyAvatar::simulate(float deltaTime) {
updateOrientation(deltaTime);
updatePosition(deltaTime);
}
{
PerformanceTimer perfTimer("hand");
// update avatar skeleton and simulate hand and head
@ -206,12 +214,21 @@ void MyAvatar::simulate(float deltaTime) {
{
PerformanceTimer perfTimer("ragdoll");
if (Menu::getInstance()->isOptionChecked(MenuOption::CollideAsRagdoll)) {
Ragdoll* ragdoll = _skeletonModel.getRagdoll();
if (ragdoll && Menu::getInstance()->isOptionChecked(MenuOption::CollideAsRagdoll)) {
const float minError = 0.00001f;
const float maxIterations = 3;
const quint64 maxUsec = 4000;
_physicsSimulation.setTranslation(_position);
_physicsSimulation.stepForward(deltaTime, minError, maxIterations, maxUsec);
// harvest any displacement of the Ragdoll that is a result of collisions
glm::vec3 ragdollDisplacement = ragdoll->getAndClearAccumulatedMovement();
const float MAX_RAGDOLL_DISPLACEMENT_2 = 1.0f;
float length2 = glm::length2(ragdollDisplacement);
if (length2 > EPSILON && length2 < MAX_RAGDOLL_DISPLACEMENT_2) {
setPosition(getPosition() + ragdollDisplacement);
}
} else {
_skeletonModel.moveShapesTowardJoints(1.0f);
}
@ -243,6 +260,11 @@ void MyAvatar::simulate(float deltaTime) {
}
}
// Record the avatar's movements.
if (_recorder && _recorder->isRecording()) {
_recorder->record();
}
// consider updating our billboard
maybeUpdateBillboard();
}
@ -250,7 +272,11 @@ void MyAvatar::simulate(float deltaTime) {
// Update avatar head rotation with sensor data
void MyAvatar::updateFromTrackers(float deltaTime) {
glm::vec3 estimatedPosition, estimatedRotation;
if (isPlaying() && !OculusManager::isConnected()) {
return;
}
if (Application::getInstance()->getPrioVR()->hasHeadRotation()) {
estimatedRotation = glm::degrees(safeEulerAngles(Application::getInstance()->getPrioVR()->getHeadRotation()));
estimatedRotation.x *= -1.0f;
@ -293,7 +319,7 @@ void MyAvatar::updateFromTrackers(float deltaTime) {
Head* head = getHead();
if (OculusManager::isConnected()) {
if (OculusManager::isConnected() || isPlaying()) {
head->setDeltaPitch(estimatedRotation.x);
head->setDeltaYaw(estimatedRotation.y);
} else {
@ -310,7 +336,6 @@ void MyAvatar::updateFromTrackers(float deltaTime) {
head->setLeanForward(eulers.x);
return;
}
// Update torso lean distance based on accelerometer data
const float TORSO_LENGTH = 0.5f;
glm::vec3 relativePosition = estimatedPosition - glm::vec3(0.0f, -TORSO_LENGTH, 0.0f);
@ -481,6 +506,89 @@ bool MyAvatar::setJointReferential(const QUuid& id, int jointIndex) {
}
}
bool MyAvatar::isRecording() {
if (!_recorder) {
return false;
}
if (QThread::currentThread() != thread()) {
bool result;
QMetaObject::invokeMethod(this, "isRecording", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(bool, result));
return result;
}
return _recorder && _recorder->isRecording();
}
qint64 MyAvatar::recorderElapsed() {
if (!_recorder) {
return 0;
}
if (QThread::currentThread() != thread()) {
qint64 result;
QMetaObject::invokeMethod(this, "recorderElapsed", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(qint64, result));
return result;
}
return _recorder->elapsed();
}
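isRecording() and recorderElapsed() both follow the standard Qt thread-affinity pattern: when called from the wrong thread, re-invoke the same method on the object's own thread with Qt::BlockingQueuedConnection and marshal the return value back. A stripped-down sketch of the pattern; the Counter class is hypothetical.

#include <QMetaObject>
#include <QObject>
#include <QThread>

class Counter : public QObject {
    Q_OBJECT
public:
    Q_INVOKABLE int value() {
        if (QThread::currentThread() != thread()) {
            int result = 0;
            // Blocks the caller until the owning thread's event loop has run
            // value() and filled in result; safe only because the thread check
            // guarantees we are not already on the owning thread.
            QMetaObject::invokeMethod(this, "value", Qt::BlockingQueuedConnection,
                                      Q_RETURN_ARG(int, result));
            return result;
        }
        return _value; // on the owning thread: direct access is safe
    }
private:
    int _value = 0;
};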
void MyAvatar::startRecording() {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "startRecording", Qt::BlockingQueuedConnection);
return;
}
if (!_recorder) {
_recorder = RecorderPointer(new Recorder(this));
}
Application::getInstance()->getAudio()->setRecorder(_recorder);
_recorder->startRecording();
}
void MyAvatar::stopRecording() {
if (!_recorder) {
return;
}
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "stopRecording", Qt::BlockingQueuedConnection);
return;
}
if (_recorder) {
_recorder->stopRecording();
}
}
void MyAvatar::saveRecording(QString filename) {
if (!_recorder) {
qDebug() << "There is no recording to save";
return;
}
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "saveRecording", Qt::BlockingQueuedConnection,
Q_ARG(QString, filename));
return;
}
if (_recorder) {
_recorder->saveToFile(filename);
}
}
void MyAvatar::loadLastRecording() {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "loadLastRecording", Qt::BlockingQueuedConnection);
return;
}
if (!_recorder) {
qDebug() << "There is no recording to load";
return;
}
if (!_player) {
_player = PlayerPointer(new Player(this));
}
_player->loadRecording(_recorder->getRecording());
}
void MyAvatar::setLocalGravity(glm::vec3 gravity) {
_motionBehaviors |= AVATAR_MOTION_OBEY_LOCAL_GRAVITY;
// Environmental and Local gravities are incompatible. Since Local is being set here
@ -861,19 +969,39 @@ glm::vec3 MyAvatar::getUprightHeadPosition() const {
return _position + getWorldAlignedOrientation() * glm::vec3(0.0f, getPelvisToHeadLength(), 0.0f);
}
const float JOINT_PRIORITY = 2.0f;
const float SCRIPT_PRIORITY = DEFAULT_PRIORITY + 1.0f;
const float RECORDER_PRIORITY = SCRIPT_PRIORITY + 1.0f;
void MyAvatar::setJointRotations(QVector<glm::quat> jointRotations) {
int numStates = glm::min(_skeletonModel.getJointStateCount(), jointRotations.size());
for (int i = 0; i < numStates; ++i) {
// HACK: ATM only Recorder calls setJointRotations() so we hardcode its priority here
_skeletonModel.setJointState(i, true, jointRotations[i], RECORDER_PRIORITY);
}
}
void MyAvatar::setJointData(int index, const glm::quat& rotation) {
Avatar::setJointData(index, rotation);
if (QThread::currentThread() == thread()) {
_skeletonModel.setJointState(index, true, rotation, JOINT_PRIORITY);
// HACK: ATM only JS scripts call setJointData() on MyAvatar so we hardcode the priority
_skeletonModel.setJointState(index, true, rotation, SCRIPT_PRIORITY);
}
}
void MyAvatar::clearJointData(int index) {
Avatar::clearJointData(index);
if (QThread::currentThread() == thread()) {
_skeletonModel.setJointState(index, false, glm::quat(), JOINT_PRIORITY);
// HACK: ATM only JS scripts call clearJointData() on MyAvatar so we hardcode the priority
_skeletonModel.setJointState(index, false, glm::quat(), 0.0f);
}
}
void MyAvatar::clearJointsData() {
clearJointAnimationPriorities();
}
void MyAvatar::clearJointAnimationPriorities() {
int numStates = _skeletonModel.getJointStateCount();
for (int i = 0; i < numStates; ++i) {
_skeletonModel.clearJointAnimationPriority(i);
}
}
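The priority constants above (DEFAULT_PRIORITY < SCRIPT_PRIORITY < RECORDER_PRIORITY) feed a simple arbitration rule, spelled out in JointState::setRotationInConstrainedFrame further down: a writer may take over a joint only if its priority is at least that of the current owner, with 0.0f meaning unclaimed. A condensed model of the rule; the type and names are hypothetical.

#include <glm/gtc/quaternion.hpp>

struct PrioritizedRotation { // hypothetical condensed model
    glm::quat rotation;
    float ownerPriority = 0.0f; // 0.0f means unclaimed

    // A write succeeds only if the writer matches or outranks the owner.
    bool trySet(const glm::quat& target, float priority) {
        if (priority >= ownerPriority || ownerPriority == 0.0f) {
            rotation = target;
            ownerPriority = priority;
            return true;
        }
        return false;
    }

    // clearJointAnimationPriority() analogue: any writer may claim again.
    void release() { ownerPriority = 0.0f; }
};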
@ -1768,12 +1896,8 @@ void MyAvatar::resetSize() {
}
void MyAvatar::goToLocationFromResponse(const QJsonObject& jsonObject) {
if (jsonObject["status"].toString() == "success") {
QJsonObject locationObject = jsonObject["data"].toObject()["address"].toObject();
goToLocationFromAddress(locationObject);
} else {
QMessageBox::warning(Application::getInstance()->getWindow(), "", "That user or location could not be found.");
}
QJsonObject locationObject = jsonObject["data"].toObject()["address"].toObject();
goToLocationFromAddress(locationObject);
}
void MyAvatar::goToLocationFromAddress(const QJsonObject& locationObject) {

View file

@ -106,18 +106,30 @@ public:
virtual int parseDataAtOffset(const QByteArray& packet, int offset);
static void sendKillAvatar();
Q_INVOKABLE glm::vec3 getHeadPosition() const { return getHead()->getPosition(); }
Q_INVOKABLE float getHeadFinalYaw() const { return getHead()->getFinalYaw(); }
Q_INVOKABLE float getHeadFinalRoll() const { return getHead()->getFinalRoll(); }
Q_INVOKABLE float getHeadFinalPitch() const { return getHead()->getFinalPitch(); }
Q_INVOKABLE float getHeadDeltaPitch() const { return getHead()->getDeltaPitch(); }
Q_INVOKABLE glm::vec3 getEyePosition() const { return getHead()->getEyePosition(); }
Q_INVOKABLE glm::vec3 getTargetAvatarPosition() const { return _targetAvatarPosition; }
AvatarData* getLookAtTargetAvatar() const { return _lookAtTargetAvatar.data(); }
void updateLookAtTargetAvatar();
void clearLookAtTargetAvatar();
virtual void setJointRotations(QVector<glm::quat> jointRotations);
virtual void setJointData(int index, const glm::quat& rotation);
virtual void clearJointData(int index);
virtual void clearJointsData();
virtual void setFaceModelURL(const QUrl& faceModelURL);
virtual void setSkeletonModelURL(const QUrl& skeletonModelURL);
virtual void setAttachmentData(const QVector<AttachmentData>& attachmentData);
void clearJointAnimationPriorities();
virtual void attach(const QString& modelURL, const QString& jointName = QString(),
const glm::vec3& translation = glm::vec3(), const glm::quat& rotation = glm::quat(), float scale = 1.0f,
bool allowDuplicates = false, bool useSaved = true);
@ -132,6 +144,10 @@ public:
/// Renders a laser pointer for UI picking
void renderLaserPointers();
glm::vec3 getLaserPointerTipPosition(const PalmData* palm);
const RecorderPointer getRecorder() const { return _recorder; }
const PlayerPointer getPlayer() const { return _player; }
public slots:
void goHome();
void increaseSize();
@ -155,6 +171,13 @@ public slots:
bool setModelReferential(const QUuid& id);
bool setJointReferential(const QUuid& id, int jointIndex);
bool isRecording();
qint64 recorderElapsed();
void startRecording();
void stopRecording();
void saveRecording(QString filename);
void loadLastRecording();
signals:
void transformChanged();
@ -192,6 +215,8 @@ private:
QList<AnimationHandlePointer> _animationHandles;
PhysicsSimulation _physicsSimulation;
RecorderPointer _recorder;
// private methods
float computeDistanceToFloor(const glm::vec3& startPoint);
void updateOrientation(float deltaTime);

View file

@ -14,14 +14,11 @@
#include <VerletCapsuleShape.h>
#include <VerletSphereShape.h>
#include <DistanceConstraint.h>
#include <FixedConstraint.h>
#include "Application.h"
#include "Avatar.h"
#include "Hand.h"
#include "Menu.h"
#include "MuscleConstraint.h"
#include "SkeletonModel.h"
#include "SkeletonRagdoll.h"
@ -54,7 +51,8 @@ void SkeletonModel::setJointStates(QVector<JointState> states) {
}
}
const float PALM_PRIORITY = 3.0f;
const float PALM_PRIORITY = DEFAULT_PRIORITY;
const float LEAN_PRIORITY = DEFAULT_PRIORITY;
void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
setTranslation(_owningAvatar->getPosition());
@ -65,9 +63,15 @@ void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {
Model::simulate(deltaTime, fullUpdate);
if (!(isActive() && _owningAvatar->isMyAvatar())) {
if (!isActive() || !_owningAvatar->isMyAvatar()) {
return; // only simulate for own avatar
}
MyAvatar* myAvatar = static_cast<MyAvatar*>(_owningAvatar);
if (myAvatar->isPlaying()) {
// Don't take inputs if playing back a recording.
return;
}
const FBXGeometry& geometry = _geometry->getFBXGeometry();
PrioVR* prioVR = Application::getInstance()->getPrioVR();
@ -227,7 +231,7 @@ void SkeletonModel::applyPalmData(int jointIndex, PalmData& palm) {
JointState& parentState = _jointStates[parentJointIndex];
parentState.setRotationInBindFrame(palmRotation, PALM_PRIORITY);
// lock hand to forearm by slamming its rotation (in parent-frame) to identity
_jointStates[jointIndex].setRotationInConstrainedFrame(glm::quat());
_jointStates[jointIndex].setRotationInConstrainedFrame(glm::quat(), PALM_PRIORITY);
} else {
inverseKinematics(jointIndex, palmPosition, palmRotation, PALM_PRIORITY);
}
@ -240,7 +244,7 @@ void SkeletonModel::updateJointState(int index) {
const JointState& parentState = _jointStates.at(joint.parentIndex);
const FBXGeometry& geometry = _geometry->getFBXGeometry();
if (index == geometry.leanJointIndex) {
maybeUpdateLeanRotation(parentState, joint, state);
maybeUpdateLeanRotation(parentState, state);
} else if (index == geometry.neckJointIndex) {
maybeUpdateNeckRotation(parentState, joint, state);
@ -257,17 +261,18 @@ void SkeletonModel::updateJointState(int index) {
}
}
void SkeletonModel::maybeUpdateLeanRotation(const JointState& parentState, const FBXJoint& joint, JointState& state) {
void SkeletonModel::maybeUpdateLeanRotation(const JointState& parentState, JointState& state) {
if (!_owningAvatar->isMyAvatar() || Application::getInstance()->getPrioVR()->isActive()) {
return;
}
// get the rotation axes in joint space and use them to adjust the rotation
glm::mat3 axes = glm::mat3_cast(glm::quat());
glm::mat3 inverse = glm::mat3(glm::inverse(parentState.getTransform() * glm::translate(state.getDefaultTranslationInConstrainedFrame()) *
joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation)));
state.setRotationInConstrainedFrame(glm::angleAxis(- RADIANS_PER_DEGREE * _owningAvatar->getHead()->getFinalLeanSideways(),
glm::normalize(inverse * axes[2])) * glm::angleAxis(- RADIANS_PER_DEGREE * _owningAvatar->getHead()->getFinalLeanForward(),
glm::normalize(inverse * axes[0])) * joint.rotation);
glm::vec3 xAxis(1.0f, 0.0f, 0.0f);
glm::vec3 zAxis(0.0f, 0.0f, 1.0f);
glm::quat inverse = glm::inverse(parentState.getRotation() * state.getDefaultRotationInParentFrame());
state.setRotationInConstrainedFrame(
glm::angleAxis(- RADIANS_PER_DEGREE * _owningAvatar->getHead()->getFinalLeanSideways(), inverse * zAxis)
* glm::angleAxis(- RADIANS_PER_DEGREE * _owningAvatar->getHead()->getFinalLeanForward(), inverse * xAxis)
* state.getFBXJoint().rotation, LEAN_PRIORITY);
}
void SkeletonModel::maybeUpdateNeckRotation(const JointState& parentState, const FBXJoint& joint, JointState& state) {
@ -573,6 +578,7 @@ SkeletonRagdoll* SkeletonModel::buildRagdoll() {
if (!_ragdoll) {
_ragdoll = new SkeletonRagdoll(this);
if (_enableShapes) {
clearShapes();
buildShapes();
}
}
@ -597,6 +603,7 @@ void SkeletonModel::buildShapes() {
if (!_ragdoll) {
_ragdoll = new SkeletonRagdoll(this);
}
_ragdoll->setRootIndex(geometry.rootJointIndex);
_ragdoll->initPoints();
QVector<VerletPoint>& points = _ragdoll->getPoints();
@ -604,44 +611,47 @@ void SkeletonModel::buildShapes() {
float uniformScale = extractUniformScale(_scale);
const int numStates = _jointStates.size();
float totalMass = 0.0f;
for (int i = 0; i < numStates; i++) {
JointState& state = _jointStates[i];
const FBXJoint& joint = state.getFBXJoint();
float radius = uniformScale * joint.boneRadius;
float halfHeight = 0.5f * uniformScale * joint.distanceToParent;
Shape::Type type = joint.shapeType;
if (i == 0 || (type == Shape::CAPSULE_SHAPE && halfHeight < EPSILON)) {
int parentIndex = joint.parentIndex;
if (parentIndex == -1 || radius < EPSILON) {
type = UNKNOWN_SHAPE;
} else if (type == CAPSULE_SHAPE && halfHeight < EPSILON) {
// this shape is forced to be a sphere
type = Shape::SPHERE_SHAPE;
}
if (radius < EPSILON) {
type = Shape::UNKNOWN_SHAPE;
type = SPHERE_SHAPE;
}
Shape* shape = NULL;
int parentIndex = joint.parentIndex;
if (type == Shape::SPHERE_SHAPE) {
if (type == SPHERE_SHAPE) {
shape = new VerletSphereShape(radius, &(points[i]));
shape->setEntity(this);
points[i].setMass(massScale * glm::max(MIN_JOINT_MASS, DENSITY_OF_WATER * shape->getVolume()));
} else if (type == Shape::CAPSULE_SHAPE) {
float mass = massScale * glm::max(MIN_JOINT_MASS, DENSITY_OF_WATER * shape->getVolume());
points[i].setMass(mass);
totalMass += mass;
} else if (type == CAPSULE_SHAPE) {
assert(parentIndex != -1);
shape = new VerletCapsuleShape(radius, &(points[parentIndex]), &(points[i]));
shape->setEntity(this);
points[i].setMass(massScale * glm::max(MIN_JOINT_MASS, DENSITY_OF_WATER * shape->getVolume()));
float mass = massScale * glm::max(MIN_JOINT_MASS, DENSITY_OF_WATER * shape->getVolume());
points[i].setMass(mass);
totalMass += mass;
}
if (parentIndex != -1) {
if (shape && parentIndex != -1) {
// always disable collisions between joint and its parent
if (shape) {
disableCollisions(i, parentIndex);
}
} else {
// give the base joint a very large mass since it doesn't actually move
// in the local-frame simulation (it defines the origin)
points[i].setMass(VERY_BIG_MASS);
}
disableCollisions(i, parentIndex);
}
_shapes.push_back(shape);
}
// set the mass of the root
if (numStates > 0) {
points[_ragdoll->getRootIndex()].setMass(totalMass);
}
// This method moves the shapes to their default positions in Model frame.
computeBoundingShape(geometry);
@ -721,7 +731,7 @@ void SkeletonModel::computeBoundingShape(const FBXGeometry& geometry) {
shapeExtents.reset();
glm::vec3 localPosition = shape->getTranslation();
int type = shape->getType();
if (type == Shape::CAPSULE_SHAPE) {
if (type == CAPSULE_SHAPE) {
// add the two furthest surface points of the capsule
CapsuleShape* capsule = static_cast<CapsuleShape*>(shape);
glm::vec3 axis;
@ -733,7 +743,7 @@ void SkeletonModel::computeBoundingShape(const FBXGeometry& geometry) {
shapeExtents.addPoint(localPosition + axis);
shapeExtents.addPoint(localPosition - axis);
totalExtents.addExtents(shapeExtents);
} else if (type == Shape::SPHERE_SHAPE) {
} else if (type == SPHERE_SHAPE) {
float radius = shape->getBoundingRadius();
glm::vec3 axis = glm::vec3(radius);
shapeExtents.addPoint(localPosition + axis);
@ -837,13 +847,13 @@ void SkeletonModel::renderJointCollisionShapes(float alpha) {
glPushMatrix();
// shapes are stored in simulation-frame but we want position to be model-relative
if (shape->getType() == Shape::SPHERE_SHAPE) {
if (shape->getType() == SPHERE_SHAPE) {
glm::vec3 position = shape->getTranslation() - simulationTranslation;
glTranslatef(position.x, position.y, position.z);
// draw a grey sphere at shape position
glColor4f(0.75f, 0.75f, 0.75f, alpha);
glutSolidSphere(shape->getBoundingRadius(), BALL_SUBDIVISIONS, BALL_SUBDIVISIONS);
} else if (shape->getType() == Shape::CAPSULE_SHAPE) {
} else if (shape->getType() == CAPSULE_SHAPE) {
CapsuleShape* capsule = static_cast<CapsuleShape*>(shape);
// draw a blue sphere at the capsule endpoint

View file

@ -127,7 +127,7 @@ protected:
/// Updates the state of the joint at the specified index.
virtual void updateJointState(int index);
void maybeUpdateLeanRotation(const JointState& parentState, const FBXJoint& joint, JointState& state);
void maybeUpdateLeanRotation(const JointState& parentState, JointState& state);
void maybeUpdateNeckRotation(const JointState& parentState, const FBXJoint& joint, JointState& state);
void maybeUpdateEyeRotation(const JointState& parentState, const FBXJoint& joint, JointState& state);

View file

@ -36,8 +36,9 @@ void SkeletonRagdoll::stepForward(float deltaTime) {
void SkeletonRagdoll::slamPointPositions() {
QVector<JointState>& jointStates = _model->getJointStates();
int numStates = jointStates.size();
for (int i = 0; i < numStates; ++i) {
const int numPoints = _points.size();
assert(numPoints == jointStates.size());
for (int i = _rootIndex; i < numPoints; ++i) {
_points[i].initPosition(jointStates.at(i).getPosition());
}
}
@ -49,8 +50,7 @@ void SkeletonRagdoll::initPoints() {
initTransform();
// one point for each joint
QVector<JointState>& jointStates = _model->getJointStates();
int numStates = jointStates.size();
int numStates = _model->getJointStates().size();
_points.fill(VerletPoint(), numStates);
slamPointPositions();
}
@ -67,13 +67,10 @@ void SkeletonRagdoll::buildConstraints() {
float minBone = FLT_MAX;
float maxBone = -FLT_MAX;
QMultiMap<int, int> families;
for (int i = 0; i < numPoints; ++i) {
for (int i = _rootIndex; i < numPoints; ++i) {
const JointState& state = jointStates.at(i);
int parentIndex = state.getParentIndex();
if (parentIndex == -1) {
FixedConstraint* anchor = new FixedConstraint(&_translationInSimulationFrame, &(_points[i]));
_fixedConstraints.push_back(anchor);
} else {
if (parentIndex != -1) {
DistanceConstraint* bone = new DistanceConstraint(&(_points[i]), &(_points[parentIndex]));
bone->setDistance(state.getDistanceToParent());
_boneConstraints.push_back(bone);
@ -108,7 +105,7 @@ void SkeletonRagdoll::buildConstraints() {
float MAX_STRENGTH = 0.6f;
float MIN_STRENGTH = 0.05f;
// each joint gets a MuscleConstraint to its parent
for (int i = 1; i < numPoints; ++i) {
for (int i = _rootIndex + 1; i < numPoints; ++i) {
const JointState& state = jointStates.at(i);
int p = state.getParentIndex();
if (p == -1) {

View file

@ -33,7 +33,9 @@ public:
virtual void initPoints();
virtual void buildConstraints();
protected:
void updateMuscles();
private:
Model* _model;
QVector<MuscleConstraint*> _muscleConstraints;

View file

@ -389,7 +389,6 @@ void CaraFaceTracker::decodePacket(const QByteArray& buffer) {
if (theta > EPSILON) {
float rMag = glm::length(glm::vec3(r.x, r.y, r.z));
const float AVERAGE_CARA_FRAME_TIME = 0.04f;
const float ANGULAR_VELOCITY_MIN = 1.2f;
const float YAW_STANDARD_DEV_DEG = 2.5f;
_headAngularVelocity = theta / AVERAGE_CARA_FRAME_TIME * glm::vec3(r.x, r.y, r.z) / rMag;

View file

@ -194,12 +194,12 @@ float updateAndGetCoefficient(float * coefficient, float currentValue, bool scal
coefficient[AVG] = LONG_TERM_AVERAGE * coefficient[AVG] + (1.f - LONG_TERM_AVERAGE) * currentValue;
if (coefficient[MAX] > coefficient[MIN]) {
if (scaleToRange) {
return glm::clamp((currentValue - coefficient[AVG]) / (coefficient[MAX] - coefficient[MIN]), 0.f, 1.f);
return glm::clamp((currentValue - coefficient[AVG]) / (coefficient[MAX] - coefficient[MIN]), 0.0f, 1.0f);
} else {
return glm::clamp(currentValue - coefficient[AVG], 0.f, 1.f);
return glm::clamp(currentValue - coefficient[AVG], 0.0f, 1.0f);
}
} else {
return 0.f;
return 0.0f;
}
}
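updateAndGetCoefficient() keeps per-channel running statistics (minimum, maximum, long-term average) and maps each sample into [0, 1] relative to them. A self-contained sketch of the same idea; the min/max maintenance and the smoothing constant are not shown in the hunk above, so those parts are assumptions.

#include <algorithm>

struct ChannelStats { // assumed layout; the source packs these into a float array
    float minValue = 1.0e30f;
    float maxValue = -1.0e30f;
    float average = 0.0f;
};

float normalizedCoefficient(ChannelStats& c, float current, bool scaleToRange) {
    const float LONG_TERM_AVERAGE = 0.999f; // assumed smoothing constant
    c.minValue = std::min(c.minValue, current); // assumed: not shown in the hunk
    c.maxValue = std::max(c.maxValue, current);
    c.average = LONG_TERM_AVERAGE * c.average + (1.0f - LONG_TERM_AVERAGE) * current;
    if (c.maxValue <= c.minValue) {
        return 0.0f; // no usable range observed yet
    }
    float value = scaleToRange ? (current - c.average) / (c.maxValue - c.minValue)
                               : (current - c.average);
    return std::clamp(value, 0.0f, 1.0f);
}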
@ -242,13 +242,11 @@ void DdeFaceTracker::decodePacket(const QByteArray& buffer) {
// Set blendshapes
float EYE_MAGNIFIER = 4.0f;
float rightEye = (updateAndGetCoefficient(_rightEye, packet.expressions[0])) * EYE_MAGNIFIER;
float rightEye = glm::clamp((updateAndGetCoefficient(_rightEye, packet.expressions[0])) * EYE_MAGNIFIER, 0.0f, 1.0f);
_blendshapeCoefficients[_rightBlinkIndex] = rightEye;
float leftEye = (updateAndGetCoefficient(_leftEye, packet.expressions[1])) * EYE_MAGNIFIER;
float leftEye = glm::clamp((updateAndGetCoefficient(_leftEye, packet.expressions[1])) * EYE_MAGNIFIER, 0.0f, 1.0f);
_blendshapeCoefficients[_leftBlinkIndex] = leftEye;
// Right eye = packet.expressions[0];
float leftBrow = 1.0f - rescaleCoef(packet.expressions[14]);
if (leftBrow < 0.5f) {
@ -270,9 +268,9 @@ void DdeFaceTracker::decodePacket(const QByteArray& buffer) {
float JAW_OPEN_MAGNIFIER = 1.4f;
_blendshapeCoefficients[_jawOpenIndex] = rescaleCoef(packet.expressions[21]) * JAW_OPEN_MAGNIFIER;
_blendshapeCoefficients[_mouthSmileLeftIndex] = rescaleCoef(packet.expressions[24]);
_blendshapeCoefficients[_mouthSmileRightIndex] = rescaleCoef(packet.expressions[23]);
float SMILE_MULTIPLIER = 2.0f;
_blendshapeCoefficients[_mouthSmileLeftIndex] = glm::clamp(packet.expressions[24] * SMILE_MULTIPLIER, 0.0f, 1.0f);
_blendshapeCoefficients[_mouthSmileRightIndex] = glm::clamp(packet.expressions[23] * SMILE_MULTIPLIER, 0.0f, 1.0f);
} else {

View file

@ -26,11 +26,17 @@ using namespace fs;
using namespace std;
const quint16 FACESHIFT_PORT = 33433;
float STARTING_FACESHIFT_FRAME_TIME = 0.033f;
Faceshift::Faceshift() :
_tcpEnabled(true),
_tcpRetryCount(0),
_lastTrackingStateReceived(0),
_averageFrameTime(STARTING_FACESHIFT_FRAME_TIME),
_headAngularVelocity(0),
_headLinearVelocity(0),
_lastHeadTranslation(0),
_filteredHeadTranslation(0),
_eyeGazeLeftPitch(0.0f),
_eyeGazeLeftYaw(0.0f),
_eyeGazeRightPitch(0.0f),
@ -209,23 +215,41 @@ void Faceshift::receive(const QByteArray& buffer) {
float theta = 2 * acos(r.w);
if (theta > EPSILON) {
float rMag = glm::length(glm::vec3(r.x, r.y, r.z));
float AVERAGE_FACESHIFT_FRAME_TIME = 0.033f;
_headAngularVelocity = theta / AVERAGE_FACESHIFT_FRAME_TIME * glm::vec3(r.x, r.y, r.z) / rMag;
_headAngularVelocity = theta / _averageFrameTime * glm::vec3(r.x, r.y, r.z) / rMag;
} else {
_headAngularVelocity = glm::vec3(0,0,0);
}
_headRotation = newRotation;
const float ANGULAR_VELOCITY_FILTER_STRENGTH = 0.3f;
_headRotation = safeMix(_headRotation, newRotation, glm::clamp(glm::length(_headAngularVelocity) *
ANGULAR_VELOCITY_FILTER_STRENGTH, 0.0f, 1.0f));
const float TRANSLATION_SCALE = 0.02f;
_headTranslation = glm::vec3(data.m_headTranslation.x, data.m_headTranslation.y,
-data.m_headTranslation.z) * TRANSLATION_SCALE;
glm::vec3 newHeadTranslation = glm::vec3(data.m_headTranslation.x, data.m_headTranslation.y,
-data.m_headTranslation.z) * TRANSLATION_SCALE;
_headLinearVelocity = (newHeadTranslation - _lastHeadTranslation) / _averageFrameTime;
const float LINEAR_VELOCITY_FILTER_STRENGTH = 0.3f;
float velocityFilter = glm::clamp(1.0f - glm::length(_headLinearVelocity) *
LINEAR_VELOCITY_FILTER_STRENGTH, 0.0f, 1.0f);
_filteredHeadTranslation = velocityFilter * _filteredHeadTranslation + (1.0f - velocityFilter) * newHeadTranslation;
_lastHeadTranslation = newHeadTranslation;
_headTranslation = _filteredHeadTranslation;
_eyeGazeLeftPitch = -data.m_eyeGazeLeftPitch;
_eyeGazeLeftYaw = data.m_eyeGazeLeftYaw;
_eyeGazeRightPitch = -data.m_eyeGazeRightPitch;
_eyeGazeRightYaw = data.m_eyeGazeRightYaw;
_blendshapeCoefficients = QVector<float>::fromStdVector(data.m_coeffs);
_lastTrackingStateReceived = usecTimestampNow();
const float FRAME_AVERAGING_FACTOR = 0.99f;
quint64 usecsNow = usecTimestampNow();
if (_lastTrackingStateReceived != 0) {
_averageFrameTime = FRAME_AVERAGING_FACTOR * _averageFrameTime +
(1.0f - FRAME_AVERAGING_FACTOR) * (float)(usecsNow - _lastTrackingStateReceived) / 1000000.0f;
}
_lastTrackingStateReceived = usecsNow;
}
break;
}
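The angular-velocity computation above is a standard axis-angle extraction: for a frame-to-frame delta quaternion r, the rotation angle is 2*acos(r.w) and the axis is the normalized vector part, so dividing by the (now adaptively averaged) frame time yields radians per second. A self-contained sketch, assuming r is the delta between consecutive orientations.

#include <cmath>
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

glm::vec3 angularVelocity(const glm::quat& previous, const glm::quat& current, float dt) {
    glm::quat r = current * glm::inverse(previous);           // frame-to-frame delta
    float theta = 2.0f * acosf(glm::clamp(r.w, -1.0f, 1.0f)); // rotation angle
    glm::vec3 axis(r.x, r.y, r.z);
    float axisLength = glm::length(axis);
    if (theta < 1.0e-6f || axisLength < 1.0e-6f || dt <= 0.0f) {
        return glm::vec3(0.0f); // effectively no rotation this frame
    }
    return (theta / dt) * (axis / axisLength); // radians per second about the axis
}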

View file

@ -98,8 +98,12 @@ private:
int _tcpRetryCount;
bool _tracking;
quint64 _lastTrackingStateReceived;
float _averageFrameTime;
glm::vec3 _headAngularVelocity;
glm::vec3 _headLinearVelocity;
glm::vec3 _lastHeadTranslation;
glm::vec3 _filteredHeadTranslation;
// degrees
float _eyeGazeLeftPitch;

View file

@ -85,9 +85,13 @@ void LocationManager::goTo(QString destination) {
if (!goToDestination(destination)) {
destination = QString(QUrl::toPercentEncoding(destination));
UserActivityLogger::getInstance().wentTo(OTHER_DESTINATION_TYPE, destination);
JSONCallbackParameters callbackParams;
callbackParams.jsonCallbackReceiver = this;
callbackParams.jsonCallbackMethod = "goToAddressFromResponse";
callbackParams.errorCallbackReceiver = this;
callbackParams.errorCallbackMethod = "handleAddressLookupError";
AccountManager::getInstance().authenticatedRequest(GET_ADDRESSES.arg(destination),
QNetworkAccessManager::GetOperation,
callbackParams);
@ -96,21 +100,17 @@ void LocationManager::goTo(QString destination) {
void LocationManager::goToAddressFromResponse(const QJsonObject& responseData) {
QJsonValue status = responseData["status"];
qDebug() << responseData;
if (!status.isUndefined() && status.toString() == "success") {
const QJsonObject& data = responseData["data"].toObject();
const QJsonValue& userObject = data["user"];
const QJsonValue& placeObject = data["place"];
if (!placeObject.isUndefined() && !userObject.isUndefined()) {
emit multipleDestinationsFound(userObject.toObject(), placeObject.toObject());
} else if (placeObject.isUndefined()) {
Application::getInstance()->getAvatar()->goToLocationFromAddress(userObject.toObject()["address"].toObject());
} else {
Application::getInstance()->getAvatar()->goToLocationFromAddress(placeObject.toObject()["address"].toObject());
}
const QJsonObject& data = responseData["data"].toObject();
const QJsonValue& userObject = data["user"];
const QJsonValue& placeObject = data["place"];
if (!placeObject.isUndefined() && !userObject.isUndefined()) {
emit multipleDestinationsFound(userObject.toObject(), placeObject.toObject());
} else if (placeObject.isUndefined()) {
Application::getInstance()->getAvatar()->goToLocationFromAddress(userObject.toObject()["address"].toObject());
} else {
QMessageBox::warning(Application::getInstance()->getWindow(), "", "That user or location could not be found.");
Application::getInstance()->getAvatar()->goToLocationFromAddress(placeObject.toObject()["address"].toObject());
}
}
@ -118,6 +118,8 @@ void LocationManager::goToUser(QString userName) {
JSONCallbackParameters callbackParams;
callbackParams.jsonCallbackReceiver = Application::getInstance()->getAvatar();
callbackParams.jsonCallbackMethod = "goToLocationFromResponse";
callbackParams.errorCallbackReceiver = this;
callbackParams.errorCallbackMethod = "handleAddressLookupError";
userName = QString(QUrl::toPercentEncoding(userName));
AccountManager::getInstance().authenticatedRequest(GET_USER_ADDRESS.arg(userName),
@ -129,6 +131,8 @@ void LocationManager::goToPlace(QString placeName) {
JSONCallbackParameters callbackParams;
callbackParams.jsonCallbackReceiver = Application::getInstance()->getAvatar();
callbackParams.jsonCallbackMethod = "goToLocationFromResponse";
callbackParams.errorCallbackReceiver = this;
callbackParams.errorCallbackMethod = "handleAddressLookupError";
placeName = QString(QUrl::toPercentEncoding(placeName));
AccountManager::getInstance().authenticatedRequest(GET_PLACE_ADDRESS.arg(placeName),
@ -212,6 +216,19 @@ bool LocationManager::goToDestination(QString destination) {
return false;
}
void LocationManager::handleAddressLookupError(QNetworkReply::NetworkError networkError,
const QString& errorString) {
QString messageBoxString;
if (networkError == QNetworkReply::ContentNotFoundError) {
messageBoxString = "That address could not be found.";
} else {
messageBoxString = errorString;
}
QMessageBox::warning(Application::getInstance()->getWindow(), "", messageBoxString);
}
void LocationManager::replaceLastOccurrence(const QChar search, const QChar replace, QString& string) {
int lastIndex;
lastIndex = string.lastIndexOf(search);

View file

@ -37,6 +37,9 @@ public:
void goToPlace(QString placeName);
void goToOrientation(QString orientation);
bool goToDestination(QString destination);
public slots:
void handleAddressLookupError(QNetworkReply::NetworkError networkError, const QString& errorString);
private:
void replaceLastOccurrence(const QChar search, const QChar replace, QString& string);

View file

@ -593,17 +593,20 @@ void NetworkGeometry::setGeometry(const FBXGeometry& geometry) {
NetworkMeshPart networkPart;
if (!part.diffuseTexture.filename.isEmpty()) {
networkPart.diffuseTexture = Application::getInstance()->getTextureCache()->getTexture(
_textureBase.resolved(QUrl(part.diffuseTexture.filename)), false, mesh.isEye, part.diffuseTexture.content);
_textureBase.resolved(QUrl(part.diffuseTexture.filename)), DEFAULT_TEXTURE,
mesh.isEye, part.diffuseTexture.content);
networkPart.diffuseTexture->setLoadPriorities(_loadPriorities);
}
if (!part.normalTexture.filename.isEmpty()) {
networkPart.normalTexture = Application::getInstance()->getTextureCache()->getTexture(
_textureBase.resolved(QUrl(part.normalTexture.filename)), true, false, part.normalTexture.content);
_textureBase.resolved(QUrl(part.normalTexture.filename)), NORMAL_TEXTURE,
false, part.normalTexture.content);
networkPart.normalTexture->setLoadPriorities(_loadPriorities);
}
if (!part.specularTexture.filename.isEmpty()) {
networkPart.specularTexture = Application::getInstance()->getTextureCache()->getTexture(
_textureBase.resolved(QUrl(part.specularTexture.filename)), true, false, part.specularTexture.content);
_textureBase.resolved(QUrl(part.specularTexture.filename)), SPECULAR_TEXTURE,
false, part.specularTexture.content);
networkPart.specularTexture->setLoadPriorities(_loadPriorities);
}
networkMesh.parts.append(networkPart);

View file

@ -73,7 +73,7 @@ void JointState::setFBXJoint(const FBXJoint* joint) {
}
}
void JointState::updateConstraint() {
void JointState::buildConstraint() {
if (_constraint) {
delete _constraint;
_constraint = NULL;
@ -145,7 +145,7 @@ glm::quat JointState::getVisibleRotationInParentFrame() const {
void JointState::restoreRotation(float fraction, float priority) {
assert(_fbxJoint != NULL);
if (priority == _animationPriority || _animationPriority == 0.0f) {
setRotationInConstrainedFrame(safeMix(_rotationInConstrainedFrame, _fbxJoint->rotation, fraction));
setRotationInConstrainedFrameInternal(safeMix(_rotationInConstrainedFrame, _fbxJoint->rotation, fraction));
_animationPriority = 0.0f;
}
}
@ -158,7 +158,7 @@ void JointState::setRotationInBindFrame(const glm::quat& rotation, float priorit
if (constrain && _constraint) {
_constraint->softClamp(targetRotation, _rotationInConstrainedFrame, 0.5f);
}
setRotationInConstrainedFrame(targetRotation);
setRotationInConstrainedFrameInternal(targetRotation);
_animationPriority = priority;
}
}
@ -173,10 +173,6 @@ void JointState::clearTransformTranslation() {
_visibleTransform[3][2] = 0.0f;
}
void JointState::setRotation(const glm::quat& rotation, bool constrain, float priority) {
applyRotationDelta(rotation * glm::inverse(getRotation()), true, priority);
}
void JointState::applyRotationDelta(const glm::quat& delta, bool constrain, float priority) {
// NOTE: delta is in model-frame
assert(_fbxJoint != NULL);
@ -193,7 +189,7 @@ void JointState::applyRotationDelta(const glm::quat& delta, bool constrain, floa
_rotation = delta * getRotation();
return;
}
setRotationInConstrainedFrame(targetRotation);
setRotationInConstrainedFrameInternal(targetRotation);
}
/// Applies delta rotation to joint but mixes a little bit of the default pose as well.
@ -212,7 +208,7 @@ void JointState::mixRotationDelta(const glm::quat& delta, float mixFactor, float
if (_constraint) {
_constraint->softClamp(targetRotation, _rotationInConstrainedFrame, 0.5f);
}
setRotationInConstrainedFrame(targetRotation);
setRotationInConstrainedFrameInternal(targetRotation);
}
void JointState::mixVisibleRotationDelta(const glm::quat& delta, float mixFactor) {
@ -236,7 +232,17 @@ glm::quat JointState::computeVisibleParentRotation() const {
return _visibleRotation * glm::inverse(_fbxJoint->preRotation * _visibleRotationInConstrainedFrame * _fbxJoint->postRotation);
}
void JointState::setRotationInConstrainedFrame(const glm::quat& targetRotation) {
void JointState::setRotationInConstrainedFrame(glm::quat targetRotation, float priority, bool constrain) {
if (priority >= _animationPriority || _animationPriority == 0.0f) {
if (constrain && _constraint) {
_constraint->softClamp(targetRotation, _rotationInConstrainedFrame, 0.5f);
}
setRotationInConstrainedFrameInternal(targetRotation);
_animationPriority = priority;
}
}
void JointState::setRotationInConstrainedFrameInternal(const glm::quat& targetRotation) {
glm::quat parentRotation = computeParentRotation();
_rotationInConstrainedFrame = targetRotation;
_transformChanged = true;
@ -258,6 +264,11 @@ const bool JointState::rotationIsDefault(const glm::quat& rotation, float tolera
glm::abs(rotation.w - defaultRotation.w) < tolerance;
}
glm::quat JointState::getDefaultRotationInParentFrame() const {
// NOTE: the result is constant and could be cached in the FBXJoint
return _fbxJoint->preRotation * _fbxJoint->rotation * _fbxJoint->postRotation;
}
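Following the NOTE above, the product preRotation * rotation * postRotation is constant per joint and could be cached. A lazily cached variant might look like this; the two cache members are hypothetical additions, not in the source.

glm::quat JointState::getDefaultRotationInParentFrame() const {
    // _defaultRotationValid and _defaultRotationInParentFrame are assumed
    // mutable members added for the cache; they are not in the source.
    if (!_defaultRotationValid) {
        _defaultRotationInParentFrame =
            _fbxJoint->preRotation * _fbxJoint->rotation * _fbxJoint->postRotation;
        _defaultRotationValid = true;
    }
    return _defaultRotationInParentFrame;
}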
const glm::vec3& JointState::getDefaultTranslationInConstrainedFrame() const {
assert(_fbxJoint != NULL);
return _fbxJoint->translation;
@ -267,4 +278,4 @@ void JointState::slaveVisibleTransform() {
_visibleTransform = _transform;
_visibleRotation = getRotation();
_visibleRotationInConstrainedFrame = _rotationInConstrainedFrame;
}
}
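
The effect of this refactor is that the priority gate and the optional constraint clamp now live inside the setter, with the ungated one-argument form surviving privately as setRotationInConstrainedFrameInternal. A minimal sketch of the resulting calling pattern, using the same names as the hunks above:

    // Before: each caller re-implemented the priority gate itself.
    if (priority >= state._animationPriority) {
        state.setRotationInConstrainedFrame(rotation);   // old one-argument form
        state._animationPriority = priority;
    }

    // After: the gate lives in the setter; callers just pass the priority through.
    state.setRotationInConstrainedFrame(rotation, priority);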

View file

@ -19,6 +19,8 @@
#include <GLMHelpers.h>
#include <FBXReader.h>
const float DEFAULT_PRIORITY = 3.0f;
class AngularConstraint;
class JointState {
@ -30,7 +32,7 @@ public:
void setFBXJoint(const FBXJoint* joint);
const FBXJoint& getFBXJoint() const { return *_fbxJoint; }
void updateConstraint();
void buildConstraint();
void copyState(const JointState& state);
void initTransform(const glm::mat4& parentTransform);
@ -60,9 +62,6 @@ public:
int getParentIndex() const { return _fbxJoint->parentIndex; }
/// \param rotation rotation of joint in model-frame
void setRotation(const glm::quat& rotation, bool constrain, float priority);
/// \param delta is in the model-frame
void applyRotationDelta(const glm::quat& delta, bool constrain = true, float priority = 1.0f);
@ -84,13 +83,14 @@ public:
/// NOTE: the JointState's model-frame transform/rotation are NOT updated!
void setRotationInBindFrame(const glm::quat& rotation, float priority, bool constrain = false);
void setRotationInConstrainedFrame(const glm::quat& targetRotation);
void setRotationInConstrainedFrame(glm::quat targetRotation, float priority, bool constrain = false);
void setVisibleRotationInConstrainedFrame(const glm::quat& targetRotation);
const glm::quat& getRotationInConstrainedFrame() const { return _rotationInConstrainedFrame; }
const glm::quat& getVisibleRotationInConstrainedFrame() const { return _visibleRotationInConstrainedFrame; }
const bool rotationIsDefault(const glm::quat& rotation, float tolerance = EPSILON) const;
glm::quat getDefaultRotationInParentFrame() const;
const glm::vec3& getDefaultTranslationInConstrainedFrame() const;
@ -106,6 +106,7 @@ public:
glm::quat computeVisibleParentRotation() const;
private:
void setRotationInConstrainedFrameInternal(const glm::quat& targetRotation);
/// debug helper function
void loadBindRotation();

View file

@ -438,7 +438,7 @@ void Model::reset() {
}
const FBXGeometry& geometry = _geometry->getFBXGeometry();
for (int i = 0; i < _jointStates.size(); i++) {
_jointStates[i].setRotationInConstrainedFrame(geometry.joints.at(i).rotation);
_jointStates[i].setRotationInConstrainedFrame(geometry.joints.at(i).rotation, 0.0f);
}
}
@ -547,7 +547,7 @@ void Model::setJointStates(QVector<JointState> states) {
if (distance > radius) {
radius = distance;
}
_jointStates[i].updateConstraint();
_jointStates[i].buildConstraint();
}
for (int i = 0; i < _jointStates.size(); i++) {
_jointStates[i].slaveVisibleTransform();
@ -692,16 +692,26 @@ bool Model::getVisibleJointState(int index, glm::quat& rotation) const {
return !state.rotationIsDefault(rotation);
}
void Model::clearJointState(int index) {
if (index != -1 && index < _jointStates.size()) {
JointState& state = _jointStates[index];
state.setRotationInConstrainedFrame(glm::quat(), 0.0f);
}
}
void Model::clearJointAnimationPriority(int index) {
if (index != -1 && index < _jointStates.size()) {
_jointStates[index]._animationPriority = 0.0f;
}
}
void Model::setJointState(int index, bool valid, const glm::quat& rotation, float priority) {
if (index != -1 && index < _jointStates.size()) {
JointState& state = _jointStates[index];
if (priority >= state._animationPriority) {
if (valid) {
state.setRotationInConstrainedFrame(rotation);
state._animationPriority = priority;
} else {
state.restoreRotation(1.0f, priority);
}
if (valid) {
state.setRotationInConstrainedFrame(rotation, priority);
} else {
state.restoreRotation(1.0f, priority);
}
}
}
@ -1184,7 +1194,7 @@ void Model::inverseKinematics(int endIndex, glm::vec3 targetPosition, const glm:
}
// Apply the rotation, but use mixRotationDelta() which blends a bit of the default pose
// at in the process. This provides stability to the IK solution for most models.
// in the process. This provides stability to the IK solution for most models.
glm::quat oldNextRotation = nextState.getRotation();
float mixFactor = 0.03f;
nextState.mixRotationDelta(deltaRotation, mixFactor, priority);
@ -1729,10 +1739,7 @@ void AnimationHandle::applyFrame(float frameIndex) {
int mapping = _jointMappings.at(i);
if (mapping != -1) {
JointState& state = _model->_jointStates[mapping];
if (_priority >= state._animationPriority) {
state.setRotationInConstrainedFrame(safeMix(floorFrame.rotations.at(i), ceilFrame.rotations.at(i), frameFraction));
state._animationPriority = _priority;
}
state.setRotationInConstrainedFrame(safeMix(floorFrame.rotations.at(i), ceilFrame.rotations.at(i), frameFraction), _priority);
}
}
}
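
For context on the applyFrame hunk: the handle blends the two keyframes bracketing the fractional frameIndex. The derivation of floorFrame, ceilFrame, and frameFraction sits outside this hunk, so the following is only a sketch under assumed names:

    // Sketch only: assumes frames is the clip's keyframe list and frameIndex
    // is a fractional playback position, as used in applyFrame above.
    int frameCount = frames.size();
    const FBXAnimationFrame& floorFrame = frames.at((int)glm::floor(frameIndex) % frameCount);
    const FBXAnimationFrame& ceilFrame = frames.at((int)glm::ceil(frameIndex) % frameCount);  // wraps for looping
    float frameFraction = glm::fract(frameIndex);  // blend weight in [0, 1)
    // Each mapped joint then receives
    // safeMix(floorFrame.rotations.at(i), ceilFrame.rotations.at(i), frameFraction)
    // at the handle's priority, with the priority bookkeeping done by the setter.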

View file

@ -118,6 +118,12 @@ public:
/// \return whether or not the joint state is "valid" (that is, non-default)
bool getVisibleJointState(int index, glm::quat& rotation) const;
/// Clears the joint state (animation rotation) at the specified index.
void clearJointState(int index);
/// Clears the animation priority of the joint at the specified index.
void clearJointAnimationPriority(int index);
/// Sets the joint state at the specified index.
void setJointState(int index, bool valid, const glm::quat& rotation = glm::quat(), float priority = 1.0f);

View file

@ -145,6 +145,8 @@ GLuint TextureCache::getPermutationNormalTextureID() {
}
const unsigned char OPAQUE_WHITE[] = { 0xFF, 0xFF, 0xFF, 0xFF };
const unsigned char TRANSPARENT_WHITE[] = { 0xFF, 0xFF, 0xFF, 0x0 };
const unsigned char OPAQUE_BLACK[] = { 0x0, 0x0, 0x0, 0xFF };
const unsigned char OPAQUE_BLUE[] = { 0x80, 0x80, 0xFF, 0xFF };
static void loadSingleColorTexture(const unsigned char* color) {
@ -175,19 +177,18 @@ GLuint TextureCache::getBlueTextureID() {
/// Extra data for creating textures.
class TextureExtra {
public:
bool normalMap;
TextureType type;
const QByteArray& content;
};
QSharedPointer<NetworkTexture> TextureCache::getTexture(const QUrl& url, bool normalMap,
bool dilatable, const QByteArray& content) {
NetworkTexturePointer TextureCache::getTexture(const QUrl& url, TextureType type, bool dilatable, const QByteArray& content) {
if (!dilatable) {
TextureExtra extra = { normalMap, content };
TextureExtra extra = { type, content };
return ResourceCache::getResource(url, QUrl(), false, &extra).staticCast<NetworkTexture>();
}
QSharedPointer<NetworkTexture> texture = _dilatableNetworkTextures.value(url);
NetworkTexturePointer texture = _dilatableNetworkTextures.value(url);
if (texture.isNull()) {
texture = QSharedPointer<NetworkTexture>(new DilatableNetworkTexture(url, content), &Resource::allReferencesCleared);
texture = NetworkTexturePointer(new DilatableNetworkTexture(url, content), &Resource::allReferencesCleared);
texture->setSelf(texture);
texture->setCache(this);
_dilatableNetworkTextures.insert(url, texture);
@ -293,7 +294,7 @@ bool TextureCache::eventFilter(QObject* watched, QEvent* event) {
QSharedPointer<Resource> TextureCache::createResource(const QUrl& url,
const QSharedPointer<Resource>& fallback, bool delayLoad, const void* extra) {
const TextureExtra* textureExtra = static_cast<const TextureExtra*>(extra);
return QSharedPointer<Resource>(new NetworkTexture(url, textureExtra->normalMap, textureExtra->content),
return QSharedPointer<Resource>(new NetworkTexture(url, textureExtra->type, textureExtra->content),
&Resource::allReferencesCleared);
}
@ -317,17 +318,34 @@ Texture::~Texture() {
glDeleteTextures(1, &_id);
}
NetworkTexture::NetworkTexture(const QUrl& url, bool normalMap, const QByteArray& content) :
NetworkTexture::NetworkTexture(const QUrl& url, TextureType type, const QByteArray& content) :
Resource(url, !content.isEmpty()),
_type(type),
_translucent(false) {
if (!url.isValid()) {
_loaded = true;
}
// default to white/blue
// default to white/blue/black/transparent, depending on the texture type
glBindTexture(GL_TEXTURE_2D, getID());
loadSingleColorTexture(normalMap ? OPAQUE_BLUE : OPAQUE_WHITE);
switch (type) {
case NORMAL_TEXTURE:
loadSingleColorTexture(OPAQUE_BLUE);
break;
case SPECULAR_TEXTURE:
loadSingleColorTexture(OPAQUE_BLACK);
break;
case SPLAT_TEXTURE:
loadSingleColorTexture(TRANSPARENT_WHITE);
break;
default:
loadSingleColorTexture(OPAQUE_WHITE);
break;
}
glBindTexture(GL_TEXTURE_2D, 0);
// if we have content, load it after we have our self pointer
@ -382,12 +400,28 @@ void ImageReader::run() {
qDebug() << "Image greater than maximum size:" << _url << image.width() << image.height();
image = image.scaled(MAXIMUM_SIZE, MAXIMUM_SIZE, Qt::KeepAspectRatio);
}
int imageArea = image.width() * image.height();
const int EIGHT_BIT_MAXIMUM = 255;
if (!image.hasAlphaChannel()) {
if (image.format() != QImage::Format_RGB888) {
image = image.convertToFormat(QImage::Format_RGB888);
}
QMetaObject::invokeMethod(texture.data(), "setImage", Q_ARG(const QImage&, image), Q_ARG(bool, false));
int redTotal = 0, greenTotal = 0, blueTotal = 0;
for (int y = 0; y < image.height(); y++) {
for (int x = 0; x < image.width(); x++) {
QRgb rgb = image.pixel(x, y);
redTotal += qRed(rgb);
greenTotal += qGreen(rgb);
blueTotal += qBlue(rgb);
}
}
QColor averageColor(EIGHT_BIT_MAXIMUM, EIGHT_BIT_MAXIMUM, EIGHT_BIT_MAXIMUM);
if (imageArea > 0) {
averageColor.setRgb(redTotal / imageArea, greenTotal / imageArea, blueTotal / imageArea);
}
QMetaObject::invokeMethod(texture.data(), "setImage", Q_ARG(const QImage&, image), Q_ARG(bool, false),
Q_ARG(const QColor&, averageColor));
return;
}
if (image.format() != QImage::Format_ARGB32) {
@ -397,11 +431,15 @@ void ImageReader::run() {
// check for translucency/false transparency
int opaquePixels = 0;
int translucentPixels = 0;
const int EIGHT_BIT_MAXIMUM = 255;
const int RGB_BITS = 24;
int redTotal = 0, greenTotal = 0, blueTotal = 0, alphaTotal = 0;
for (int y = 0; y < image.height(); y++) {
for (int x = 0; x < image.width(); x++) {
int alpha = image.pixel(x, y) >> RGB_BITS;
QRgb rgb = image.pixel(x, y);
redTotal += qRed(rgb);
greenTotal += qGreen(rgb);
blueTotal += qBlue(rgb);
int alpha = qAlpha(rgb);
alphaTotal += alpha;
if (alpha == EIGHT_BIT_MAXIMUM) {
opaquePixels++;
} else if (alpha != 0) {
@ -409,13 +447,13 @@ void ImageReader::run() {
}
}
}
int imageArea = image.width() * image.height();
if (opaquePixels == imageArea) {
qDebug() << "Image with alpha channel is completely opaque:" << _url;
image = image.convertToFormat(QImage::Format_RGB888);
}
QMetaObject::invokeMethod(texture.data(), "setImage", Q_ARG(const QImage&, image),
Q_ARG(bool, translucentPixels >= imageArea / 2));
Q_ARG(bool, translucentPixels >= imageArea / 2), Q_ARG(const QColor&, QColor(redTotal / imageArea,
greenTotal / imageArea, blueTotal / imageArea, alphaTotal / imageArea)));
}
void NetworkTexture::downloadFinished(QNetworkReply* reply) {
@ -427,8 +465,9 @@ void NetworkTexture::loadContent(const QByteArray& content) {
QThreadPool::globalInstance()->start(new ImageReader(_self, NULL, _url, content));
}
void NetworkTexture::setImage(const QImage& image, bool translucent) {
void NetworkTexture::setImage(const QImage& image, bool translucent, const QColor& averageColor) {
_translucent = translucent;
_averageColor = averageColor;
finishedLoading(true);
imageLoaded(image);
@ -440,7 +479,13 @@ void NetworkTexture::setImage(const QImage& image, bool translucent) {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image.width(), image.height(), 0,
GL_RGB, GL_UNSIGNED_BYTE, image.constBits());
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
if (_type == SPLAT_TEXTURE) {
// generate mipmaps for splat textures
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
} else {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
}
glBindTexture(GL_TEXTURE_2D, 0);
}
@ -449,7 +494,7 @@ void NetworkTexture::imageLoaded(const QImage& image) {
}
DilatableNetworkTexture::DilatableNetworkTexture(const QUrl& url, const QByteArray& content) :
NetworkTexture(url, false, content),
NetworkTexture(url, DEFAULT_TEXTURE, content),
_innerRadius(0),
_outerRadius(0)
{
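
Both ImageReader paths now accumulate per-channel totals and report an average color along with the decoded image. A worked example of the integer arithmetic:

    // A 2x2 image with two pure-red and two pure-blue pixels:
    //   redTotal = 255 + 255 + 0 + 0 = 510, greenTotal = 0, blueTotal = 510, imageArea = 4
    //   averageColor = QColor(510 / 4, 0 / 4, 510 / 4) = QColor(127, 0, 127)   // integer division truncates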

View file

@ -23,6 +23,10 @@ class QOpenGLFramebufferObject;
class NetworkTexture;
typedef QSharedPointer<NetworkTexture> NetworkTexturePointer;
enum TextureType { DEFAULT_TEXTURE, NORMAL_TEXTURE, SPECULAR_TEXTURE, SPLAT_TEXTURE };
/// Stores cached textures, including render-to-texture targets.
class TextureCache : public ResourceCache {
Q_OBJECT
@ -47,7 +51,7 @@ public:
GLuint getBlueTextureID();
/// Loads a texture from the specified URL.
QSharedPointer<NetworkTexture> getTexture(const QUrl& url, bool normalMap = false, bool dilatable = false,
NetworkTexturePointer getTexture(const QUrl& url, TextureType type = DEFAULT_TEXTURE, bool dilatable = false,
const QByteArray& content = QByteArray());
/// Returns a pointer to the primary framebuffer object. This render target includes a depth component, and is
@ -121,24 +125,29 @@ class NetworkTexture : public Resource, public Texture {
public:
NetworkTexture(const QUrl& url, bool normalMap, const QByteArray& content);
NetworkTexture(const QUrl& url, TextureType type, const QByteArray& content);
/// Checks whether it "looks like" this texture is translucent
/// (majority of pixels neither fully opaque nor fully transparent).
bool isTranslucent() const { return _translucent; }
/// Returns the lazily-computed average texture color.
const QColor& getAverageColor() const { return _averageColor; }
protected:
virtual void downloadFinished(QNetworkReply* reply);
Q_INVOKABLE void loadContent(const QByteArray& content);
Q_INVOKABLE void setImage(const QImage& image, bool translucent);
Q_INVOKABLE void setImage(const QImage& image, bool translucent, const QColor& averageColor);
virtual void imageLoaded(const QImage& image);
private:
TextureType _type;
bool _translucent;
QColor _averageColor;
};
/// Caches derived, dilated textures.
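
With the boolean normalMap flag replaced by the TextureType enum, the placeholder color is chosen implicitly per type. A minimal usage sketch (the URLs are placeholders, and reaching the cache via Application is assumed, as MetavoxelEditor does below):

    TextureCache* cache = Application::getInstance()->getTextureCache();
    NetworkTexturePointer normalMap = cache->getTexture(QUrl("http://example.com/rock_normal.png"), NORMAL_TEXTURE);
    NetworkTexturePointer splat = cache->getTexture(QUrl("http://example.com/grass.png"), SPLAT_TEXTURE);
    // Until the downloads complete, normalMap samples as opaque blue and splat as transparent white.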

View file

@ -20,6 +20,10 @@ LocationScriptingInterface* LocationScriptingInterface::getInstance() {
return &sharedInstance;
}
bool LocationScriptingInterface::isConnected() {
return NodeList::getInstance()->getDomainHandler().isConnected();
}
QString LocationScriptingInterface::getHref() {
return getProtocol() + "//" + getHostname() + getPathname();
}
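
getHref is plain concatenation of the pieces the interface already exposes. Assuming CUSTOM_URL_SCHEME evaluates to "hifi:", a script would observe:

    // protocol "hifi:" + "//" + hostname "sandbox" + pathname "/10,10,10"
    //   location.href == "hifi://sandbox/10,10,10"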

View file

@ -22,6 +22,7 @@
class LocationScriptingInterface : public QObject {
Q_OBJECT
Q_PROPERTY(bool isConnected READ isConnected)
Q_PROPERTY(QString href READ getHref)
Q_PROPERTY(QString protocol READ getProtocol)
Q_PROPERTY(QString hostname READ getHostname)
@ -30,6 +31,7 @@ class LocationScriptingInterface : public QObject {
public:
static LocationScriptingInterface* getInstance();
bool isConnected();
QString getHref();
QString getProtocol() { return CUSTOM_URL_SCHEME; };
QString getPathname();

View file

@ -100,14 +100,52 @@ QScriptValue WindowScriptingInterface::showConfirm(const QString& message) {
return QScriptValue(response == QMessageBox::Yes);
}
void WindowScriptingInterface::chooseDirectory() {
QPushButton* button = reinterpret_cast<QPushButton*>(sender());
QString title = button->property("title").toString();
QString path = button->property("path").toString();
QRegExp displayAs = button->property("displayAs").toRegExp();
QRegExp validateAs = button->property("validateAs").toRegExp();
QString errorMessage = button->property("errorMessage").toString();
QString directory = QFileDialog::getExistingDirectory(button, title, path);
if (directory.isEmpty()) {
return;
}
if (!validateAs.exactMatch(directory)) {
QMessageBox::warning(NULL, "Invalid Directory", errorMessage);
return;
}
button->setProperty("path", directory);
displayAs.indexIn(directory);
QString buttonText = displayAs.cap(1) != "" ? displayAs.cap(1) : ".";
button->setText(buttonText);
}
QString WindowScriptingInterface::jsRegExp2QtRegExp(QString string) {
// Converts string representation of RegExp from JavaScript format to Qt format.
return string.mid(1, string.length() - 2) // No enclosing slashes.
.replace("\\/", "/"); // No escaping of forward slash.
}
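
A worked example of the conversion, since JavaScript regexp literals carry enclosing slashes and escape any forward slash inside the pattern:

    // input (a JS literal, as a string):     /^\/home\/(.*)$/
    // mid(1, length - 2) strips the slashes: ^\/home\/(.*)$
    // replace("\\/", "/") unescapes them:    ^/home/(.*)$
    // The result is used directly as a QRegExp pattern in showForm below.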
/// Display a form layout with an edit box
/// \param const QString& title title to display
/// \param const QScriptValue form to display (array containing labels and values)
/// \return QScriptValue result form (unchanged is dialog canceled)
/// \param const QScriptValue form to display as an array of objects:
/// - label, value
/// - label, directory, title, display regexp, validate regexp, error message
/// - button ("Cancel")
/// \return QScriptValue `true` if 'OK' was clicked, `false` otherwise
QScriptValue WindowScriptingInterface::showForm(const QString& title, QScriptValue form) {
if (form.isArray() && form.property("length").toInt32() > 0) {
QDialog* editDialog = new QDialog(Application::getInstance()->getWindow());
editDialog->setWindowTitle(title);
bool cancelButton = false;
QVBoxLayout* layout = new QVBoxLayout();
editDialog->setLayout(layout);
@ -127,44 +165,104 @@ QScriptValue WindowScriptingInterface::showForm(const QString& title, QScriptVal
area->setWidget(container);
QVector<QLineEdit*> edits;
QVector<QPushButton*> directories;
for (int i = 0; i < form.property("length").toInt32(); ++i) {
QScriptValue item = form.property(i);
edits.push_back(new QLineEdit(item.property("value").toString()));
formLayout->addRow(item.property("label").toString(), edits.back());
if (item.property("button").toString() != "") {
cancelButton = cancelButton || item.property("button").toString().toLower() == "cancel";
} else if (item.property("directory").toString() != "") {
QString path = item.property("directory").toString();
QString title = item.property("title").toString();
if (title == "") {
title = "Choose Directory";
}
QString displayAsString = item.property("displayAs").toString();
QRegExp displayAs = QRegExp(displayAsString != "" ? jsRegExp2QtRegExp(displayAsString) : "^(.*)$");
QString validateAsString = item.property("validateAs").toString();
QRegExp validateAs = QRegExp(validateAsString != "" ? jsRegExp2QtRegExp(validateAsString) : ".*");
QString errorMessage = item.property("errorMessage").toString();
if (errorMessage == "") {
errorMessage = "Invalid directory";
}
QPushButton* directory = new QPushButton(displayAs.cap(1));
directory->setProperty("title", title);
directory->setProperty("path", path);
directory->setProperty("displayAs", displayAs);
directory->setProperty("validateAs", validateAs);
directory->setProperty("errorMessage", errorMessage);
displayAs.indexIn(path);
directory->setText(displayAs.cap(1) != "" ? displayAs.cap(1) : ".");
directory->setMinimumWidth(200);
directories.push_back(directory);
formLayout->addRow(new QLabel(item.property("label").toString()), directory);
connect(directory, SIGNAL(clicked(bool)), SLOT(chooseDirectory()));
} else {
QLineEdit* edit = new QLineEdit(item.property("value").toString());
edit->setMinimumWidth(200);
edits.push_back(edit);
formLayout->addRow(new QLabel(item.property("label").toString()), edit);
}
}
QDialogButtonBox* buttons = new QDialogButtonBox(QDialogButtonBox::Ok);
QDialogButtonBox* buttons = new QDialogButtonBox(
QDialogButtonBox::Ok
| (cancelButton ? QDialogButtonBox::Cancel : QDialogButtonBox::NoButton)
);
connect(buttons, SIGNAL(accepted()), editDialog, SLOT(accept()));
connect(buttons, SIGNAL(rejected()), editDialog, SLOT(reject()));
layout->addWidget(buttons);
if (editDialog->exec() == QDialog::Accepted) {
int result = editDialog->exec();
if (result == QDialog::Accepted) {
int e = -1;
int d = -1;
for (int i = 0; i < form.property("length").toInt32(); ++i) {
QScriptValue item = form.property(i);
QScriptValue value = item.property("value");
bool ok = true;
if (value.isNumber()) {
value = edits.at(i)->text().toDouble(&ok);
} else if (value.isString()) {
value = edits.at(i)->text();
} else if (value.isBool()) {
if (edits.at(i)->text() == "true") {
value = true;
} else if (edits.at(i)->text() == "false") {
value = false;
} else {
ok = false;
}
}
if (ok) {
item.setProperty("value", value);
if (item.property("button").toString() != "") {
// Nothing to do
} else if (item.property("directory").toString() != "") {
d += 1;
value = directories.at(d)->property("path").toString();
item.setProperty("directory", value);
form.setProperty(i, item);
} else {
e += 1;
bool ok = true;
if (value.isNumber()) {
value = edits.at(e)->text().toDouble(&ok);
} else if (value.isString()) {
value = edits.at(e)->text();
} else if (value.isBool()) {
if (edits.at(e)->text() == "true") {
value = true;
} else if (edits.at(e)->text() == "false") {
value = false;
} else {
ok = false;
}
}
if (ok) {
item.setProperty("value", value);
form.setProperty(i, item);
}
}
}
}
delete editDialog;
return (result == QDialog::Accepted);
}
return form;
return false;
}
/// Display a prompt with a text box
@ -197,7 +295,6 @@ QScriptValue WindowScriptingInterface::showBrowse(const QString& title, const QS
// filename if the directory is valid.
QString path = "";
QFileInfo fileInfo = QFileInfo(directory);
qDebug() << "File: " << directory << fileInfo.isFile();
if (fileInfo.isDir()) {
fileInfo.setFile(directory, "__HIFI_INVALID_FILE__");
path = fileInfo.filePath();
@ -205,7 +302,6 @@ QScriptValue WindowScriptingInterface::showBrowse(const QString& title, const QS
QFileDialog fileDialog(Application::getInstance()->getWindow(), title, path, nameFilter);
fileDialog.setAcceptMode(acceptMode);
qDebug() << "Opening!";
QUrl fileUrl(directory);
if (acceptMode == QFileDialog::AcceptSave) {
fileDialog.setFileMode(QFileDialog::Directory);

View file

@ -42,9 +42,12 @@ private slots:
QScriptValue showBrowse(const QString& title, const QString& directory, const QString& nameFilter,
QFileDialog::AcceptMode acceptMode = QFileDialog::AcceptOpen);
QScriptValue showS3Browse(const QString& nameFilter);
void chooseDirectory();
private:
WindowScriptingInterface();
QString jsRegExp2QtRegExp(QString string);
};
#endif // hifi_WindowScriptingInterface_h

View file

@ -120,6 +120,7 @@ MetavoxelEditor::MetavoxelEditor() :
addTool(new EraseHeightfieldTool(this));
addTool(new HeightfieldHeightBrushTool(this));
addTool(new HeightfieldColorBrushTool(this));
addTool(new HeightfieldTextureBrushTool(this));
updateAttributes();
@ -956,14 +957,29 @@ void ImportHeightfieldTool::apply() {
HeightfieldBuffer* buffer = static_cast<HeightfieldBuffer*>(bufferData.data());
MetavoxelData data;
data.setSize(scale);
HeightfieldDataPointer heightPointer(new HeightfieldData(buffer->getUnextendedHeight()));
QByteArray height = buffer->getUnextendedHeight();
HeightfieldHeightDataPointer heightPointer(new HeightfieldHeightData(height));
data.setRoot(AttributeRegistry::getInstance()->getHeightfieldAttribute(), new MetavoxelNode(AttributeValue(
AttributeRegistry::getInstance()->getHeightfieldAttribute(), encodeInline(heightPointer))));
if (!buffer->getColor().isEmpty()) {
HeightfieldDataPointer colorPointer(new HeightfieldData(buffer->getUnextendedColor()));
data.setRoot(AttributeRegistry::getInstance()->getHeightfieldColorAttribute(), new MetavoxelNode(AttributeValue(
AttributeRegistry::getInstance()->getHeightfieldColorAttribute(), encodeInline(colorPointer))));
QByteArray color;
if (buffer->getColor().isEmpty()) {
const int WHITE_VALUE = 0xFF;
color = QByteArray(height.size() * HeightfieldData::COLOR_BYTES, WHITE_VALUE);
} else {
color = buffer->getUnextendedColor();
}
HeightfieldColorDataPointer colorPointer(new HeightfieldColorData(color));
data.setRoot(AttributeRegistry::getInstance()->getHeightfieldColorAttribute(), new MetavoxelNode(AttributeValue(
AttributeRegistry::getInstance()->getHeightfieldColorAttribute(), encodeInline(colorPointer))));
int size = glm::sqrt((float)height.size()) + HeightfieldBuffer::SHARED_EDGE;
QByteArray texture(size * size, 0);
HeightfieldTextureDataPointer texturePointer(new HeightfieldTextureData(texture));
data.setRoot(AttributeRegistry::getInstance()->getHeightfieldTextureAttribute(), new MetavoxelNode(AttributeValue(
AttributeRegistry::getInstance()->getHeightfieldTextureAttribute(), encodeInline(texturePointer))));
MetavoxelEditMessage message = { QVariant::fromValue(SetDataEdit(
_translation->getValue() + buffer->getTranslation() * scale, data)) };
Application::getInstance()->getMetavoxels()->applyEdit(message, true);
@ -1100,6 +1116,10 @@ HeightfieldBrushTool::HeightfieldBrushTool(MetavoxelEditor* editor, const QStrin
_radius->setValue(1.0);
}
bool HeightfieldBrushTool::appliesTo(const AttributePointer& attribute) const {
return attribute->inherits("HeightfieldAttribute");
}
void HeightfieldBrushTool::render() {
if (Application::getInstance()->isMouseHidden()) {
return;
@ -1153,5 +1173,29 @@ HeightfieldColorBrushTool::HeightfieldColorBrushTool(MetavoxelEditor* editor) :
}
QVariant HeightfieldColorBrushTool::createEdit(bool alternate) {
return QVariant::fromValue(PaintHeightfieldColorEdit(_position, _radius->value(), _color->getColor()));
return QVariant::fromValue(PaintHeightfieldColorEdit(_position, _radius->value(),
alternate ? QColor() : _color->getColor()));
}
HeightfieldTextureBrushTool::HeightfieldTextureBrushTool(MetavoxelEditor* editor) :
HeightfieldBrushTool(editor, "Texture Brush") {
_form->addRow(_textureEditor = new SharedObjectEditor(&HeightfieldTexture::staticMetaObject, false));
connect(_textureEditor, &SharedObjectEditor::objectChanged, this, &HeightfieldTextureBrushTool::updateTexture);
}
QVariant HeightfieldTextureBrushTool::createEdit(bool alternate) {
if (alternate) {
return QVariant::fromValue(PaintHeightfieldTextureEdit(_position, _radius->value(), SharedObjectPointer(), QColor()));
} else {
SharedObjectPointer texture = _textureEditor->getObject();
_textureEditor->detachObject();
return QVariant::fromValue(PaintHeightfieldTextureEdit(_position, _radius->value(), texture,
_texture ? _texture->getAverageColor() : QColor()));
}
}
void HeightfieldTextureBrushTool::updateTexture() {
HeightfieldTexture* texture = static_cast<HeightfieldTexture*>(_textureEditor->getObject().data());
_texture = Application::getInstance()->getTextureCache()->getTexture(texture->getURL());
}
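
The import path now always seeds color and texture layers alongside the height data. As a worked example of the texture sizing arithmetic (assuming HeightfieldBuffer::SHARED_EDGE is 1):

    // An unextended 64 x 64 height array has height.size() == 4096,
    // so size = sqrt(4096) + 1 = 65, and the blank texture layer holds
    // 65 * 65 zero bytes -- one texture index per sample of the extended edge.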

View file

@ -28,6 +28,7 @@ class QScrollArea;
class QSpinBox;
class MetavoxelTool;
class SharedObjectEditor;
class Vec3Editor;
/// Allows editing metavoxels.
@ -311,6 +312,8 @@ public:
HeightfieldBrushTool(MetavoxelEditor* editor, const QString& name);
virtual bool appliesTo(const AttributePointer& attribute) const;
virtual void render();
virtual bool eventFilter(QObject* watched, QEvent* event);
@ -359,4 +362,26 @@ private:
QColorEditor* _color;
};
/// Allows texturing parts of the heightfield.
class HeightfieldTextureBrushTool : public HeightfieldBrushTool {
Q_OBJECT
public:
HeightfieldTextureBrushTool(MetavoxelEditor* editor);
protected:
virtual QVariant createEdit(bool alternate);
private slots:
void updateTexture();
private:
SharedObjectEditor* _textureEditor;
QSharedPointer<NetworkTexture> _texture;
};
#endif // hifi_MetavoxelEditor_h

View file

@ -149,9 +149,16 @@ void PreferencesDialog::loadPreferences() {
ui.faceshiftEyeDeflectionSider->setValue(menuInstance->getFaceshiftEyeDeflection() *
ui.faceshiftEyeDeflectionSider->maximum());
ui.audioJitterSpin->setValue(menuInstance->getAudioJitterBufferFrames());
const InboundAudioStream::Settings& streamSettings = menuInstance->getReceivedAudioStreamSettings();
ui.maxFramesOverDesiredSpin->setValue(menuInstance->getMaxFramesOverDesired());
ui.dynamicJitterBuffersCheckBox->setChecked(streamSettings._dynamicJitterBuffers);
ui.staticDesiredJitterBufferFramesSpin->setValue(streamSettings._staticDesiredJitterBufferFrames);
ui.maxFramesOverDesiredSpin->setValue(streamSettings._maxFramesOverDesired);
ui.useStdevForJitterCalcCheckBox->setChecked(streamSettings._useStDevForJitterCalc);
ui.windowStarveThresholdSpin->setValue(streamSettings._windowStarveThreshold);
ui.windowSecondsForDesiredCalcOnTooManyStarvesSpin->setValue(streamSettings._windowSecondsForDesiredCalcOnTooManyStarves);
ui.windowSecondsForDesiredReductionSpin->setValue(streamSettings._windowSecondsForDesiredReduction);
ui.repetitionWithFadeCheckBox->setChecked(streamSettings._repetitionWithFade);
ui.realWorldFieldOfViewSpin->setValue(menuInstance->getRealWorldFieldOfView());
@ -241,16 +248,18 @@ void PreferencesDialog::savePreferences() {
Menu::getInstance()->setInvertSixenseButtons(ui.invertSixenseButtonsCheckBox->isChecked());
Menu::getInstance()->setAudioJitterBufferFrames(ui.audioJitterSpin->value());
if (Menu::getInstance()->getAudioJitterBufferFrames() != 0) {
Application::getInstance()->getAudio()->setDynamicJitterBuffers(false);
Application::getInstance()->getAudio()->setStaticDesiredJitterBufferFrames(Menu::getInstance()->getAudioJitterBufferFrames());
} else {
Application::getInstance()->getAudio()->setDynamicJitterBuffers(true);
}
InboundAudioStream::Settings streamSettings;
streamSettings._dynamicJitterBuffers = ui.dynamicJitterBuffersCheckBox->isChecked();
streamSettings._staticDesiredJitterBufferFrames = ui.staticDesiredJitterBufferFramesSpin->value();
streamSettings._maxFramesOverDesired = ui.maxFramesOverDesiredSpin->value();
streamSettings._useStDevForJitterCalc = ui.useStdevForJitterCalcCheckBox->isChecked();
streamSettings._windowStarveThreshold = ui.windowStarveThresholdSpin->value();
streamSettings._windowSecondsForDesiredCalcOnTooManyStarves = ui.windowSecondsForDesiredCalcOnTooManyStarvesSpin->value();
streamSettings._windowSecondsForDesiredReduction = ui.windowSecondsForDesiredReductionSpin->value();
streamSettings._repetitionWithFade = ui.repetitionWithFadeCheckBox->isChecked();
Menu::getInstance()->setMaxFramesOverDesired(ui.maxFramesOverDesiredSpin->value());
Application::getInstance()->getAudio()->setMaxFramesOverDesired(Menu::getInstance()->getMaxFramesOverDesired());
Menu::getInstance()->setReceivedAudioStreamSettings(streamSettings);
Application::getInstance()->getAudio()->setReceivedAudioStreamSettings(streamSettings);
Application::getInstance()->resizeGL(Application::getInstance()->getGLWidget()->width(),
Application::getInstance()->getGLWidget()->height());
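
The deleted branch encoded "dynamic" as a jitter-frame count of zero; the Settings object makes that state an explicit flag. A minimal sketch of the old-to-new mapping (using the legacy spin box named above):

    InboundAudioStream::Settings settings;
    int legacyFrames = ui.audioJitterSpin->value();
    settings._dynamicJitterBuffers = (legacyFrames == 0);   // 0 used to mean "automatic"
    if (!settings._dynamicJitterBuffers) {
        settings._staticDesiredJitterBufferFrames = legacyFrames;
    }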

View file

@ -282,19 +282,10 @@ void Stats::display(
pingVoxel = totalPingVoxel/voxelServerCount;
}
Audio* audio = Application::getInstance()->getAudio();
lines = _expanded ? 4 : 3;
drawBackground(backgroundColor, horizontalOffset, 0, _pingStatsWidth, lines * STATS_PELS_PER_LINE + 10);
horizontalOffset += 5;
char audioJitter[30];
sprintf(audioJitter,
"Buffer msecs %.1f",
audio->getDesiredJitterBufferFrames() * BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC);
drawText(30, glWidget->height() - 22, scale, rotation, font, audioJitter, color);
char audioPing[30];
sprintf(audioPing, "Audio ping: %d", pingAudio);
@ -698,27 +689,6 @@ void Stats::display(
drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
}
// draw local light stats
QVector<Model::LocalLight> localLights = Application::getInstance()->getAvatarManager().getLocalLights();
verticalOffset = 400;
horizontalOffset = 20;
char buffer[128];
for (int i = 0; i < localLights.size(); i++) {
glm::vec3 lightDirection = localLights.at(i).direction;
snprintf(buffer, sizeof(buffer), "Light %d direction (%.2f, %.2f, %.2f)", i, lightDirection.x, lightDirection.y, lightDirection.z);
drawText(horizontalOffset, verticalOffset, scale, rotation, font, buffer, color);
verticalOffset += STATS_PELS_PER_LINE;
glm::vec3 lightColor = localLights.at(i).color;
snprintf(buffer, sizeof(buffer), "Light %d color (%.2f, %.2f, %.2f)", i, lightColor.x, lightColor.y, lightColor.z);
drawText(horizontalOffset, verticalOffset, scale, rotation, font, buffer, color);
verticalOffset += STATS_PELS_PER_LINE;
}
}

View file

@ -1464,6 +1464,97 @@ padding: 10px;margin-top:10px</string>
</item>
</layout>
</item>
<!-- dynamic jitter buffers ____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_23">
<property name="spacing">
<number>0</number>
</property>
<property name="topMargin">
<number>10</number>
</property>
<property name="rightMargin">
<number>0</number>
</property>
<property name="bottomMargin">
<number>10</number>
</property>
<item>
<widget class="QLabel" name="label_20">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="styleSheet">
<string notr="true">color: rgb(51, 51, 51)</string>
</property>
<property name="text">
<string>Enable Dynamic Jitter Buffers</string>
</property>
<property name="indent">
<number>15</number>
</property>
<property name="buddy">
<cstring>dynamicJitterBuffersCheckBox</cstring>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_17">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QCheckBox" name="dynamicJitterBuffersCheckBox">
<property name="sizePolicy">
<sizepolicy hsizetype="Minimum" vsizetype="Fixed">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>32</width>
<height>0</height>
</size>
</property>
<property name="baseSize">
<size>
<width>0</width>
<height>0</height>
</size>
</property>
<property name="text">
<string/>
</property>
<property name="iconSize">
<size>
<width>32</width>
<height>32</height>
</size>
</property>
</widget>
</item>
</layout>
</item>
<!-- static desired jitter frames____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_8">
<property name="spacing">
@ -1489,13 +1580,13 @@ padding: 10px;margin-top:10px</string>
<string notr="true">color: rgb(51, 51, 51)</string>
</property>
<property name="text">
<string>Audio Jitter Buffer Frames (0 for automatic)</string>
<string>Static Jitter Buffer Frames</string>
</property>
<property name="indent">
<number>15</number>
</property>
<property name="buddy">
<cstring>audioJitterSpin</cstring>
<cstring>staticDesiredJitterBufferFramesSpin</cstring>
</property>
</widget>
</item>
@ -1518,7 +1609,7 @@ padding: 10px;margin-top:10px</string>
</spacer>
</item>
<item>
<widget class="QSpinBox" name="audioJitterSpin">
<widget class="QSpinBox" name="staticDesiredJitterBufferFramesSpin">
<property name="sizePolicy">
<sizepolicy hsizetype="Fixed" vsizetype="Fixed">
<horstretch>0</horstretch>
@ -1555,6 +1646,7 @@ padding: 10px;margin-top:10px</string>
</item>
</layout>
</item>
<!-- max frames over desired ____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_13">
<property name="spacing">
@ -1591,7 +1683,7 @@ padding: 10px;margin-top:10px</string>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_12">
<spacer name="horizontalSpacer_20">
<property name="font">
<font>
<family>Arial</family>
@ -1646,7 +1738,467 @@ padding: 10px;margin-top:10px</string>
</item>
</layout>
</item>
<!-- use stdev for jitter calc ____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_19">
<property name="spacing">
<number>0</number>
</property>
<property name="topMargin">
<number>10</number>
</property>
<property name="rightMargin">
<number>0</number>
</property>
<property name="bottomMargin">
<number>10</number>
</property>
<item>
<widget class="QLabel" name="label_16">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="styleSheet">
<string notr="true">color: rgb(51, 51, 51)</string>
</property>
<property name="text">
<string>Use Stdev for Dynamic Jitter Calc</string>
</property>
<property name="indent">
<number>15</number>
</property>
<property name="buddy">
<cstring>useStdevForJitterCalcCheckBox</cstring>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_21">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QCheckBox" name="useStdevForJitterCalcCheckBox">
<property name="sizePolicy">
<sizepolicy hsizetype="Minimum" vsizetype="Fixed">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>32</width>
<height>0</height>
</size>
</property>
<property name="baseSize">
<size>
<width>0</width>
<height>0</height>
</size>
</property>
<property name="text">
<string/>
</property>
<property name="iconSize">
<size>
<width>32</width>
<height>32</height>
</size>
</property>
</widget>
</item>
</layout>
</item>
<!-- window starve threshold ____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_20">
<property name="spacing">
<number>0</number>
</property>
<property name="topMargin">
<number>10</number>
</property>
<property name="rightMargin">
<number>0</number>
</property>
<property name="bottomMargin">
<number>10</number>
</property>
<item alignment="Qt::AlignLeft">
<widget class="QLabel" name="label_17">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="styleSheet">
<string notr="true">color: rgb(51, 51, 51)</string>
</property>
<property name="text">
<string>Window A Starve Threshold</string>
</property>
<property name="indent">
<number>15</number>
</property>
<property name="buddy">
<cstring>windowStarveThresholdSpin</cstring>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_22">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QSpinBox" name="windowStarveThresholdSpin">
<property name="sizePolicy">
<sizepolicy hsizetype="Fixed" vsizetype="Fixed">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>95</width>
<height>36</height>
</size>
</property>
<property name="maximumSize">
<size>
<width>70</width>
<height>16777215</height>
</size>
</property>
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="minimum">
<number>0</number>
</property>
<property name="maximum">
<number>10000</number>
</property>
<property name="value">
<number>1</number>
</property>
</widget>
</item>
</layout>
</item>
<!-- window A seconds ____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_21">
<property name="spacing">
<number>0</number>
</property>
<property name="topMargin">
<number>10</number>
</property>
<property name="rightMargin">
<number>0</number>
</property>
<property name="bottomMargin">
<number>10</number>
</property>
<item alignment="Qt::AlignLeft">
<widget class="QLabel" name="label_18">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="styleSheet">
<string notr="true">color: rgb(51, 51, 51)</string>
</property>
<property name="text">
<string>Window A (raise desired on N starves) Seconds</string>
</property>
<property name="indent">
<number>15</number>
</property>
<property name="buddy">
<cstring>windowSecondsForDesiredCalcOnTooManyStarvesSpin</cstring>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_23">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QSpinBox" name="windowSecondsForDesiredCalcOnTooManyStarvesSpin">
<property name="sizePolicy">
<sizepolicy hsizetype="Fixed" vsizetype="Fixed">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>95</width>
<height>36</height>
</size>
</property>
<property name="maximumSize">
<size>
<width>70</width>
<height>16777215</height>
</size>
</property>
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="minimum">
<number>0</number>
</property>
<property name="maximum">
<number>10000</number>
</property>
<property name="value">
<number>1</number>
</property>
</widget>
</item>
</layout>
</item>
<!-- window B seconds ____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_22">
<property name="spacing">
<number>0</number>
</property>
<property name="topMargin">
<number>10</number>
</property>
<property name="rightMargin">
<number>0</number>
</property>
<property name="bottomMargin">
<number>10</number>
</property>
<item alignment="Qt::AlignLeft">
<widget class="QLabel" name="label_19">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="styleSheet">
<string notr="true">color: rgb(51, 51, 51)</string>
</property>
<property name="text">
<string>Window B (desired ceiling) Seconds</string>
</property>
<property name="indent">
<number>15</number>
</property>
<property name="buddy">
<cstring>windowSecondsForDesiredReductionSpin</cstring>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_24">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QSpinBox" name="windowSecondsForDesiredReductionSpin">
<property name="sizePolicy">
<sizepolicy hsizetype="Fixed" vsizetype="Fixed">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>95</width>
<height>36</height>
</size>
</property>
<property name="maximumSize">
<size>
<width>70</width>
<height>16777215</height>
</size>
</property>
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="minimum">
<number>0</number>
</property>
<property name="maximum">
<number>10000</number>
</property>
<property name="value">
<number>1</number>
</property>
</widget>
</item>
</layout>
</item>
<!-- repetition with fade ____________________________________________________________________________ -->
<item>
<layout class="QHBoxLayout" name="horizontalLayout_24">
<property name="spacing">
<number>0</number>
</property>
<property name="topMargin">
<number>10</number>
</property>
<property name="rightMargin">
<number>0</number>
</property>
<property name="bottomMargin">
<number>10</number>
</property>
<item>
<widget class="QLabel" name="label_21">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="styleSheet">
<string notr="true">color: rgb(51, 51, 51)</string>
</property>
<property name="text">
<string>Repetition with Fade</string>
</property>
<property name="indent">
<number>15</number>
</property>
<property name="buddy">
<cstring>repetitionWithFadeCheckBox</cstring>
</property>
</widget>
</item>
<item>
<spacer name="horizontalSpacer_25">
<property name="font">
<font>
<family>Arial</family>
</font>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
<property name="sizeHint" stdset="0">
<size>
<width>40</width>
<height>20</height>
</size>
</property>
</spacer>
</item>
<item>
<widget class="QCheckBox" name="repetitionWithFadeCheckBox">
<property name="sizePolicy">
<sizepolicy hsizetype="Minimum" vsizetype="Fixed">
<horstretch>0</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>32</width>
<height>0</height>
</size>
</property>
<property name="baseSize">
<size>
<width>0</width>
<height>0</height>
</size>
</property>
<property name="text">
<string/>
</property>
<property name="iconSize">
<size>
<width>32</width>
<height>32</height>
</size>
</property>
</widget>
</item>
</layout>
</item>
<item>
<layout class="QHBoxLayout" name="horizontalLayout_6">

View file

@ -1,26 +0,0 @@
//
// AudioFilter.cpp
// hifi
//
// Created by Craig Hansen-Sturm on 8/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <math.h>
#include <vector>
#include <SharedUtil.h>
#include "AudioRingBuffer.h"
#include "AudioFilter.h"
template<>
AudioFilterPEQ3::FilterParameter AudioFilterPEQ3::_profiles[ AudioFilterPEQ3::_profileCount ][ AudioFilterPEQ3::_filterCount ] = {
// Freq Gain Q Freq Gain Q Freq Gain Q
{ { 300.0f, 1.0f, 1.0f }, { 1000.0f, 1.0f, 1.0f }, { 4000.0f, 1.0f, 1.0f } }, // flat response (default)
{ { 300.0f, 1.0f, 1.0f }, { 1000.0f, 1.0f, 1.0f }, { 4000.0f, 0.1f, 1.0f } }, // treble cut
{ { 300.0f, 0.1f, 1.0f }, { 1000.0f, 1.0f, 1.0f }, { 4000.0f, 1.0f, 1.0f } }, // bass cut
{ { 300.0f, 1.5f, 0.71f }, { 1000.0f, 0.5f, 1.0f }, { 4000.0f, 1.50f, 0.71f } } // smiley curve
};

View file

@ -12,7 +12,7 @@
#ifndef hifi_AudioFilter_h
#define hifi_AudioFilter_h
////////////////////////////////////////////////////////////////////////////////////////////
//
// Implements a standard biquad filter in "Direct Form 1"
// Reference http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt
//
@ -51,15 +51,15 @@ public:
//
// public interface
//
void setParameters( const float a0, const float a1, const float a2, const float b1, const float b2 ) {
void setParameters(const float a0, const float a1, const float a2, const float b1, const float b2) {
_a0 = a0; _a1 = a1; _a2 = a2; _b1 = b1; _b2 = b2;
}
void getParameters( float& a0, float& a1, float& a2, float& b1, float& b2 ) {
void getParameters(float& a0, float& a1, float& a2, float& b1, float& b2) {
a0 = _a0; a1 = _a1; a2 = _a2; b1 = _b1; b2 = _b2;
}
void render( const float* in, float* out, const int frames) {
void render(const float* in, float* out, const int frames) {
float x;
float y;
@ -90,209 +90,223 @@ public:
}
};
////////////////////////////////////////////////////////////////////////////////////////////
// Implements a single-band parametric EQ using a biquad "peaking EQ" configuration
//
// gain > 1.0 boosts the center frequency
// gain < 1.0 cuts the center frequency
//
class AudioParametricEQ {
//
// Implements the common base-class interface for all audio filter objects
//
template< class T >
class AudioFilter {
protected:
//
// private data
// data
//
AudioBiquad _kernel;
float _sampleRate;
float _frequency;
float _gain;
float _slope;
//
// helpers
//
void updateKernel() {
/*
a0 = 1 + alpha*A
a1 = -2*cos(w0)
a2 = 1 - alpha*A
b1 = -2*cos(w0)
b2 = 1 - alpha/A
*/
const float a = _gain;
const float omega = TWO_PI * _frequency / _sampleRate;
const float alpha = 0.5f * sinf(omega) / _slope;
const float gamma = 1.0f / ( 1.0f + (alpha/a) );
const float a0 = 1.0f + (alpha*a);
const float a1 = -2.0f * cosf(omega);
const float a2 = 1.0f - (alpha*a);
const float b1 = a1;
const float b2 = 1.0f - (alpha/a);
_kernel.setParameters( a0*gamma,a1*gamma,a2*gamma,b1*gamma,b2*gamma );
static_cast<T*>(this)->updateKernel();
}
public:
//
// ctor/dtor
//
AudioParametricEQ() {
AudioFilter() {
setParameters(0.0f, 0.0f, 0.0f, 0.0f);
updateKernel();
}
~AudioParametricEQ() {
~AudioFilter() {
}
//
// public interface
//
void setParameters( const float sampleRate, const float frequency, const float gain, const float slope ) {
_sampleRate = std::max(sampleRate,1.0f);
_frequency = std::max(frequency,2.0f);
_gain = std::max(gain,0.0f);
_slope = std::max(slope,0.00001f);
void setParameters(const float sampleRate, const float frequency, const float gain, const float slope) {
_sampleRate = std::max(sampleRate, 1.0f);
_frequency = std::max(frequency, 2.0f);
_gain = std::max(gain, 0.0f);
_slope = std::max(slope, 0.00001f);
updateKernel();
}
void getParameters( float& sampleRate, float& frequency, float& gain, float& slope ) {
void getParameters(float& sampleRate, float& frequency, float& gain, float& slope) {
sampleRate = _sampleRate; frequency = _frequency; gain = _gain; slope = _slope;
}
void render(const float* in, float* out, const int frames ) {
void render(const float* in, float* out, const int frames) {
_kernel.render(in, out, frames);
}
void reset() {
_kernel.reset();
}
};
////////////////////////////////////////////////////////////////////////////////////////////
// Helper/convenience class that implements a bank of EQ objects
//
template< typename T, const int N>
class AudioFilterBank {
//
// types
//
struct FilterParameter {
float _p1;
float _p2;
float _p3;
};
//
// private static data
//
static const int _filterCount = N;
static const int _profileCount = 4;
static FilterParameter _profiles[_profileCount][_filterCount];
//
// private data
//
T _filters[ _filterCount ];
float* _buffer;
float _sampleRate;
uint16_t _frameCount;
// Implements a low-shelf filter using a biquad
//
class AudioFilterLSF : public AudioFilter< AudioFilterLSF >
{
public:
//
// ctor/dtor
// helpers
//
AudioFilterBank()
: _buffer(NULL)
, _sampleRate(0.)
, _frameCount(0) {
}
~AudioFilterBank() {
finalize();
}
//
// public interface
//
void initialize( const float sampleRate, const int frameCount ) {
finalize();
void updateKernel() {
_buffer = (float*)malloc( frameCount * sizeof(float) );
if(!_buffer) {
return;
}
const float a = _gain;
const float aAdd1 = a + 1.0f;
const float aSub1 = a - 1.0f;
const float omega = TWO_PI * _frequency / _sampleRate;
const float aAdd1TimesCosOmega = aAdd1 * cosf(omega);
const float aSub1TimesCosOmega = aSub1 * cosf(omega);
const float alpha = 0.5f * sinf(omega) / _slope;
const float zeta = 2.0f * sqrtf(a) * alpha;
/*
b0 = A*( (A+1) - (A-1)*cos(w0) + 2*sqrt(A)*alpha )
b1 = 2*A*( (A-1) - (A+1)*cos(w0) )
b2 = A*( (A+1) - (A-1)*cos(w0) - 2*sqrt(A)*alpha )
a0 = (A+1) + (A-1)*cos(w0) + 2*sqrt(A)*alpha
a1 = -2*( (A-1) + (A+1)*cos(w0) )
a2 = (A+1) + (A-1)*cos(w0) - 2*sqrt(A)*alpha
*/
const float b0 = +1.0f * (aAdd1 - aSub1TimesCosOmega + zeta) * a;
const float b1 = +2.0f * (aSub1 - aAdd1TimesCosOmega + ZERO) * a;
const float b2 = +1.0f * (aAdd1 - aSub1TimesCosOmega - zeta) * a;
const float a0 = +1.0f * (aAdd1 + aSub1TimesCosOmega + zeta);
const float a1 = -2.0f * (aSub1 + aAdd1TimesCosOmega + ZERO);
const float a2 = +1.0f * (aAdd1 + aSub1TimesCosOmega - zeta);
_sampleRate = sampleRate;
_frameCount = frameCount;
reset();
loadProfile(0); // load default profile "flat response" into the bank (see AudioFilter.cpp)
const float normA0 = 1.0f / a0;
_kernel.setParameters(b0 * normA0, b1 * normA0 , b2 * normA0, a1 * normA0, a2 * normA0);
}
void finalize() {
if (_buffer ) {
free (_buffer);
_buffer = NULL;
}
}
void loadProfile( int profileIndex ) {
if (profileIndex >= 0 && profileIndex < _profileCount) {
for (int i = 0; i < _filterCount; ++i) {
FilterParameter p = _profiles[profileIndex][i];
_filters[i].setParameters(_sampleRate,p._p1,p._p2,p._p3);
}
}
}
void render( const float* in, float* out, const int frameCount ) {
for (int i = 0; i < _filterCount; ++i) {
_filters[i].render( in, out, frameCount );
}
}
void render( const int16_t* in, int16_t* out, const int frameCount ) {
if (!_buffer || ( frameCount > _frameCount ))
return;
const int scale = (2 << ((8*sizeof(int16_t))-1));
// convert int16_t to float32 (normalized to -1. ... 1.)
for (int i = 0; i < frameCount; ++i) {
_buffer[i] = ((float)(*in++)) / scale;
}
// for this filter, we share input/output buffers at each stage, but our design does not mandate this
render( _buffer, _buffer, frameCount );
// convert float32 to int16_t
for (int i = 0; i < frameCount; ++i) {
*out++ = (int16_t)(_buffer[i] * scale);
}
}
void reset() {
for (int i = 0; i < _filterCount; ++i ) {
_filters[i].reset();
}
}
};
////////////////////////////////////////////////////////////////////////////////////////////
// Specializations of AudioFilterBank
//
typedef AudioFilterBank< AudioParametricEQ, 1> AudioFilterPEQ1; // bank with one band of PEQ
typedef AudioFilterBank< AudioParametricEQ, 2> AudioFilterPEQ2; // bank with two bands of PEQ
typedef AudioFilterBank< AudioParametricEQ, 3> AudioFilterPEQ3; // bank with three bands of PEQ
// etc....
// Implements a high-shelf filter using a biquad
//
class AudioFilterHSF : public AudioFilter< AudioFilterHSF >
{
public:
//
// helpers
//
void updateKernel() {
const float a = _gain;
const float aAdd1 = a + 1.0f;
const float aSub1 = a - 1.0f;
const float omega = TWO_PI * _frequency / _sampleRate;
const float aAdd1TimesCosOmega = aAdd1 * cosf(omega);
const float aSub1TimesCosOmega = aSub1 * cosf(omega);
const float alpha = 0.5f * sinf(omega) / _slope;
const float zeta = 2.0f * sqrtf(a) * alpha;
/*
b0 = A*( (A+1) + (A-1)*cos(w0) + 2*sqrt(A)*alpha )
b1 = -2*A*( (A-1) + (A+1)*cos(w0) )
b2 = A*( (A+1) + (A-1)*cos(w0) - 2*sqrt(A)*alpha )
a0 = (A+1) - (A-1)*cos(w0) + 2*sqrt(A)*alpha
a1 = 2*( (A-1) - (A+1)*cos(w0) )
a2 = (A+1) - (A-1)*cos(w0) - 2*sqrt(A)*alpha
*/
const float b0 = +1.0f * (aAdd1 + aSub1TimesCosOmega + zeta) * a;
const float b1 = -2.0f * (aSub1 + aAdd1TimesCosOmega + ZERO) * a;
const float b2 = +1.0f * (aAdd1 + aSub1TimesCosOmega - zeta) * a;
const float a0 = +1.0f * (aAdd1 - aSub1TimesCosOmega + zeta);
const float a1 = +2.0f * (aSub1 - aAdd1TimesCosOmega + ZERO);
const float a2 = +1.0f * (aAdd1 - aSub1TimesCosOmega - zeta);
const float normA0 = 1.0f / a0;
_kernel.setParameters(b0 * normA0, b1 * normA0 , b2 * normA0, a1 * normA0, a2 * normA0);
}
};
//
// Implements an all-pass filter using a biquad
//
class AudioFilterALL : public AudioFilter< AudioFilterALL >
{
public:
//
// helpers
//
void updateKernel() {
const float omega = TWO_PI * _frequency / _sampleRate;
const float cosOmega = cosf(omega);
const float alpha = 0.5f * sinf(omega) / _slope;
/*
b0 = 1 - alpha
b1 = -2*cos(w0)
b2 = 1 + alpha
a0 = 1 + alpha
a1 = -2*cos(w0)
a2 = 1 - alpha
*/
const float b0 = +1.0f - alpha;
const float b1 = -2.0f * cosOmega;
const float b2 = +1.0f + alpha;
const float a0 = +1.0f + alpha;
const float a1 = -2.0f * cosOmega;
const float a2 = +1.0f - alpha;
const float normA0 = 1.0f / a0;
_kernel.setParameters(b0 * normA0, b1 * normA0 , b2 * normA0, a1 * normA0, a2 * normA0);
}
};
//
// Implements a single-band parametric EQ using a biquad "peaking EQ" configuration
//
class AudioFilterPEQ : public AudioFilter< AudioFilterPEQ >
{
public:
//
// helpers
//
void updateKernel() {
const float a = _gain;
const float omega = TWO_PI * _frequency / _sampleRate;
const float cosOmega = cosf(omega);
const float alpha = 0.5f * sinf(omega) / _slope;
const float alphaMulA = alpha * a;
const float alphaDivA = alpha / a;
/*
b0 = 1 + alpha*A
b1 = -2*cos(w0)
b2 = 1 - alpha*A
a0 = 1 + alpha/A
a1 = -2*cos(w0)
a2 = 1 - alpha/A
*/
const float b0 = +1.0f + alphaMulA;
const float b1 = -2.0f * cosOmega;
const float b2 = +1.0f - alphaMulA;
const float a0 = +1.0f + alphaDivA;
const float a1 = -2.0f * cosOmega;
const float a2 = +1.0f - alphaDivA;
const float normA0 = 1.0f / a0;
_kernel.setParameters(b0 * normA0, b1 * normA0 , b2 * normA0, a1 * normA0, a2 * normA0);
}
};
#endif // hifi_AudioFilter_h
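
All of the shelf, all-pass, and peaking kernels above normalize the cookbook coefficients by a0 before handing them to the Direct Form 1 biquad, which realizes the standard difference equation from the Audio-EQ-Cookbook reference cited at the top of the file:

    // Direct Form 1 difference equation (cookbook naming):
    //   y[n] = (b0/a0)*x[n] + (b1/a0)*x[n-1] + (b2/a0)*x[n-2]
    //        - (a1/a0)*y[n-1] - (a2/a0)*y[n-2]

Note that AudioBiquad::setParameters names its feed-forward terms a0, a1, a2 and its feedback terms b1, b2 -- the reverse of the cookbook convention -- which is why every updateKernel passes (b0 * normA0, b1 * normA0, b2 * normA0, a1 * normA0, a2 * normA0).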

View file

@ -0,0 +1,48 @@
//
// AudioFilterBank.cpp
// hifi
//
// Created by Craig Hansen-Sturm on 8/10/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <math.h>
#include <SharedUtil.h>
#include "AudioRingBuffer.h"
#include "AudioFilter.h"
#include "AudioFilterBank.h"
template<>
AudioFilterLSF1s::FilterParameter
AudioFilterLSF1s::_profiles[ AudioFilterLSF1s::_profileCount ][ AudioFilterLSF1s::_filterCount ] = {
// Freq Gain Slope
{ { 1000.0f, 1.0f, 1.0f } } // flat response (default)
};
template<>
AudioFilterHSF1s::FilterParameter
AudioFilterHSF1s::_profiles[ AudioFilterHSF1s::_profileCount ][ AudioFilterHSF1s::_filterCount ] = {
// Freq Gain Slope
{ { 1000.0f, 1.0f, 1.0f } } // flat response (default)
};
template<>
AudioFilterPEQ1s::FilterParameter
AudioFilterPEQ1s::_profiles[ AudioFilterPEQ1s::_profileCount ][ AudioFilterPEQ1s::_filterCount ] = {
// Freq Gain Q
{ { 1000.0f, 1.0f, 1.0f } } // flat response (default)
};
template<>
AudioFilterPEQ3m::FilterParameter
AudioFilterPEQ3m::_profiles[ AudioFilterPEQ3m::_profileCount ][ AudioFilterPEQ3m::_filterCount ] = {
// Freq Gain Q Freq Gain Q Freq Gain Q
{ { 300.0f, 1.0f, 1.0f }, { 1000.0f, 1.0f, 1.0f }, { 4000.0f, 1.0f, 1.0f } }, // flat response (default)
{ { 300.0f, 1.0f, 1.0f }, { 1000.0f, 1.0f, 1.0f }, { 4000.0f, 0.1f, 1.0f } }, // treble cut
{ { 300.0f, 0.1f, 1.0f }, { 1000.0f, 1.0f, 1.0f }, { 4000.0f, 1.0f, 1.0f } }, // bass cut
{ { 300.0f, 1.5f, 0.71f }, { 1000.0f, 0.5f, 1.0f }, { 4000.0f, 1.50f, 0.71f } } // smiley curve
};
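
Profile row 0 is the flat default that initialize() loads; the other PEQ3m rows are selectable at runtime. A minimal usage sketch (sample rate and frame count are illustrative):

    int16_t in[512] = {}, out[512] = {};
    AudioFilterPEQ3m eq;               // mono bank, three peaking-EQ bands
    eq.initialize(24000.0f, 512);      // illustrative sample rate and frame count
    eq.loadProfile(1);                 // row 1 above: "treble cut" (4 kHz gain 0.1)
    eq.render(in, out, 512);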

View file

@ -0,0 +1,170 @@
//
// AudioFilterBank.h
// hifi
//
// Created by Craig Hansen-Sturm on 8/23/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioFilterBank_h
#define hifi_AudioFilterBank_h
//
// Helper/convenience class that implements a bank of Filter objects
//
template< typename T, const int N, const int C >
class AudioFilterBank {
//
// types
//
struct FilterParameter {
float _p1;
float _p2;
float _p3;
};
//
// private static data
//
static const int _filterCount = N;
static const int _channelCount = C;
static const int _profileCount = 4;
static FilterParameter _profiles[ _profileCount ][ _filterCount ];
//
// private data
//
T _filters[ _filterCount ][ _channelCount ];
float* _buffer[ _channelCount ];
float _sampleRate;
uint16_t _frameCount;
public:
//
// ctor/dtor
//
AudioFilterBank()
: _sampleRate(0.)
, _frameCount(0) {
for (int i = 0; i < _channelCount; ++i) {
_buffer[ i ] = NULL;
}
}
~AudioFilterBank() {
finalize();
}
//
// public interface
//
void initialize(const float sampleRate, const int frameCount) {
finalize();
for (int i = 0; i < _channelCount; ++i) {
_buffer[i] = (float*)malloc(frameCount * sizeof(float));
}
_sampleRate = sampleRate;
_frameCount = frameCount;
reset();
loadProfile(0); // load default profile "flat response" into the bank (see AudioFilterBank.cpp)
}
void finalize() {
for (int i = 0; i < _channelCount; ++i) {
if (_buffer[i]) {
free (_buffer[i]);
_buffer[i] = NULL;
}
}
}
void loadProfile(int profileIndex) {
if (profileIndex >= 0 && profileIndex < _profileCount) {
for (int i = 0; i < _filterCount; ++i) {
FilterParameter p = _profiles[profileIndex][i];
for (int j = 0; j < _channelCount; ++j) {
_filters[i][j].setParameters(_sampleRate,p._p1,p._p2,p._p3);
}
}
}
}
void setParameters(int filterStage, int filterChannel, const float sampleRate, const float frequency, const float gain,
const float slope) {
if (filterStage >= 0 && filterStage < _filterCount && filterChannel >= 0 && filterChannel < _channelCount) {
_filters[filterStage][filterChannel].setParameters(sampleRate,frequency,gain,slope);
}
}
void getParameters(int filterStage, int filterChannel, float& sampleRate, float& frequency, float& gain, float& slope) {
if (filterStage >= 0 && filterStage < _filterCount && filterChannel >= 0 && filterChannel < _channelCount) {
_filters[filterStage][filterChannel].getParameters(sampleRate,frequency,gain,slope);
}
}
void render(const int16_t* in, int16_t* out, const int frameCount) {
if (!_buffer[0] || (frameCount > _frameCount))
return;
const int scale = (1 << ((8 * sizeof(int16_t)) - 1)); // 32768: maps the full int16 range to -1. ... 1.
// de-interleave and convert int16_t to float32 (normalized to -1. ... 1.)
for (int i = 0; i < frameCount; ++i) {
for (int j = 0; j < _channelCount; ++j) {
_buffer[j][i] = ((float)(*in++)) / scale;
}
}
// now step through each filter
for (int i = 0; i < _channelCount; ++i) {
for (int j = 0; j < _filterCount; ++j) {
_filters[j][i].render( &_buffer[i][0], &_buffer[i][0], frameCount );
}
}
// convert float32 to int16_t and interleave
for (int i = 0; i < frameCount; ++i) {
for (int j = 0; j < _channelCount; ++j) {
*out++ = (int16_t)(_buffer[j][i] * scale);
}
}
}
void reset() {
for (int i = 0; i < _filterCount; ++i) {
for (int j = 0; j < _channelCount; ++j) {
_filters[i][j].reset();
}
}
}
};
//
// Specializations of AudioFilterBank
//
typedef AudioFilterBank< AudioFilterLSF, 1, 1> AudioFilterLSF1m; // mono bank with one band of LSF
typedef AudioFilterBank< AudioFilterLSF, 1, 2> AudioFilterLSF1s; // stereo bank with one band of LSF
typedef AudioFilterBank< AudioFilterHSF, 1, 1> AudioFilterHSF1m; // mono bank with one band of HSF
typedef AudioFilterBank< AudioFilterHSF, 1, 2> AudioFilterHSF1s; // stereo bank with one band of HSF
typedef AudioFilterBank< AudioFilterPEQ, 1, 1> AudioFilterPEQ1m; // mono bank with one band of PEQ
typedef AudioFilterBank< AudioFilterPEQ, 2, 1> AudioFilterPEQ2m; // mono bank with two bands of PEQ
typedef AudioFilterBank< AudioFilterPEQ, 3, 1> AudioFilterPEQ3m; // mono bank with three bands of PEQ
typedef AudioFilterBank< AudioFilterPEQ, 1, 2> AudioFilterPEQ1s; // stereo bank with one band of PEQ
typedef AudioFilterBank< AudioFilterPEQ, 2, 2> AudioFilterPEQ2s; // stereo bank with two bands of PEQ
typedef AudioFilterBank< AudioFilterPEQ, 3, 2> AudioFilterPEQ3s; // stereo bank with three bands of PEQ
// etc....
#endif // hifi_AudioFilterBank_h
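As a usage illustration (hypothetical driver code, not from this commit), a mono three-band parametric EQ bank could be set up and run like this, assuming a fixed frame size and a placeholder 44.1 kHz sample rate:

#include <cstdint>

// Hypothetical caller of the bank API declared above; the sample rate,
// frame size, and profile index are placeholder choices.
void equalizeFrameSketch(const int16_t* input, int16_t* output, int frameCount) {
    static AudioFilterPEQ3m eq;            // mono bank, three PEQ bands
    static bool initialized = false;
    if (!initialized) {
        eq.initialize(44100.0f, frameCount);   // allocates the per-channel float scratch buffers
        eq.loadProfile(3);                     // "smiley curve" from AudioFilterBank.cpp
        initialized = true;
    }
    eq.render(input, output, frameCount);      // int16 in/out; float conversion handled internally
}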

View file

@ -27,7 +27,6 @@ AudioInjector::AudioInjector(QObject* parent) :
_options(),
_shouldStop(false)
{
}
AudioInjector::AudioInjector(Sound* sound, const AudioInjectorOptions& injectorOptions) :
@ -35,7 +34,10 @@ AudioInjector::AudioInjector(Sound* sound, const AudioInjectorOptions& injectorO
_options(injectorOptions),
_shouldStop(false)
{
}
void AudioInjector::setOptions(AudioInjectorOptions& options) {
_options = options;
}
const uchar MAX_INJECTOR_VOLUME = 0xFF;
@ -55,8 +57,6 @@ void AudioInjector::injectAudio() {
}
NodeList* nodeList = NodeList::getInstance();
// setup the packet for injected audio
QByteArray injectAudioPacket = byteArrayWithPopulatedHeader(PacketTypeInjectAudio);
QDataStream packetStream(&injectAudioPacket, QIODevice::Append);
@ -73,9 +73,11 @@ void AudioInjector::injectAudio() {
packetStream << loopbackFlag;
// pack the position for injected audio
int positionOptionOffset = injectAudioPacket.size();
packetStream.writeRawData(reinterpret_cast<const char*>(&_options.getPosition()), sizeof(_options.getPosition()));
// pack our orientation for injected audio
int orientationOptionOffset = injectAudioPacket.size();
packetStream.writeRawData(reinterpret_cast<const char*>(&_options.getOrientation()), sizeof(_options.getOrientation()));
// pack zero for radius
@ -101,6 +103,12 @@ void AudioInjector::injectAudio() {
int bytesToCopy = std::min(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL,
soundByteArray.size() - currentSendPosition);
memcpy(injectAudioPacket.data() + positionOptionOffset,
&_options.getPosition(),
sizeof(_options.getPosition()));
memcpy(injectAudioPacket.data() + orientationOptionOffset,
&_options.getOrientation(),
sizeof(_options.getOrientation()));
// resize the QByteArray to the right size
injectAudioPacket.resize(numPreAudioDataBytes + bytesToCopy);
@ -112,6 +120,7 @@ void AudioInjector::injectAudio() {
memcpy(injectAudioPacket.data() + numPreAudioDataBytes, soundByteArray.data() + currentSendPosition, bytesToCopy);
// grab our audio mixer from the NodeList, if it exists
NodeList* nodeList = NodeList::getInstance();
SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
// send off this audio packet
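The pattern above is worth calling out: the injector records the byte offsets of the position and orientation fields when the packet is first packed, then memcpy()s fresh values into those offsets on every send instead of rebuilding the whole packet. A standalone sketch of the same technique (all names hypothetical):

#include <cstring>
#include <vector>

struct Vec3Sketch { float x, y, z; };

// Pack a packet once, recording where the mutable field starts.
std::vector<char> buildPacketSketch(const Vec3Sketch& position, int& positionOffsetOut) {
    std::vector<char> packet(16, 0);                    // stand-in for the real header
    positionOffsetOut = (int)packet.size();             // offset of the position field
    const char* bytes = reinterpret_cast<const char*>(&position);
    packet.insert(packet.end(), bytes, bytes + sizeof(position));
    return packet;
}

// Refresh the field in place before each send, as injectAudio() does above.
void updatePositionSketch(std::vector<char>& packet, int positionOffset, const Vec3Sketch& position) {
    std::memcpy(packet.data() + positionOffset, &position, sizeof(position));
}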

View file

@ -29,6 +29,7 @@ public:
public slots:
void injectAudio();
void stop() { _shouldStop = true; }
void setOptions(AudioInjectorOptions& options);
signals:
void finished();
private:

View file

@ -19,7 +19,6 @@ AudioInjectorOptions::AudioInjectorOptions(QObject* parent) :
_orientation(glm::vec3(0.0f, 0.0f, 0.0f)),
_loopbackAudioInterface(NULL)
{
}
AudioInjectorOptions::AudioInjectorOptions(const AudioInjectorOptions& other) {
@ -29,3 +28,11 @@ AudioInjectorOptions::AudioInjectorOptions(const AudioInjectorOptions& other) {
_orientation = other._orientation;
_loopbackAudioInterface = other._loopbackAudioInterface;
}
void AudioInjectorOptions::operator=(const AudioInjectorOptions& other) {
_position = other._position;
_volume = other._volume;
_loop = other._loop;
_orientation = other._orientation;
_loopbackAudioInterface = other._loopbackAudioInterface;
}

View file

@ -30,6 +30,7 @@ class AudioInjectorOptions : public QObject {
public:
AudioInjectorOptions(QObject* parent = 0);
AudioInjectorOptions(const AudioInjectorOptions& other);
void operator=(const AudioInjectorOptions& other);
const glm::vec3& getPosition() const { return _position; }
void setPosition(const glm::vec3& position) { _position = position; }
@ -37,8 +38,8 @@ public:
float getVolume() const { return _volume; }
void setVolume(float volume) { _volume = volume; }
float getLoop() const { return _loop; }
void setLoop(float loop) { _loop = loop; }
bool getLoop() const { return _loop; }
void setLoop(bool loop) { _loop = loop; }
const glm::quat& getOrientation() const { return _orientation; }
void setOrientation(const glm::quat& orientation) { _orientation = orientation; }

View file

@ -20,18 +20,16 @@
#include "AudioRingBuffer.h"
AudioRingBuffer::AudioRingBuffer(int numFrameSamples, bool randomAccessMode, int numFramesCapacity) :
_frameCapacity(numFramesCapacity),
_sampleCapacity(numFrameSamples * numFramesCapacity),
_isFull(false),
_numFrameSamples(numFrameSamples),
_randomAccessMode(randomAccessMode),
_overflowCount(0)
_frameCapacity(numFramesCapacity),
_sampleCapacity(numFrameSamples * numFramesCapacity),
_bufferLength(numFrameSamples * (numFramesCapacity + 1)),
_numFrameSamples(numFrameSamples),
_randomAccessMode(randomAccessMode),
_overflowCount(0)
{
if (numFrameSamples) {
_buffer = new int16_t[_sampleCapacity];
if (_randomAccessMode) {
memset(_buffer, 0, _sampleCapacity * sizeof(int16_t));
}
_buffer = new int16_t[_bufferLength];
memset(_buffer, 0, _bufferLength * sizeof(int16_t));
_nextOutput = _buffer;
_endOfLastWrite = _buffer;
} else {
@ -53,28 +51,29 @@ void AudioRingBuffer::reset() {
void AudioRingBuffer::resizeForFrameSize(int numFrameSamples) {
delete[] _buffer;
_sampleCapacity = numFrameSamples * _frameCapacity;
_bufferLength = numFrameSamples * (_frameCapacity + 1);
_numFrameSamples = numFrameSamples;
_buffer = new int16_t[_sampleCapacity];
_buffer = new int16_t[_bufferLength];
memset(_buffer, 0, _bufferLength * sizeof(int16_t));
if (_randomAccessMode) {
memset(_buffer, 0, _sampleCapacity * sizeof(int16_t));
memset(_buffer, 0, _bufferLength * sizeof(int16_t));
}
reset();
}
void AudioRingBuffer::clear() {
_isFull = false;
_endOfLastWrite = _buffer;
_nextOutput = _buffer;
}
int AudioRingBuffer::readSamples(int16_t* destination, int maxSamples) {
return readData((char*) destination, maxSamples * sizeof(int16_t));
return readData((char*)destination, maxSamples * sizeof(int16_t)) / sizeof(int16_t);
}
int AudioRingBuffer::readData(char *data, int maxSize) {
// only copy up to the number of samples we have available
int numReadSamples = std::min((int) (maxSize / sizeof(int16_t)), samplesAvailable());
int numReadSamples = std::min((int)(maxSize / sizeof(int16_t)), samplesAvailable());
// If we're in random access mode, then we consider our number of available read samples slightly
// differently. Namely, if anything has been written, we say we have as many samples as they ask for
@ -83,16 +82,16 @@ int AudioRingBuffer::readData(char *data, int maxSize) {
numReadSamples = _endOfLastWrite ? (maxSize / sizeof(int16_t)) : 0;
}
if (_nextOutput + numReadSamples > _buffer + _sampleCapacity) {
if (_nextOutput + numReadSamples > _buffer + _bufferLength) {
// we're going to need to do two reads to get this data, it wraps around the edge
// read to the end of the buffer
int numSamplesToEnd = (_buffer + _sampleCapacity) - _nextOutput;
int numSamplesToEnd = (_buffer + _bufferLength) - _nextOutput;
memcpy(data, _nextOutput, numSamplesToEnd * sizeof(int16_t));
if (_randomAccessMode) {
memset(_nextOutput, 0, numSamplesToEnd * sizeof(int16_t)); // clear it
}
// read the rest from the beginning of the buffer
memcpy(data + (numSamplesToEnd * sizeof(int16_t)), _buffer, (numReadSamples - numSamplesToEnd) * sizeof(int16_t));
if (_randomAccessMode) {
@ -108,22 +107,19 @@ int AudioRingBuffer::readData(char *data, int maxSize) {
// push the position of _nextOutput by the number of samples read
_nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numReadSamples);
if (numReadSamples > 0) {
_isFull = false;
}
return numReadSamples * sizeof(int16_t);
}
int AudioRingBuffer::writeSamples(const int16_t* source, int maxSamples) {
return writeData((const char*) source, maxSamples * sizeof(int16_t));
int AudioRingBuffer::writeSamples(const int16_t* source, int maxSamples) {
return writeData((const char*)source, maxSamples * sizeof(int16_t)) / sizeof(int16_t);
}
int AudioRingBuffer::writeData(const char* data, int maxSize) {
// make sure we have enough bytes left for this to be the right amount of audio
// otherwise we should not copy that data, and leave the buffer pointers where they are
int samplesToCopy = std::min((int)(maxSize / sizeof(int16_t)), _sampleCapacity);
int samplesRoomFor = _sampleCapacity - samplesAvailable();
if (samplesToCopy > samplesRoomFor) {
// there's not enough room for this write. erase old data to make room for this new data
@ -132,19 +128,16 @@ int AudioRingBuffer::writeData(const char* data, int maxSize) {
_overflowCount++;
qDebug() << "Overflowed ring buffer! Overwriting old data";
}
if (_endOfLastWrite + samplesToCopy <= _buffer + _sampleCapacity) {
if (_endOfLastWrite + samplesToCopy <= _buffer + _bufferLength) {
memcpy(_endOfLastWrite, data, samplesToCopy * sizeof(int16_t));
} else {
int numSamplesToEnd = (_buffer + _sampleCapacity) - _endOfLastWrite;
int numSamplesToEnd = (_buffer + _bufferLength) - _endOfLastWrite;
memcpy(_endOfLastWrite, data, numSamplesToEnd * sizeof(int16_t));
memcpy(_buffer, data + (numSamplesToEnd * sizeof(int16_t)), (samplesToCopy - numSamplesToEnd) * sizeof(int16_t));
}
_endOfLastWrite = shiftedPositionAccomodatingWrap(_endOfLastWrite, samplesToCopy);
if (samplesToCopy > 0 && _endOfLastWrite == _nextOutput) {
_isFull = true;
}
return samplesToCopy * sizeof(int16_t);
}
@ -158,61 +151,52 @@ const int16_t& AudioRingBuffer::operator[] (const int index) const {
}
void AudioRingBuffer::shiftReadPosition(unsigned int numSamples) {
if (numSamples > 0) {
_nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numSamples);
_isFull = false;
}
_nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numSamples);
}
int AudioRingBuffer::samplesAvailable() const {
if (!_endOfLastWrite) {
return 0;
}
if (_isFull) {
return _sampleCapacity;
}
int sampleDifference = _endOfLastWrite - _nextOutput;
if (sampleDifference < 0) {
sampleDifference += _sampleCapacity;
sampleDifference += _bufferLength;
}
return sampleDifference;
}
int AudioRingBuffer::addSilentFrame(int numSilentSamples) {
int AudioRingBuffer::addSilentSamples(int silentSamples) {
int samplesRoomFor = _sampleCapacity - samplesAvailable();
if (numSilentSamples > samplesRoomFor) {
if (silentSamples > samplesRoomFor) {
// there's not enough room for this write. write as many silent samples as we have room for
numSilentSamples = samplesRoomFor;
silentSamples = samplesRoomFor;
qDebug() << "Dropping some silent samples to prevent ring buffer overflow";
}
// memset zeroes into the buffer, accommodating a wrap around the end
// push the _endOfLastWrite to the correct spot
if (_endOfLastWrite + numSilentSamples <= _buffer + _sampleCapacity) {
memset(_endOfLastWrite, 0, numSilentSamples * sizeof(int16_t));
if (_endOfLastWrite + silentSamples <= _buffer + _bufferLength) {
memset(_endOfLastWrite, 0, silentSamples * sizeof(int16_t));
} else {
int numSamplesToEnd = (_buffer + _sampleCapacity) - _endOfLastWrite;
int numSamplesToEnd = (_buffer + _bufferLength) - _endOfLastWrite;
memset(_endOfLastWrite, 0, numSamplesToEnd * sizeof(int16_t));
memset(_buffer, 0, (numSilentSamples - numSamplesToEnd) * sizeof(int16_t));
}
_endOfLastWrite = shiftedPositionAccomodatingWrap(_endOfLastWrite, numSilentSamples);
if (numSilentSamples > 0 && _nextOutput == _endOfLastWrite) {
_isFull = true;
memset(_buffer, 0, (silentSamples - numSamplesToEnd) * sizeof(int16_t));
}
_endOfLastWrite = shiftedPositionAccomodatingWrap(_endOfLastWrite, silentSamples);
return numSilentSamples * sizeof(int16_t);
return silentSamples;
}
int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int numSamplesShift) const {
if (numSamplesShift > 0 && position + numSamplesShift >= _buffer + _sampleCapacity) {
if (numSamplesShift > 0 && position + numSamplesShift >= _buffer + _bufferLength) {
// this shift will wrap the position around to the beginning of the ring
return position + numSamplesShift - _sampleCapacity;
return position + numSamplesShift - _bufferLength;
} else if (numSamplesShift < 0 && position + numSamplesShift < _buffer) {
// this shift will go around to the end of the ring
return position + numSamplesShift + _sampleCapacity;
return position + numSamplesShift + _bufferLength;
} else {
return position + numSamplesShift;
}
@ -221,7 +205,7 @@ int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int
float AudioRingBuffer::getFrameLoudness(const int16_t* frameStart) const {
float loudness = 0.0f;
const int16_t* sampleAt = frameStart;
const int16_t* _bufferLastAt = _buffer + _sampleCapacity - 1;
const int16_t* _bufferLastAt = _buffer + _bufferLength - 1;
for (int i = 0; i < _numFrameSamples; ++i) {
loudness += fabsf(*sampleAt);
@ -229,11 +213,14 @@ float AudioRingBuffer::getFrameLoudness(const int16_t* frameStart) const {
}
loudness /= _numFrameSamples;
loudness /= MAX_SAMPLE_VALUE;
return loudness;
}
float AudioRingBuffer::getFrameLoudness(ConstIterator frameStart) const {
if (frameStart.isNull()) {
return 0.0f;
}
return getFrameLoudness(&(*frameStart));
}
@ -241,3 +228,44 @@ float AudioRingBuffer::getNextOutputFrameLoudness() const {
return getFrameLoudness(_nextOutput);
}
int AudioRingBuffer::writeSamples(ConstIterator source, int maxSamples) {
int samplesToCopy = std::min(maxSamples, _sampleCapacity);
int samplesRoomFor = _sampleCapacity - samplesAvailable();
if (samplesToCopy > samplesRoomFor) {
// there's not enough room for this write. erase old data to make room for this new data
int samplesToDelete = samplesToCopy - samplesRoomFor;
_nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, samplesToDelete);
_overflowCount++;
qDebug() << "Overflowed ring buffer! Overwriting old data";
}
int16_t* bufferLast = _buffer + _bufferLength - 1;
for (int i = 0; i < samplesToCopy; i++) {
*_endOfLastWrite = *source;
_endOfLastWrite = (_endOfLastWrite == bufferLast) ? _buffer : _endOfLastWrite + 1;
++source;
}
return samplesToCopy;
}
int AudioRingBuffer::writeSamplesWithFade(ConstIterator source, int maxSamples, float fade) {
int samplesToCopy = std::min(maxSamples, _sampleCapacity);
int samplesRoomFor = _sampleCapacity - samplesAvailable();
if (samplesToCopy > samplesRoomFor) {
// there's not enough room for this write. erase old data to make room for this new data
int samplesToDelete = samplesToCopy - samplesRoomFor;
_nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, samplesToDelete);
_overflowCount++;
qDebug() << "Overflowed ring buffer! Overwriting old data";
}
int16_t* bufferLast = _buffer + _bufferLength - 1;
for (int i = 0; i < samplesToCopy; i++) {
*_endOfLastWrite = (int16_t)((float)(*source) * fade);
_endOfLastWrite = (_endOfLastWrite == bufferLast) ? _buffer : _endOfLastWrite + 1;
++source;
}
return samplesToCopy;
}
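A note on why the `_isFull` flag could be deleted: `_buffer` is now allocated one frame larger than `_sampleCapacity`, so even a completely full buffer leaves a gap between `_endOfLastWrite` and `_nextOutput`, and the pointer difference alone distinguishes full from empty. A standalone model of the arithmetic in samplesAvailable(), with illustrative sizes:

#include <cassert>

// Model of samplesAvailable(): capacity is the usable sample count,
// length is capacity plus one extra frame of slack.
int samplesAvailableSketch(int writeIndex, int readIndex, int length) {
    int diff = writeIndex - readIndex;
    if (diff < 0) {
        diff += length;   // same wrap adjustment as "sampleDifference += _bufferLength"
    }
    return diff;
}

int main() {
    const int capacity = 8, frameSize = 4, length = capacity + frameSize;  // 12
    assert(samplesAvailableSketch(0, 0, length) == 0);    // empty: pointers coincide
    assert(samplesAvailableSketch(8, 0, length) == 8);    // full: pointers differ by capacity
    assert(samplesAvailableSketch(2, 6, length) == 8);    // full, wrapped past the end
    return 0;
}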

View file

@ -28,7 +28,7 @@ const int NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL = 512;
const int NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL / sizeof(int16_t);
const unsigned int BUFFER_SEND_INTERVAL_USECS = floorf((NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL
/ (float) SAMPLE_RATE) * USECS_PER_SECOND);
/ (float)SAMPLE_RATE) * USECS_PER_SECOND);
const int MAX_SAMPLE_VALUE = std::numeric_limits<int16_t>::max();
const int MIN_SAMPLE_VALUE = std::numeric_limits<int16_t>::min();
@ -42,33 +42,33 @@ public:
void reset();
void resizeForFrameSize(int numFrameSamples);
void clear();
int getSampleCapacity() const { return _sampleCapacity; }
int getFrameCapacity() const { return _frameCapacity; }
int readSamples(int16_t* destination, int maxSamples);
int writeSamples(const int16_t* source, int maxSamples);
int readData(char* data, int maxSize);
int writeData(const char* data, int maxSize);
int16_t& operator[](const int index);
const int16_t& operator[] (const int index) const;
void shiftReadPosition(unsigned int numSamples);
float getNextOutputFrameLoudness() const;
int samplesAvailable() const;
int framesAvailable() const { return samplesAvailable() / _numFrameSamples; }
int getNumFrameSamples() const { return _numFrameSamples; }
int getOverflowCount() const { return _overflowCount; } /// how many times has the ring buffer has overwritten old data
int addSilentFrame(int numSilentSamples);
int addSilentSamples(int samples);
private:
float getFrameLoudness(const int16_t* frameStart) const;
@ -77,12 +77,12 @@ protected:
// disallow copying of AudioRingBuffer objects
AudioRingBuffer(const AudioRingBuffer&);
AudioRingBuffer& operator= (const AudioRingBuffer&);
int16_t* shiftedPositionAccomodatingWrap(int16_t* position, int numSamplesShift) const;
int _frameCapacity;
int _sampleCapacity;
bool _isFull;
int _bufferLength; // actual length of _buffer: will be one frame larger than _sampleCapacity
int _numFrameSamples;
int16_t* _nextOutput;
int16_t* _endOfLastWrite;
@ -95,23 +95,25 @@ public:
class ConstIterator { //public std::iterator < std::forward_iterator_tag, int16_t > {
public:
ConstIterator()
: _capacity(0),
: _bufferLength(0),
_bufferFirst(NULL),
_bufferLast(NULL),
_at(NULL) {}
ConstIterator(int16_t* bufferFirst, int capacity, int16_t* at)
: _capacity(capacity),
: _bufferLength(capacity),
_bufferFirst(bufferFirst),
_bufferLast(bufferFirst + capacity - 1),
_at(at) {}
bool isNull() const { return _at == NULL; }
bool operator==(const ConstIterator& rhs) { return _at == rhs._at; }
bool operator!=(const ConstIterator& rhs) { return _at != rhs._at; }
const int16_t& operator*() { return *_at; }
ConstIterator& operator=(const ConstIterator& rhs) {
_capacity = rhs._capacity;
_bufferLength = rhs._bufferLength;
_bufferFirst = rhs._bufferFirst;
_bufferLast = rhs._bufferLast;
_at = rhs._at;
@ -145,40 +147,54 @@ public:
}
ConstIterator operator+(int i) {
return ConstIterator(_bufferFirst, _capacity, atShiftedBy(i));
return ConstIterator(_bufferFirst, _bufferLength, atShiftedBy(i));
}
ConstIterator operator-(int i) {
return ConstIterator(_bufferFirst, _capacity, atShiftedBy(-i));
return ConstIterator(_bufferFirst, _bufferLength, atShiftedBy(-i));
}
void readSamples(int16_t* dest, int numSamples) {
int16_t* at = _at;
for (int i = 0; i < numSamples; i++) {
*dest = *(*this);
*dest = *at;
++dest;
++(*this);
at = (at == _bufferLast) ? _bufferFirst : at + 1;
}
}
void readSamplesWithFade(int16_t* dest, int numSamples, float fade) {
int16_t* at = _at;
for (int i = 0; i < numSamples; i++) {
*dest = (float)*at * fade;
++dest;
at = (at == _bufferLast) ? _bufferFirst : at + 1;
}
}
private:
int16_t* atShiftedBy(int i) {
i = (_at - _bufferFirst + i) % _capacity;
i = (_at - _bufferFirst + i) % _bufferLength;
if (i < 0) {
i += _capacity;
i += _bufferLength;
}
return _bufferFirst + i;
}
private:
int _capacity;
int _bufferLength;
int16_t* _bufferFirst;
int16_t* _bufferLast;
int16_t* _at;
};
ConstIterator nextOutput() const { return ConstIterator(_buffer, _sampleCapacity, _nextOutput); }
ConstIterator nextOutput() const { return ConstIterator(_buffer, _bufferLength, _nextOutput); }
ConstIterator lastFrameWritten() const { return ConstIterator(_buffer, _bufferLength, _endOfLastWrite) - _numFrameSamples; }
float getFrameLoudness(ConstIterator frameStart) const;
int writeSamples(ConstIterator source, int maxSamples);
int writeSamplesWithFade(ConstIterator source, int maxSamples, float fade);
};
#endif // hifi_AudioRingBuffer_h
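The new lastFrameWritten() / writeSamplesWithFade() pair exists so a stream can conceal a lost packet by re-playing its most recent frame at reduced volume (see InboundAudioStream::writeLastFrameRepeatedWithFade() below). A hedged usage sketch, built only on the API declared above; the fade value here is arbitrary:

// Hypothetical loss-concealment helper; not part of this commit.
void concealOneLostFrameSketch(AudioRingBuffer& ringBuffer, float fade /* e.g. 0.5f */) {
    // Iterator positioned at the start of the most recently written frame.
    AudioRingBuffer::ConstIterator lastFrame = ringBuffer.lastFrameWritten();
    // Re-append that frame scaled by `fade`; returns the samples actually written.
    int written = ringBuffer.writeSamplesWithFade(lastFrame, ringBuffer.getNumFrameSamples(), fade);
    (void)written;
}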

View file

@ -14,30 +14,37 @@
#include "InboundAudioStream.h"
#include "PacketHeaders.h"
InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity,
bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc) :
const int STARVE_HISTORY_CAPACITY = 50;
InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, const Settings& settings) :
_ringBuffer(numFrameSamples, false, numFramesCapacity),
_lastPopSucceeded(false),
_lastPopOutput(),
_dynamicJitterBuffers(dynamicJitterBuffers),
_staticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames),
_useStDevForJitterCalc(useStDevForJitterCalc),
_calculatedJitterBufferFramesUsingMaxGap(0),
_calculatedJitterBufferFramesUsingStDev(0),
_desiredJitterBufferFrames(dynamicJitterBuffers ? 1 : staticDesiredJitterBufferFrames),
_maxFramesOverDesired(maxFramesOverDesired),
_dynamicJitterBuffers(settings._dynamicJitterBuffers),
_staticDesiredJitterBufferFrames(settings._staticDesiredJitterBufferFrames),
_useStDevForJitterCalc(settings._useStDevForJitterCalc),
_desiredJitterBufferFrames(settings._dynamicJitterBuffers ? 1 : settings._staticDesiredJitterBufferFrames),
_maxFramesOverDesired(settings._maxFramesOverDesired),
_isStarved(true),
_hasStarted(false),
_consecutiveNotMixedCount(0),
_starveCount(0),
_silentFramesDropped(0),
_oldFramesDropped(0),
_incomingSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS),
_lastFrameReceivedTime(0),
_interframeTimeGapStatsForJitterCalc(TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES, TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS),
_interframeTimeGapStatsForStatsPacket(TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES, TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS),
_incomingSequenceNumberStats(STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
_lastPacketReceivedTime(0),
_timeGapStatsForDesiredCalcOnTooManyStarves(0, settings._windowSecondsForDesiredCalcOnTooManyStarves),
_calculatedJitterBufferFramesUsingMaxGap(0),
_stdevStatsForDesiredCalcOnTooManyStarves(),
_calculatedJitterBufferFramesUsingStDev(0),
_timeGapStatsForDesiredReduction(0, settings._windowSecondsForDesiredReduction),
_starveHistoryWindowSeconds(settings._windowSecondsForDesiredCalcOnTooManyStarves),
_starveHistory(STARVE_HISTORY_CAPACITY),
_starveThreshold(settings._windowStarveThreshold),
_framesAvailableStat(),
_currentJitterBufferFrames(0)
_currentJitterBufferFrames(0),
_timeGapStatsForStatsPacket(0, STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
_repetitionWithFade(settings._repetitionWithFade)
{
}
@ -59,11 +66,14 @@ void InboundAudioStream::resetStats() {
_silentFramesDropped = 0;
_oldFramesDropped = 0;
_incomingSequenceNumberStats.reset();
_lastFrameReceivedTime = 0;
_interframeTimeGapStatsForJitterCalc.reset();
_interframeTimeGapStatsForStatsPacket.reset();
_lastPacketReceivedTime = 0;
_timeGapStatsForDesiredCalcOnTooManyStarves.reset();
_stdevStatsForDesiredCalcOnTooManyStarves = StDev();
_timeGapStatsForDesiredReduction.reset();
_starveHistory.clear();
_framesAvailableStat.reset();
_currentJitterBufferFrames = 0;
_timeGapStatsForStatsPacket.reset();
}
void InboundAudioStream::clearBuffer() {
@ -72,8 +82,11 @@ void InboundAudioStream::clearBuffer() {
_currentJitterBufferFrames = 0;
}
int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
void InboundAudioStream::perSecondCallbackForUpdatingStats() {
_incomingSequenceNumberStats.pushStatsToHistory();
_timeGapStatsForDesiredCalcOnTooManyStarves.currentIntervalComplete();
_timeGapStatsForDesiredReduction.currentIntervalComplete();
_timeGapStatsForStatsPacket.currentIntervalComplete();
}
int InboundAudioStream::parseData(const QByteArray& packet) {
@ -83,36 +96,51 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
// parse header
int numBytesHeader = numBytesForPacketHeader(packet);
const char* sequenceAt = packet.constData() + numBytesHeader;
const char* dataAt = packet.constData() + numBytesHeader;
int readBytes = numBytesHeader;
// parse sequence number and track it
quint16 sequence = *(reinterpret_cast<const quint16*>(sequenceAt));
quint16 sequence = *(reinterpret_cast<const quint16*>(dataAt));
dataAt += sizeof(quint16);
readBytes += sizeof(quint16);
SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequence, senderUUID);
frameReceivedUpdateTimingStats();
packetReceivedUpdateTimingStats();
// TODO: handle generalized silent packet here?????
int networkSamples;
// parse the info after the seq number and before the audio data.(the stream properties)
int numAudioSamples;
readBytes += parseStreamProperties(packetType, packet.mid(readBytes), numAudioSamples);
if (packetType == PacketTypeSilentAudioFrame) {
quint16 numSilentSamples = *(reinterpret_cast<const quint16*>(dataAt));
readBytes += sizeof(quint16);
networkSamples = (int)numSilentSamples;
} else {
// parse the info after the seq number and before the audio data (the stream properties)
readBytes += parseStreamProperties(packetType, packet.mid(readBytes), networkSamples);
}
// handle this packet based on its arrival status.
// For now, late packets are ignored. It may be good in the future to insert the late audio frame
// into the ring buffer to fill in the missing frame if it hasn't been mixed yet.
switch (arrivalInfo._status) {
case SequenceNumberStats::Early: {
// Packet is early; write droppable silent samples for each of the skipped packets.
// NOTE: we assume that each dropped packet contains the same number of samples
// as the packet we just received.
int packetsDropped = arrivalInfo._seqDiffFromExpected;
writeSamplesForDroppedPackets(packetsDropped * numAudioSamples);
writeSamplesForDroppedPackets(packetsDropped * networkSamples);
// fall through to OnTime case
}
case SequenceNumberStats::OnTime: {
readBytes += parseAudioData(packetType, packet.mid(readBytes), numAudioSamples);
// Packet is on time; parse its data to the ringbuffer
if (packetType == PacketTypeSilentAudioFrame) {
writeDroppableSilentSamples(networkSamples);
} else {
readBytes += parseAudioData(packetType, packet.mid(readBytes), networkSamples);
}
break;
}
default: {
// For now, late packets are ignored. It may be good in the future to insert the late audio packet data
// into the ring buffer to fill in the missing frame if it hasn't been mixed yet.
break;
}
}
@ -139,6 +167,43 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
return readBytes;
}
int InboundAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
// mixed audio packets do not have any info between the seq num and the audio data.
numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
return 0;
}
int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
}
int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) {
// calculate how many silent frames we should drop.
int samplesPerFrame = _ringBuffer.getNumFrameSamples();
int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING;
int numSilentFramesToDrop = 0;
if (silentSamples >= samplesPerFrame && _currentJitterBufferFrames > desiredJitterBufferFramesPlusPadding) {
// our avg jitter buffer size exceeds its desired value, so ignore some silent
// frames to get that size as close to desired as possible
int numSilentFramesToDropDesired = _currentJitterBufferFrames - desiredJitterBufferFramesPlusPadding;
int numSilentFramesReceived = silentSamples / samplesPerFrame;
numSilentFramesToDrop = std::min(numSilentFramesToDropDesired, numSilentFramesReceived);
// don't reset _currentJitterBufferFrames here; we want to be able to drop further silent frames
// without waiting for _framesAvailableStat to fill up to 10s of samples.
_currentJitterBufferFrames -= numSilentFramesToDrop;
_silentFramesDropped += numSilentFramesToDrop;
_framesAvailableStat.reset();
}
int ret = _ringBuffer.addSilentSamples(silentSamples - numSilentFramesToDrop * samplesPerFrame);
return ret;
}
int InboundAudioStream::popSamples(int maxSamples, bool allOrNothing, bool starveIfNoSamplesPopped) {
int samplesPopped = 0;
int samplesAvailable = _ringBuffer.samplesAvailable();
@ -216,12 +281,61 @@ void InboundAudioStream::framesAvailableChanged() {
}
void InboundAudioStream::setToStarved() {
_isStarved = true;
_consecutiveNotMixedCount = 0;
_starveCount++;
// if we have more than the desired frames when setToStarved() is called, then we'll immediately
// be considered refilled. in that case, there's no need to set _isStarved to true.
_isStarved = (_ringBuffer.framesAvailable() < _desiredJitterBufferFrames);
// record the time of this starve in the starve history
quint64 now = usecTimestampNow();
_starveHistory.insert(now);
if (_dynamicJitterBuffers) {
// dynamic jitter buffers are enabled. check if this starve put us over the window
// starve threshold
quint64 windowEnd = now - _starveHistoryWindowSeconds * USECS_PER_SECOND;
RingBufferHistory<quint64>::Iterator starvesIterator = _starveHistory.begin();
RingBufferHistory<quint64>::Iterator end = _starveHistory.end();
int starvesInWindow = 1;
do {
++starvesIterator;
if (*starvesIterator < windowEnd) {
break;
}
starvesInWindow++;
} while (starvesIterator != end);
// this starve put us over the starve threshold. update _desiredJitterBufferFrames to
// value determined by window A.
if (starvesInWindow >= _starveThreshold) {
int calculatedJitterBufferFrames;
if (_useStDevForJitterCalc) {
calculatedJitterBufferFrames = _calculatedJitterBufferFramesUsingStDev;
} else {
// we don't know when the next packet will arrive, so it's possible the gap between the last packet and the
// next packet will exceed the max time gap in the window. If the time since the last packet has already exceeded
// the window max gap, then we should use that value to calculate desired frames.
int framesSinceLastPacket = ceilf((float)(now - _lastPacketReceivedTime) / (float)BUFFER_SEND_INTERVAL_USECS);
calculatedJitterBufferFrames = std::max(_calculatedJitterBufferFramesUsingMaxGap, framesSinceLastPacket);
}
// make sure _desiredJitterBufferFrames does not become lower here
if (calculatedJitterBufferFrames >= _desiredJitterBufferFrames) {
_desiredJitterBufferFrames = calculatedJitterBufferFrames;
}
}
}
}
void InboundAudioStream::setSettings(const Settings& settings) {
setMaxFramesOverDesired(settings._maxFramesOverDesired);
setDynamicJitterBuffers(settings._dynamicJitterBuffers);
setStaticDesiredJitterBufferFrames(settings._staticDesiredJitterBufferFrames);
setUseStDevForJitterCalc(settings._useStDevForJitterCalc);
setWindowStarveThreshold(settings._windowStarveThreshold);
setWindowSecondsForDesiredCalcOnTooManyStarves(settings._windowSecondsForDesiredCalcOnTooManyStarves);
setWindowSecondsForDesiredReduction(settings._windowSecondsForDesiredReduction);
setRepetitionWithFade(settings._repetitionWithFade);
}
void InboundAudioStream::setDynamicJitterBuffers(bool dynamicJitterBuffers) {
@ -229,6 +343,7 @@ void InboundAudioStream::setDynamicJitterBuffers(bool dynamicJitterBuffers) {
_desiredJitterBufferFrames = _staticDesiredJitterBufferFrames;
} else {
if (!_dynamicJitterBuffers) {
// if we're enabling dynamic jitter buffer frames, start desired frames at 1
_desiredJitterBufferFrames = 1;
}
}
@ -242,90 +357,102 @@ void InboundAudioStream::setStaticDesiredJitterBufferFrames(int staticDesiredJit
}
}
void InboundAudioStream::setWindowSecondsForDesiredCalcOnTooManyStarves(int windowSecondsForDesiredCalcOnTooManyStarves) {
_timeGapStatsForDesiredCalcOnTooManyStarves.setWindowIntervals(windowSecondsForDesiredCalcOnTooManyStarves);
_starveHistoryWindowSeconds = windowSecondsForDesiredCalcOnTooManyStarves;
}
void InboundAudioStream::setWindowSecondsForDesiredReduction(int windowSecondsForDesiredReduction) {
_timeGapStatsForDesiredReduction.setWindowIntervals(windowSecondsForDesiredReduction);
}
int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const {
const int MIN_FRAMES_DESIRED = 0;
const int MAX_FRAMES_DESIRED = _ringBuffer.getFrameCapacity();
return glm::clamp(desired, MIN_FRAMES_DESIRED, MAX_FRAMES_DESIRED);
}
void InboundAudioStream::frameReceivedUpdateTimingStats() {
void InboundAudioStream::packetReceivedUpdateTimingStats() {
// update our timegap stats and desired jitter buffer frames if necessary
// discard the first few packets we receive since they usually have gaps that aren't representative of normal jitter
const int NUM_INITIAL_PACKETS_DISCARD = 3;
quint64 now = usecTimestampNow();
if (_incomingSequenceNumberStats.getReceived() > NUM_INITIAL_PACKETS_DISCARD) {
quint64 gap = now - _lastFrameReceivedTime;
_interframeTimeGapStatsForStatsPacket.update(gap);
quint64 gap = now - _lastPacketReceivedTime;
_timeGapStatsForStatsPacket.update(gap);
const float USECS_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * USECS_PER_SECOND / (float)SAMPLE_RATE;
// update all stats used for desired frames calculations under dynamic jitter buffer mode
_timeGapStatsForDesiredCalcOnTooManyStarves.update(gap);
_stdevStatsForDesiredCalcOnTooManyStarves.addValue(gap);
_timeGapStatsForDesiredReduction.update(gap);
// update stats for Freddy's method of jitter calc
_interframeTimeGapStatsForJitterCalc.update(gap);
if (_interframeTimeGapStatsForJitterCalc.getNewStatsAvailableFlag()) {
_calculatedJitterBufferFramesUsingMaxGap = ceilf((float)_interframeTimeGapStatsForJitterCalc.getWindowMax() / USECS_PER_FRAME);
_interframeTimeGapStatsForJitterCalc.clearNewStatsAvailableFlag();
if (_dynamicJitterBuffers && !_useStDevForJitterCalc) {
_desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingMaxGap);
}
if (_timeGapStatsForDesiredCalcOnTooManyStarves.getNewStatsAvailableFlag()) {
_calculatedJitterBufferFramesUsingMaxGap = ceilf((float)_timeGapStatsForDesiredCalcOnTooManyStarves.getWindowMax()
/ (float)BUFFER_SEND_INTERVAL_USECS);
_timeGapStatsForDesiredCalcOnTooManyStarves.clearNewStatsAvailableFlag();
}
// update stats for Philip's method of jitter calc
_stdev.addValue(gap);
const int STANDARD_DEVIATION_SAMPLE_COUNT = 500;
if (_stdev.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) {
if (_stdevStatsForDesiredCalcOnTooManyStarves.getSamples() > STANDARD_DEVIATION_SAMPLE_COUNT) {
const float NUM_STANDARD_DEVIATIONS = 3.0f;
_calculatedJitterBufferFramesUsingStDev = (int)ceilf(NUM_STANDARD_DEVIATIONS * _stdev.getStDev() / USECS_PER_FRAME);
_stdev.reset();
_calculatedJitterBufferFramesUsingStDev = ceilf(NUM_STANDARD_DEVIATIONS * _stdevStatsForDesiredCalcOnTooManyStarves.getStDev()
/ (float)BUFFER_SEND_INTERVAL_USECS);
_stdevStatsForDesiredCalcOnTooManyStarves.reset();
}
if (_dynamicJitterBuffers && _useStDevForJitterCalc) {
_desiredJitterBufferFrames = clampDesiredJitterBufferFramesValue(_calculatedJitterBufferFramesUsingStDev);
if (_dynamicJitterBuffers) {
// if the max gap in window B (_timeGapStatsForDesiredReduction) corresponds to a smaller number of frames than _desiredJitterBufferFrames,
// then reduce _desiredJitterBufferFrames to that number of frames.
if (_timeGapStatsForDesiredReduction.getNewStatsAvailableFlag() && _timeGapStatsForDesiredReduction.isWindowFilled()) {
int calculatedJitterBufferFrames = ceilf((float)_timeGapStatsForDesiredReduction.getWindowMax() / (float)BUFFER_SEND_INTERVAL_USECS);
if (calculatedJitterBufferFrames < _desiredJitterBufferFrames) {
_desiredJitterBufferFrames = calculatedJitterBufferFrames;
}
_timeGapStatsForDesiredReduction.clearNewStatsAvailableFlag();
}
}
}
_lastFrameReceivedTime = now;
_lastPacketReceivedTime = now;
}
int InboundAudioStream::writeDroppableSilentSamples(int numSilentSamples) {
// calculate how many silent frames we should drop.
int samplesPerFrame = _ringBuffer.getNumFrameSamples();
int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING;
int numSilentFramesToDrop = 0;
if (numSilentSamples >= samplesPerFrame && _currentJitterBufferFrames > desiredJitterBufferFramesPlusPadding) {
// our avg jitter buffer size exceeds its desired value, so ignore some silent
// frames to get that size as close to desired as possible
int numSilentFramesToDropDesired = _currentJitterBufferFrames - desiredJitterBufferFramesPlusPadding;
int numSilentFramesReceived = numSilentSamples / samplesPerFrame;
numSilentFramesToDrop = std::min(numSilentFramesToDropDesired, numSilentFramesReceived);
// don't reset _currentJitterBufferFrames here; we want to be able to drop further silent frames
// without waiting for _framesAvailableStat to fill up to 10s of samples.
_currentJitterBufferFrames -= numSilentFramesToDrop;
_silentFramesDropped += numSilentFramesToDrop;
_framesAvailableStat.reset();
int InboundAudioStream::writeSamplesForDroppedPackets(int networkSamples) {
if (_repetitionWithFade) {
return writeLastFrameRepeatedWithFade(networkSamples);
}
return _ringBuffer.addSilentFrame(numSilentSamples - numSilentFramesToDrop * samplesPerFrame);
return writeDroppableSilentSamples(networkSamples);
}
int InboundAudioStream::writeSamplesForDroppedPackets(int numSamples) {
return writeDroppableSilentSamples(numSamples);
int InboundAudioStream::writeLastFrameRepeatedWithFade(int samples) {
AudioRingBuffer::ConstIterator frameToRepeat = _ringBuffer.lastFrameWritten();
int frameSize = _ringBuffer.getNumFrameSamples();
int samplesToWrite = samples;
int indexOfRepeat = 0;
do {
int samplesToWriteThisIteration = std::min(samplesToWrite, frameSize);
float fade = calculateRepeatedFrameFadeFactor(indexOfRepeat);
if (fade == 1.0f) {
samplesToWrite -= _ringBuffer.writeSamples(frameToRepeat, samplesToWriteThisIteration);
} else {
samplesToWrite -= _ringBuffer.writeSamplesWithFade(frameToRepeat, samplesToWriteThisIteration, fade);
}
indexOfRepeat++;
} while (samplesToWrite > 0);
return samples;
}
AudioStreamStats InboundAudioStream::getAudioStreamStats() const {
AudioStreamStats streamStats;
streamStats._timeGapMin = _interframeTimeGapStatsForStatsPacket.getMin();
streamStats._timeGapMax = _interframeTimeGapStatsForStatsPacket.getMax();
streamStats._timeGapAverage = _interframeTimeGapStatsForStatsPacket.getAverage();
streamStats._timeGapWindowMin = _interframeTimeGapStatsForStatsPacket.getWindowMin();
streamStats._timeGapWindowMax = _interframeTimeGapStatsForStatsPacket.getWindowMax();
streamStats._timeGapWindowAverage = _interframeTimeGapStatsForStatsPacket.getWindowAverage();
streamStats._timeGapMin = _timeGapStatsForStatsPacket.getMin();
streamStats._timeGapMax = _timeGapStatsForStatsPacket.getMax();
streamStats._timeGapAverage = _timeGapStatsForStatsPacket.getAverage();
streamStats._timeGapWindowMin = _timeGapStatsForStatsPacket.getWindowMin();
streamStats._timeGapWindowMax = _timeGapStatsForStatsPacket.getWindowMax();
streamStats._timeGapWindowAverage = _timeGapStatsForStatsPacket.getWindowAverage();
streamStats._framesAvailable = _ringBuffer.framesAvailable();
streamStats._framesAvailableAverage = _framesAvailableStat.getAverage();
@ -341,7 +468,24 @@ AudioStreamStats InboundAudioStream::getAudioStreamStats() const {
return streamStats;
}
AudioStreamStats InboundAudioStream::updateSeqHistoryAndGetAudioStreamStats() {
_incomingSequenceNumberStats.pushStatsToHistory();
return getAudioStreamStats();
float calculateRepeatedFrameFadeFactor(int indexOfRepeat) {
// fade factor scheme is from this paper:
// http://inst.eecs.berkeley.edu/~ee290t/sp04/lectures/packet_loss_recov_paper11.pdf
const float INITIAL_MSECS_NO_FADE = 20.0f;
const float MSECS_FADE_TO_ZERO = 320.0f;
const float INITIAL_FRAMES_NO_FADE = INITIAL_MSECS_NO_FADE * (float)USECS_PER_MSEC / (float)BUFFER_SEND_INTERVAL_USECS;
const float FRAMES_FADE_TO_ZERO = MSECS_FADE_TO_ZERO * (float)USECS_PER_MSEC / (float)BUFFER_SEND_INTERVAL_USECS;
const float SAMPLE_RANGE = std::numeric_limits<int16_t>::max();
if (indexOfRepeat <= INITIAL_FRAMES_NO_FADE) {
return 1.0f;
} else if (indexOfRepeat <= INITIAL_FRAMES_NO_FADE + FRAMES_FADE_TO_ZERO) {
return pow(SAMPLE_RANGE, -(indexOfRepeat - INITIAL_FRAMES_NO_FADE) / FRAMES_FADE_TO_ZERO);
//return 1.0f - ((indexOfRepeat - INITIAL_FRAMES_NO_FADE) / FRAMES_FADE_TO_ZERO);
}
return 0.0f;
}
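To make the fade schedule above concrete: the factor stays at 1.0 for the first ~20 ms of repeats, then decays exponentially so that ~320 ms later it reaches 1/32767 — one 16-bit quantization step, i.e. effectively silence. A standalone version of the same curve; the frame duration is a placeholder parameter rather than the real BUFFER_SEND_INTERVAL_USECS-derived value:

#include <cmath>
#include <cstdio>

// Standalone copy of calculateRepeatedFrameFadeFactor(), parameterized by
// frame length in milliseconds (placeholder; the real code derives it from
// BUFFER_SEND_INTERVAL_USECS).
float repeatedFrameFadeSketch(int indexOfRepeat, float frameMsecs) {
    const float INITIAL_MSECS_NO_FADE = 20.0f;
    const float MSECS_FADE_TO_ZERO = 320.0f;
    const float framesNoFade = INITIAL_MSECS_NO_FADE / frameMsecs;
    const float framesFadeToZero = MSECS_FADE_TO_ZERO / frameMsecs;
    const float SAMPLE_RANGE = 32767.0f;   // int16 max: the quantization floor
    if (indexOfRepeat <= framesNoFade) {
        return 1.0f;
    } else if (indexOfRepeat <= framesNoFade + framesFadeToZero) {
        // exponential decay from 1.0 down to 1/SAMPLE_RANGE over the fade window
        return std::pow(SAMPLE_RANGE, -(indexOfRepeat - framesNoFade) / framesFadeToZero);
    }
    return 0.0f;
}

int main() {
    for (int i = 0; i <= 40; i += 8) {     // assuming ~10.67 ms frames as an example
        std::printf("repeat %2d -> fade %.5f\n", i, repeatedFrameFadeSketch(i, 10.67f));
    }
    return 0;
}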

View file

@ -22,43 +22,84 @@
#include "TimeWeightedAvg.h"
// This adds some number of frames to the desired jitter buffer frames target we use when we're dropping frames.
// The larger this value is, the less aggressive we are about reducing the jitter buffer length.
// Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames long when dropping frames,
// The larger this value is, the fewer frames we drop when attempting to reduce the jitter buffer length.
// Setting this to 0 will try to get the jitter buffer to be exactly _desiredJitterBufferFrames when dropping frames,
// which could lead to a starve soon after.
const int DESIRED_JITTER_BUFFER_FRAMES_PADDING = 1;
// the time gaps stats for _desiredJitterBufferFrames calculation
// will recalculate the max for the past 5000 samples every 500 samples
const int TIME_GAPS_FOR_JITTER_CALC_INTERVAL_SAMPLES = 500;
const int TIME_GAPS_FOR_JITTER_CALC_WINDOW_INTERVALS = 10;
// the time gap stats for constructing AudioStreamStats will
// recalculate min/max/avg every ~1 second for the past ~30 seconds of time gap data
const int TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;
const int TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS = 30;
// this controls the length of the window for stats used in the stats packet (not the stats used in
// _desiredJitterBufferFrames calculation)
const int STATS_FOR_STATS_PACKET_WINDOW_SECONDS = 30;
// this controls the window size of the time-weighted avg of frames available. Every time the window fills up,
// _currentJitterBufferFrames is updated with the time-weighted avg and the running time-weighted avg is reset.
const int FRAMES_AVAILABLE_STAT_WINDOW_USECS = 2 * USECS_PER_SECOND;
// the internal history buffer of the incoming seq stats will cover 30s to calculate
// packet loss % over last 30s
const int INCOMING_SEQ_STATS_HISTORY_LENGTH_SECONDS = 30;
const int INBOUND_RING_BUFFER_FRAME_CAPACITY = 100;
const int FRAMES_AVAILABLE_STAT_WINDOW_USECS = 10 * USECS_PER_SECOND;
// default values for members of the Settings struct
const int DEFAULT_MAX_FRAMES_OVER_DESIRED = 10;
const int DEFAULT_DESIRED_JITTER_BUFFER_FRAMES = 1;
const bool DEFAULT_DYNAMIC_JITTER_BUFFERS = true;
const int DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES = 1;
const bool DEFAULT_USE_STDEV_FOR_JITTER_CALC = false;
const int DEFAULT_WINDOW_STARVE_THRESHOLD = 3;
const int DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES = 50;
const int DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION = 10;
const bool DEFAULT_REPETITION_WITH_FADE = true;
class InboundAudioStream : public NodeData {
Q_OBJECT
public:
InboundAudioStream(int numFrameSamples, int numFramesCapacity,
bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired,
bool useStDevForJitterCalc = false);
class Settings {
public:
Settings()
: _maxFramesOverDesired(DEFAULT_MAX_FRAMES_OVER_DESIRED),
_dynamicJitterBuffers(DEFAULT_DYNAMIC_JITTER_BUFFERS),
_staticDesiredJitterBufferFrames(DEFAULT_STATIC_DESIRED_JITTER_BUFFER_FRAMES),
_useStDevForJitterCalc(DEFAULT_USE_STDEV_FOR_JITTER_CALC),
_windowStarveThreshold(DEFAULT_WINDOW_STARVE_THRESHOLD),
_windowSecondsForDesiredCalcOnTooManyStarves(DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES),
_windowSecondsForDesiredReduction(DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION),
_repetitionWithFade(DEFAULT_REPETITION_WITH_FADE)
{}
Settings(int maxFramesOverDesired, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames,
bool useStDevForJitterCalc, int windowStarveThreshold, int windowSecondsForDesiredCalcOnTooManyStarves,
int windowSecondsForDesiredReduction, bool repetitionWithFade)
: _maxFramesOverDesired(maxFramesOverDesired),
_dynamicJitterBuffers(dynamicJitterBuffers),
_staticDesiredJitterBufferFrames(staticDesiredJitterBufferFrames),
_useStDevForJitterCalc(useStDevForJitterCalc),
_windowStarveThreshold(windowStarveThreshold),
_windowSecondsForDesiredCalcOnTooManyStarves(windowSecondsForDesiredCalcOnTooManyStarves),
_windowSecondsForDesiredReduction(windowSecondsForDesiredReduction),
_repetitionWithFade(repetitionWithFade)
{}
// max number of frames over desired in the ringbuffer.
int _maxFramesOverDesired;
// if false, _desiredJitterBufferFrames will always be _staticDesiredJitterBufferFrames. Otherwise,
// either Fred's or Philip's method will be used to calculate _desiredJitterBufferFrames based on packet timegaps.
bool _dynamicJitterBuffers;
// settings for static jitter buffer mode
int _staticDesiredJitterBufferFrames;
// settings for dynamic jitter buffer mode
bool _useStDevForJitterCalc; // if true, Philip's method is used; otherwise, Fred's method is used.
int _windowStarveThreshold;
int _windowSecondsForDesiredCalcOnTooManyStarves;
int _windowSecondsForDesiredReduction;
// if true, the prev frame will be repeated (fading to silence) for dropped frames.
// otherwise, silence will be inserted.
bool _repetitionWithFade;
};
public:
InboundAudioStream(int numFrameSamples, int numFramesCapacity, const Settings& settings);
void reset();
void resetStats();
virtual void resetStats();
void clearBuffer();
virtual int parseData(const QByteArray& packet);
@ -72,14 +113,18 @@ public:
void setToStarved();
void setDynamicJitterBuffers(bool dynamicJitterBuffers);
void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames);
/// this function should be called once per second to ensure the seq num stats history spans ~30 seconds
AudioStreamStats updateSeqHistoryAndGetAudioStreamStats();
void setSettings(const Settings& settings);
void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; }
void setDynamicJitterBuffers(bool dynamicJitterBuffers);
void setStaticDesiredJitterBufferFrames(int staticDesiredJitterBufferFrames);
void setUseStDevForJitterCalc(bool useStDevForJitterCalc) { _useStDevForJitterCalc = useStDevForJitterCalc; }
void setWindowStarveThreshold(int windowStarveThreshold) { _starveThreshold = windowStarveThreshold; }
void setWindowSecondsForDesiredCalcOnTooManyStarves(int windowSecondsForDesiredCalcOnTooManyStarves);
void setWindowSecondsForDesiredReduction(int windowSecondsForDesiredReduction);
void setRepetitionWithFade(bool repetitionWithFade) { _repetitionWithFade = repetitionWithFade; }
virtual AudioStreamStats getAudioStreamStats() const;
@ -110,11 +155,17 @@ public:
int getPacketsReceived() const { return _incomingSequenceNumberStats.getReceived(); }
public slots:
/// This function should be called every second for all the stats to function properly. If dynamic jitter
/// buffers are enabled, those stats are used to calculate _desiredJitterBufferFrames.
/// If the stats are not used and dynamic jitter buffers are disabled, it's not necessary to call this function.
void perSecondCallbackForUpdatingStats();
private:
void frameReceivedUpdateTimingStats();
void packetReceivedUpdateTimingStats();
int clampDesiredJitterBufferFramesValue(int desired) const;
int writeSamplesForDroppedPackets(int numSamples);
int writeSamplesForDroppedPackets(int networkSamples);
void popSamplesNoCheck(int samples);
void framesAvailableChanged();
@ -126,13 +177,19 @@ protected:
/// parses the info between the seq num and the audio data in the network packet and calculates
/// how many audio samples this packet contains (used when filling in samples for dropped packets).
virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) = 0;
/// default implementation assumes no stream properties and raw audio samples after the sequence number
virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& networkSamples);
/// parses the audio data in the network packet.
/// default implementation assumes packet contains raw audio samples after stream properties
virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int networkSamples);
int writeDroppableSilentSamples(int numSilentSamples);
/// writes silent samples to the buffer; some of them may be dropped to reduce the latency caused by the buffer
virtual int writeDroppableSilentSamples(int silentSamples);
/// writes the last written frame repeatedly, gradually fading to silence.
/// used for writing samples for dropped packets.
virtual int writeLastFrameRepeatedWithFade(int samples);
protected:
@ -147,8 +204,6 @@ protected:
// if jitter buffer is dynamic, this determines what method of calculating _desiredJitterBufferFrames
// if true, Philip's timegap std dev calculation is used. Otherwise, Freddy's max timegap calculation is used
bool _useStDevForJitterCalc;
int _calculatedJitterBufferFramesUsingMaxGap;
int _calculatedJitterBufferFramesUsingStDev;
int _desiredJitterBufferFrames;
@ -168,16 +223,28 @@ protected:
SequenceNumberStats _incomingSequenceNumberStats;
quint64 _lastFrameReceivedTime;
MovingMinMaxAvg<quint64> _interframeTimeGapStatsForJitterCalc;
StDev _stdev;
MovingMinMaxAvg<quint64> _interframeTimeGapStatsForStatsPacket;
quint64 _lastPacketReceivedTime;
MovingMinMaxAvg<quint64> _timeGapStatsForDesiredCalcOnTooManyStarves; // for Freddy's method
int _calculatedJitterBufferFramesUsingMaxGap;
StDev _stdevStatsForDesiredCalcOnTooManyStarves; // for Philip's method
int _calculatedJitterBufferFramesUsingStDev; // the most recent desired frames calculated by Philip's method
MovingMinMaxAvg<quint64> _timeGapStatsForDesiredReduction;
int _starveHistoryWindowSeconds;
RingBufferHistory<quint64> _starveHistory;
int _starveThreshold;
TimeWeightedAvg<int> _framesAvailableStat;
// this value is based on the time-weighted avg from _framesAvailableStat. it is only used for
// this value is periodically updated with the time-weighted avg from _framesAvailableStat. it is only used for
// dropping silent frames right now.
int _currentJitterBufferFrames;
MovingMinMaxAvg<quint64> _timeGapStatsForStatsPacket;
bool _repetitionWithFade;
};
float calculateRepeatedFrameFadeFactor(int indexOfRepeat);
#endif // hifi_InboundAudioStream_h
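A hedged example of configuring a stream through the new Settings struct; the numeric choices below are illustrative, not recommendations from this commit:

#include "InboundAudioStream.h"

// Hypothetical helper: build a Settings value for dynamic jitter buffering.
InboundAudioStream::Settings makeDynamicSettingsSketch() {
    InboundAudioStream::Settings s;                        // starts from the DEFAULT_* values above
    s._dynamicJitterBuffers = true;
    s._windowStarveThreshold = 3;                          // starves within window A before desired frames grow
    s._windowSecondsForDesiredCalcOnTooManyStarves = 50;   // window A length
    s._windowSecondsForDesiredReduction = 10;              // window B length
    s._repetitionWithFade = true;                          // repeat-and-fade instead of silence on loss
    return s;
}
// A live stream would then pick these up via stream.setSettings(makeDynamicSettingsSketch()).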

View file

@ -19,8 +19,8 @@
#include "InjectedAudioStream.h"
InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired) :
PositionalAudioStream(PositionalAudioStream::Injector, false, dynamicJitterBuffer, staticDesiredJitterBufferFrames, maxFramesOverDesired),
InjectedAudioStream::InjectedAudioStream(const QUuid& streamIdentifier, const InboundAudioStream::Settings& settings) :
PositionalAudioStream(PositionalAudioStream::Injector, false, settings),
_streamIdentifier(streamIdentifier),
_radius(0.0f),
_attenuationRatio(0)

View file

@ -18,7 +18,7 @@
class InjectedAudioStream : public PositionalAudioStream {
public:
InjectedAudioStream(const QUuid& streamIdentifier, bool dynamicJitterBuffer, int staticDesiredJitterBufferFrames, int maxFramesOverDesired);
InjectedAudioStream(const QUuid& streamIdentifier, const InboundAudioStream::Settings& settings);
float getRadius() const { return _radius; }
float getAttenuationRatio() const { return _attenuationRatio; }

View file

@ -11,13 +11,7 @@
#include "MixedAudioStream.h"
MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc)
: InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired, useStDevForJitterCalc)
MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings)
: InboundAudioStream(numFrameSamples, numFramesCapacity, settings)
{
}
int MixedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
// mixed audio packets do not have any info between the seq num and the audio data.
numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
return 0;
}

View file

@ -17,12 +17,9 @@
class MixedAudioStream : public InboundAudioStream {
public:
MixedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc);
MixedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings);
float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); }
protected:
int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
};
#endif // hifi_MixedAudioStream_h

View file

@ -11,35 +11,53 @@
#include "MixedProcessedAudioStream.h"
MixedProcessedAudioStream::MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc)
: InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired, useStDevForJitterCalc)
static const int STEREO_FACTOR = 2;
MixedProcessedAudioStream::MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings)
: InboundAudioStream(numFrameSamples, numFramesCapacity, settings)
{
}
void MixedProcessedAudioStream::outputFormatChanged(int outputFormatChannelCountTimesSampleRate) {
_outputFormatChannelsTimesSampleRate = outputFormatChannelCountTimesSampleRate;
int deviceOutputFrameSize = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * _outputFormatChannelsTimesSampleRate / SAMPLE_RATE;
int deviceOutputFrameSize = networkToDeviceSamples(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO);
_ringBuffer.resizeForFrameSize(deviceOutputFrameSize);
}
int MixedProcessedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
// mixed audio packets do not have any info between the seq num and the audio data.
int numNetworkSamples = packetAfterSeqNum.size() / sizeof(int16_t);
int MixedProcessedAudioStream::writeDroppableSilentSamples(int silentSamples) {
int deviceSilentSamplesWritten = InboundAudioStream::writeDroppableSilentSamples(networkToDeviceSamples(silentSamples));
emit addedSilence(deviceToNetworkSamples(deviceSilentSamplesWritten) / STEREO_FACTOR);
// since numAudioSamples is used to know how many samples to add for each dropped packet before this one,
// we want to set it to the number of device audio samples since this stream contains device audio samples, not network samples.
const int STEREO_DIVIDER = 2;
numAudioSamples = numNetworkSamples * _outputFormatChannelsTimesSampleRate / (STEREO_DIVIDER * SAMPLE_RATE);
return 0;
return deviceSilentSamplesWritten;
}
int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
int MixedProcessedAudioStream::writeLastFrameRepeatedWithFade(int samples) {
int deviceSamplesWritten = InboundAudioStream::writeLastFrameRepeatedWithFade(networkToDeviceSamples(samples));
emit addedLastFrameRepeatedWithFade(deviceToNetworkSamples(deviceSamplesWritten) / STEREO_FACTOR);
return deviceSamplesWritten;
}
int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int networkSamples) {
emit addedStereoSamples(packetAfterStreamProperties);
QByteArray outputBuffer;
emit processSamples(packetAfterStreamProperties, outputBuffer);
_ringBuffer.writeData(outputBuffer.data(), outputBuffer.size());
return packetAfterStreamProperties.size();
}
int MixedProcessedAudioStream::networkToDeviceSamples(int networkSamples) {
return (quint64)networkSamples * (quint64)_outputFormatChannelsTimesSampleRate / (quint64)(STEREO_FACTOR * SAMPLE_RATE);
}
int MixedProcessedAudioStream::deviceToNetworkSamples(int deviceSamples) {
return (quint64)deviceSamples * (quint64)(STEREO_FACTOR * SAMPLE_RATE) / (quint64)_outputFormatChannelsTimesSampleRate;
}
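networkToDeviceSamples and deviceToNetworkSamples above are pure ratio arithmetic between the network format (stereo at the network sample rate) and the device output format. A self-contained worked example of the same math, where the 24 kHz network rate, the stereo 44.1 kHz output device, and the 480-sample packet size are all assumed example values:

// Worked example of the sample-count conversion above (all values assumed).
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t STEREO_FACTOR = 2;
    const int64_t SAMPLE_RATE = 24000;                              // assumed network rate
    const int64_t outputFormatChannelsTimesSampleRate = 2 * 44100;  // assumed stereo 44.1 kHz device

    int64_t networkSamples = 480;                                   // example packet's worth of samples
    int64_t deviceSamples = networkSamples * outputFormatChannelsTimesSampleRate
                            / (STEREO_FACTOR * SAMPLE_RATE);
    // 480 * 88200 / 48000 = 882 device samples
    printf("%lld network samples -> %lld device samples\n",
           (long long)networkSamples, (long long)deviceSamples);
    return 0;
}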

View file

@ -14,21 +14,32 @@
#include "InboundAudioStream.h"
class Audio;
class MixedProcessedAudioStream : public InboundAudioStream {
Q_OBJECT
public:
MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc);
MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, const InboundAudioStream::Settings& settings);
signals:
void addedSilence(int silentSamplesPerChannel);
void addedLastFrameRepeatedWithFade(int samplesPerChannel);
void addedStereoSamples(const QByteArray& samples);
void processSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
public:
void outputFormatChanged(int outputFormatChannelCountTimesSampleRate);
protected:
int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
int writeDroppableSilentSamples(int silentSamples);
int writeLastFrameRepeatedWithFade(int samples);
int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int networkSamples);
private:
int networkToDeviceSamples(int networkSamples);
int deviceToNetworkSamples(int deviceSamples);
private:
int _outputFormatChannelsTimesSampleRate;

View file

@ -21,32 +21,41 @@
#include <PacketHeaders.h>
#include <UUID.h>
PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers,
int staticDesiredJitterBufferFrames, int maxFramesOverDesired) :
PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, const InboundAudioStream::Settings& settings) :
InboundAudioStream(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired),
AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, settings),
_type(type),
_position(0.0f, 0.0f, 0.0f),
_orientation(0.0f, 0.0f, 0.0f, 0.0f),
_shouldLoopbackForNode(false),
_isStereo(isStereo),
_lastPopOutputTrailingLoudness(0.0f),
_lastPopOutputLoudness(0.0f),
_listenerUnattenuatedZone(NULL)
{
// constant defined in AudioMixer.h. However, we don't want to include this here
// we will soon find a better common home for these audio-related constants
const int SAMPLE_PHASE_DELAY_AT_90 = 20;
_filter.initialize(SAMPLE_RATE, (NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (SAMPLE_PHASE_DELAY_AT_90 * 2)) / 2);
}
void PositionalAudioStream::updateLastPopOutputTrailingLoudness() {
float lastPopLoudness = _ringBuffer.getFrameLoudness(_lastPopOutput);
void PositionalAudioStream::resetStats() {
_lastPopOutputTrailingLoudness = 0.0f;
_lastPopOutputLoudness = 0.0f;
}
void PositionalAudioStream::updateLastPopOutputLoudnessAndTrailingLoudness() {
_lastPopOutputLoudness = _ringBuffer.getFrameLoudness(_lastPopOutput);
const int TRAILING_AVERAGE_FRAMES = 100;
const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
const float LOUDNESS_EPSILON = 0.000001f;
if (lastPopLoudness >= _lastPopOutputTrailingLoudness) {
_lastPopOutputTrailingLoudness = lastPopLoudness;
if (_lastPopOutputLoudness >= _lastPopOutputTrailingLoudness) {
_lastPopOutputTrailingLoudness = _lastPopOutputLoudness;
} else {
_lastPopOutputTrailingLoudness = (_lastPopOutputTrailingLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * lastPopLoudness);
_lastPopOutputTrailingLoudness = (_lastPopOutputTrailingLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * _lastPopOutputLoudness);
if (_lastPopOutputTrailingLoudness < LOUDNESS_EPSILON) {
_lastPopOutputTrailingLoudness = 0;
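The loudness update above is a one-sided exponential moving average: the trailing value jumps up immediately when a frame is louder, but decays slowly (with a time constant of roughly 100 frames) when frames get quieter, and snaps tiny values to zero. The same update rule, restated as a standalone function directly from the code shown:

// Standalone restatement of the trailing-loudness update above.
float updateTrailingLoudness(float trailing, float current) {
    const int TRAILING_AVERAGE_FRAMES = 100;
    const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
    const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
    const float LOUDNESS_EPSILON = 0.000001f;

    if (current >= trailing) {
        return current;  // attack: follow louder frames instantly
    }
    trailing = trailing * PREVIOUS_FRAMES_RATIO + current * CURRENT_FRAME_RATIO;
    return (trailing < LOUDNESS_EPSILON) ? 0.0f : trailing;  // snap tiny values to zero
}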

View file

@ -16,6 +16,8 @@
#include <AABox.h>
#include "InboundAudioStream.h"
#include "AudioFilter.h"
#include "AudioFilterBank.h"
const int AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY = 100;
@ -27,13 +29,15 @@ public:
Injector
};
PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames,
int maxFramesOverDesired);
PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, const InboundAudioStream::Settings& settings);
virtual void resetStats();
virtual AudioStreamStats getAudioStreamStats() const;
void updateLastPopOutputTrailingLoudness();
void updateLastPopOutputLoudnessAndTrailingLoudness();
float getLastPopOutputTrailingLoudness() const { return _lastPopOutputTrailingLoudness; }
float getLastPopOutputLoudness() const { return _lastPopOutputLoudness; }
bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
bool isStereo() const { return _isStereo; }
@ -44,6 +48,8 @@ public:
void setListenerUnattenuatedZone(AABox* listenerUnattenuatedZone) { _listenerUnattenuatedZone = listenerUnattenuatedZone; }
AudioFilterHSF1s& getFilter() { return _filter; }
protected:
// disallow copying of PositionalAudioStream objects
PositionalAudioStream(const PositionalAudioStream&);
@ -60,7 +66,10 @@ protected:
bool _isStereo;
float _lastPopOutputTrailingLoudness;
float _lastPopOutputLoudness;
AABox* _listenerUnattenuatedZone;
AudioFilterHSF1s _filter;
};
#endif // hifi_PositionalAudioStream_h

View file

@ -82,6 +82,17 @@ Sound::Sound(const QUrl& sampleURL, QObject* parent) :
connect(soundDownload, SIGNAL(error(QNetworkReply::NetworkError)), this, SLOT(replyError(QNetworkReply::NetworkError)));
}
Sound::Sound(const QByteArray byteArray, QObject* parent) :
QObject(parent),
_byteArray(byteArray),
_hasDownloaded(true)
{
}
void Sound::append(const QByteArray byteArray) {
_byteArray.append(byteArray);
}
void Sound::replyFinished() {
QNetworkReply* reply = reinterpret_cast<QNetworkReply*>(sender());

View file

@ -22,6 +22,8 @@ class Sound : public QObject {
public:
Sound(const QUrl& sampleURL, QObject* parent = NULL);
Sound(float volume, float frequency, float duration, float decay, QObject* parent = NULL);
Sound(const QByteArray byteArray, QObject* parent = NULL);
void append(const QByteArray byteArray);
bool hasDownloaded() const { return _hasDownloaded; }

View file

@ -135,9 +135,9 @@ QByteArray AvatarData::toByteArray() {
// lazily allocate memory for HeadData in case we're not an Avatar instance
if (!_headData) {
_headData = new HeadData(this);
if (_forceFaceshiftConnected) {
_headData->_isFaceshiftConnected = true;
}
}
if (_forceFaceshiftConnected) {
_headData->_isFaceshiftConnected = true;
}
QByteArray avatarDataByteArray;
@ -153,7 +153,7 @@ QByteArray AvatarData::toByteArray() {
destinationBuffer += packFloatAngleToTwoByte(destinationBuffer, _bodyYaw);
destinationBuffer += packFloatAngleToTwoByte(destinationBuffer, _bodyPitch);
destinationBuffer += packFloatAngleToTwoByte(destinationBuffer, _bodyRoll);
// Body scale
destinationBuffer += packFloatRatioToTwoByte(destinationBuffer, _targetScale);
@ -585,6 +585,101 @@ bool AvatarData::hasReferential() {
return _referential != NULL;
}
bool AvatarData::isPlaying() {
if (!_player) {
return false;
}
if (QThread::currentThread() != thread()) {
bool result;
QMetaObject::invokeMethod(this, "isPlaying", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(bool, result));
return result;
}
return _player && _player->isPlaying();
}
qint64 AvatarData::playerElapsed() {
if (!_player) {
return 0;
}
if (QThread::currentThread() != thread()) {
qint64 result;
QMetaObject::invokeMethod(this, "playerElapsed", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(qint64, result));
return result;
}
return _player->elapsed();
}
qint64 AvatarData::playerLength() {
if (!_player) {
return 0;
}
if (QThread::currentThread() != thread()) {
qint64 result;
QMetaObject::invokeMethod(this, "playerLength", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(qint64, result));
return result;
}
return _player->getRecording()->getLength();
}
void AvatarData::loadRecording(QString filename) {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "loadRecording", Qt::BlockingQueuedConnection,
Q_ARG(QString, filename));
return;
}
if (!_player) {
_player = PlayerPointer(new Player(this));
}
_player->loadFromFile(filename);
}
void AvatarData::startPlaying() {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "startPlaying", Qt::BlockingQueuedConnection);
return;
}
if (!_player) {
_player = PlayerPointer(new Player(this));
}
_player->startPlaying();
}
void AvatarData::setPlayFromCurrentLocation(bool playFromCurrentLocation) {
    if (_player) {
        _player->setPlayFromCurrentLocation(playFromCurrentLocation);
    }
}
void AvatarData::setPlayerLoop(bool loop) {
    if (_player) {
        _player->setLoop(loop);
    }
}
void AvatarData::play() {
if (isPlaying()) {
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "play", Qt::BlockingQueuedConnection);
return;
}
_player->play();
}
}
void AvatarData::stopPlaying() {
if (!_player) {
return;
}
if (QThread::currentThread() != thread()) {
QMetaObject::invokeMethod(this, "stopPlaying", Qt::BlockingQueuedConnection);
return;
}
if (_player) {
_player->stopPlaying();
}
}
void AvatarData::changeReferential(Referential *ref) {
delete _referential;
_referential = ref;
@ -683,6 +778,44 @@ glm::quat AvatarData::getJointRotation(const QString& name) const {
return getJointRotation(getJointIndex(name));
}
QVector<glm::quat> AvatarData::getJointRotations() const {
if (QThread::currentThread() != thread()) {
QVector<glm::quat> result;
QMetaObject::invokeMethod(const_cast<AvatarData*>(this),
"getJointRotations", Qt::BlockingQueuedConnection,
Q_RETURN_ARG(QVector<glm::quat>, result));
return result;
}
QVector<glm::quat> jointRotations(_jointData.size());
for (int i = 0; i < _jointData.size(); ++i) {
jointRotations[i] = _jointData[i].rotation;
}
return jointRotations;
}
void AvatarData::setJointRotations(QVector<glm::quat> jointRotations) {
if (QThread::currentThread() != thread()) {
    QMetaObject::invokeMethod(const_cast<AvatarData*>(this),
        "setJointRotations", Qt::BlockingQueuedConnection,
        Q_ARG(QVector<glm::quat>, jointRotations));
    return;
}
if (_jointData.size() < jointRotations.size()) {
_jointData.resize(jointRotations.size());
}
for (int i = 0; i < jointRotations.size(); ++i) {
if (i < _jointData.size()) {
setJointData(i, jointRotations[i]);
}
}
}
void AvatarData::clearJointsData() {
for (int i = 0; i < _jointData.size(); ++i) {
clearJointData(i);
}
}
bool AvatarData::hasIdentityChangedAfterParsing(const QByteArray &packet) {
QDataStream packetStream(packet);
packetStream.skipRawData(numBytesForPacketHeader(packet));
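All of the new player entry points above share one pattern: if called from a thread other than the one that owns the AvatarData, they bounce the call to the owning thread with a blocking queued invocation and return the marshalled result. The skeleton of that pattern, isolated with a hypothetical method name (somePlayerQuery is not a real method; it would need to be declared as a slot or Q_INVOKABLE in the header):

// Skeleton of the thread-bounce pattern used by the player methods above.
qint64 AvatarData::somePlayerQuery() {  // hypothetical method for illustration
    if (QThread::currentThread() != thread()) {
        qint64 result;
        QMetaObject::invokeMethod(this, "somePlayerQuery", Qt::BlockingQueuedConnection,
                                  Q_RETURN_ARG(qint64, result));
        return result;  // value computed on the owning thread
    }
    // past this point we are guaranteed to be on the owning thread
    return 0;           // real work would happen here
}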

View file

@ -49,6 +49,7 @@ typedef unsigned long long quint64;
#include <Node.h>
#include "Recorder.h"
#include "Referential.h"
#include "HeadData.h"
#include "HandData.h"
@ -210,7 +211,12 @@ public:
Q_INVOKABLE void clearJointData(const QString& name);
Q_INVOKABLE bool isJointDataValid(const QString& name) const;
Q_INVOKABLE glm::quat getJointRotation(const QString& name) const;
Q_INVOKABLE virtual QVector<glm::quat> getJointRotations() const;
Q_INVOKABLE virtual void setJointRotations(QVector<glm::quat> jointRotations);
Q_INVOKABLE virtual void clearJointsData();
/// Returns the index of the joint with the specified name, or -1 if not found/unknown.
Q_INVOKABLE virtual int getJointIndex(const QString& name) const { return _jointIndices.value(name) - 1; }
@ -293,6 +299,16 @@ public slots:
void setSessionUUID(const QUuid& sessionUUID) { _sessionUUID = sessionUUID; }
bool hasReferential();
bool isPlaying();
qint64 playerElapsed();
qint64 playerLength();
void loadRecording(QString filename);
void startPlaying();
void setPlayFromCurrentLocation(bool playFromCurrentLocation);
void setPlayerLoop(bool loop);
void play();
void stopPlaying();
protected:
QUuid _sessionUUID;
glm::vec3 _position;
@ -346,6 +362,8 @@ protected:
QWeakPointer<Node> _owningAvatarMixer;
QElapsedTimer _lastUpdateTimer;
PlayerPointer _player;
/// Loads the joint indices, names from the FST file (if any)
virtual void updateJointMappings();
void changeReferential(Referential* ref);

View file

@ -41,6 +41,10 @@ public:
void setBasePitch(float pitch) { _basePitch = glm::clamp(pitch, MIN_HEAD_PITCH, MAX_HEAD_PITCH); }
float getBaseRoll() const { return _baseRoll; }
void setBaseRoll(float roll) { _baseRoll = glm::clamp(roll, MIN_HEAD_ROLL, MAX_HEAD_ROLL); }
virtual void setFinalYaw(float finalYaw) { _baseYaw = finalYaw; }
virtual void setFinalPitch(float finalPitch) { _basePitch = finalPitch; }
virtual void setFinalRoll(float finalRoll) { _baseRoll = finalRoll; }
virtual float getFinalYaw() const { return _baseYaw; }
virtual float getFinalPitch() const { return _basePitch; }
virtual float getFinalRoll() const { return _baseRoll; }
@ -56,6 +60,7 @@ public:
void setBlendshape(QString name, float val);
const QVector<float>& getBlendshapeCoefficients() const { return _blendshapeCoefficients; }
void setBlendshapeCoefficients(const QVector<float>& blendshapeCoefficients) { _blendshapeCoefficients = blendshapeCoefficients; }
float getPupilDilation() const { return _pupilDilation; }
void setPupilDilation(float pupilDilation) { _pupilDilation = pupilDilation; }
@ -68,6 +73,15 @@ public:
const glm::vec3& getLookAtPosition() const { return _lookAtPosition; }
void setLookAtPosition(const glm::vec3& lookAtPosition) { _lookAtPosition = lookAtPosition; }
float getLeanSideways() const { return _leanSideways; }
float getLeanForward() const { return _leanForward; }
virtual float getFinalLeanSideways() const { return _leanSideways; }
virtual float getFinalLeanForward() const { return _leanForward; }
void setLeanSideways(float leanSideways) { _leanSideways = leanSideways; }
void setLeanForward(float leanForward) { _leanForward = leanForward; }
friend class AvatarData;
protected:

View file

@ -0,0 +1,610 @@
//
// Recorder.cpp
//
//
// Created by Clement on 8/7/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <GLMHelpers.h>
#include <QFile>
#include <QMetaObject>
#include <QObject>
#include "AvatarData.h"
#include "Recorder.h"
void RecordingFrame::setBlendshapeCoefficients(QVector<float> blendshapeCoefficients) {
_blendshapeCoefficients = blendshapeCoefficients;
}
void RecordingFrame::setJointRotations(QVector<glm::quat> jointRotations) {
_jointRotations = jointRotations;
}
void RecordingFrame::setTranslation(glm::vec3 translation) {
_translation = translation;
}
void RecordingFrame::setRotation(glm::quat rotation) {
_rotation = rotation;
}
void RecordingFrame::setScale(float scale) {
_scale = scale;
}
void RecordingFrame::setHeadRotation(glm::quat headRotation) {
_headRotation = headRotation;
}
void RecordingFrame::setLeanSideways(float leanSideways) {
_leanSideways = leanSideways;
}
void RecordingFrame::setLeanForward(float leanForward) {
_leanForward = leanForward;
}
Recording::Recording() : _audio(NULL) {
}
Recording::~Recording() {
delete _audio;
}
void Recording::addFrame(int timestamp, RecordingFrame &frame) {
_timestamps << timestamp;
_frames << frame;
}
void Recording::addAudioPacket(QByteArray byteArray) {
if (!_audio) {
    _audio = new Sound(byteArray);
    return; // the initial packet is already stored; don't append it twice
}
_audio->append(byteArray);
}
void Recording::clear() {
_timestamps.clear();
_frames.clear();
delete _audio;
_audio = NULL;
}
Recorder::Recorder(AvatarData* avatar) :
_recording(new Recording()),
_avatar(avatar)
{
_timer.invalidate();
}
bool Recorder::isRecording() const {
return _timer.isValid();
}
qint64 Recorder::elapsed() const {
if (isRecording()) {
return _timer.elapsed();
} else {
return 0;
}
}
void Recorder::startRecording() {
qDebug() << "Recorder::startRecording()";
_recording->clear();
_timer.start();
RecordingFrame frame;
frame.setBlendshapeCoefficients(_avatar->getHeadData()->getBlendshapeCoefficients());
frame.setJointRotations(_avatar->getJointRotations());
frame.setTranslation(_avatar->getPosition());
frame.setRotation(_avatar->getOrientation());
frame.setScale(_avatar->getTargetScale());
const HeadData* head = _avatar->getHeadData();
glm::quat rotation = glm::quat(glm::radians(glm::vec3(head->getFinalPitch(),
head->getFinalYaw(),
head->getFinalRoll())));
frame.setHeadRotation(rotation);
frame.setLeanForward(_avatar->getHeadData()->getLeanForward());
frame.setLeanSideways(_avatar->getHeadData()->getLeanSideways());
_recording->addFrame(0, frame);
}
void Recorder::stopRecording() {
qDebug() << "Recorder::stopRecording()";
_timer.invalidate();
qDebug().nospace() << "Recorded " << _recording->getFrameNumber() << " frames during " << _recording->getLength() << " msec (" << _recording->getFrameNumber() / (_recording->getLength() / 1000.0f) << " fps)";
}
void Recorder::saveToFile(QString file) {
if (_recording->isEmpty()) {
    qDebug() << "Cannot save recording to file, recording is empty.";
    return;
}
writeRecordingToFile(_recording, file);
}
void Recorder::record() {
if (isRecording()) {
const RecordingFrame& referenceFrame = _recording->getFrame(0);
RecordingFrame frame;
frame.setBlendshapeCoefficients(_avatar->getHeadData()->getBlendshapeCoefficients());
frame.setJointRotations(_avatar->getJointRotations());
frame.setTranslation(_avatar->getPosition() - referenceFrame.getTranslation());
frame.setRotation(glm::inverse(referenceFrame.getRotation()) * _avatar->getOrientation());
frame.setScale(_avatar->getTargetScale() / referenceFrame.getScale());
const HeadData* head = _avatar->getHeadData();
glm::quat rotation = glm::quat(glm::radians(glm::vec3(head->getFinalPitch(),
head->getFinalYaw(),
head->getFinalRoll())));
frame.setHeadRotation(rotation);
frame.setLeanForward(_avatar->getHeadData()->getLeanForward());
frame.setLeanSideways(_avatar->getHeadData()->getLeanSideways());
_recording->addFrame(_timer.elapsed(), frame);
}
}
void Recorder::record(char* samples, int size) {
QByteArray byteArray(samples, size);
_recording->addAudioPacket(byteArray);
}
Player::Player(AvatarData* avatar) :
_recording(new Recording()),
_avatar(avatar),
_audioThread(NULL),
_startingScale(1.0f),
_playFromCurrentPosition(true),
_loop(false)
{
_timer.invalidate();
_options.setLoop(false);
_options.setVolume(1.0f);
}
bool Player::isPlaying() const {
return _timer.isValid();
}
qint64 Player::elapsed() const {
if (isPlaying()) {
return _timer.elapsed();
} else {
return 0;
}
}
glm::quat Player::getHeadRotation() {
if (!computeCurrentFrame()) {
qWarning() << "Incorrect use of Player::getHeadRotation()";
return glm::quat();
}
if (_currentFrame == 0) {
return _recording->getFrame(_currentFrame).getHeadRotation();
}
return _recording->getFrame(0).getHeadRotation() *
_recording->getFrame(_currentFrame).getHeadRotation();
}
float Player::getLeanSideways() {
if (!computeCurrentFrame()) {
qWarning() << "Incorrect use of Player::getLeanSideways()";
return 0.0f;
}
return _recording->getFrame(_currentFrame).getLeanSideways();
}
float Player::getLeanForward() {
if (!computeCurrentFrame()) {
qWarning() << "Incorrect use of Player::getLeanForward()";
return 0.0f;
}
return _recording->getFrame(_currentFrame).getLeanForward();
}
void Player::startPlaying() {
if (_recording && _recording->getFrameNumber() > 0) {
qDebug() << "Recorder::startPlaying()";
_currentFrame = 0;
// Setup audio thread
_audioThread = new QThread();
_options.setPosition(_avatar->getPosition());
_options.setOrientation(_avatar->getOrientation());
_injector.reset(new AudioInjector(_recording->getAudio(), _options), &QObject::deleteLater);
_injector->moveToThread(_audioThread);
_audioThread->start();
QMetaObject::invokeMethod(_injector.data(), "injectAudio", Qt::QueuedConnection);
// Fake faceshift connection
_avatar->setForceFaceshiftConnected(true);
if (_playFromCurrentPosition) {
_startingPosition = _avatar->getPosition();
_startingRotation = _avatar->getOrientation();
_startingScale = _avatar->getTargetScale();
} else {
_startingPosition = _recording->getFrame(0).getTranslation();
_startingRotation = _recording->getFrame(0).getRotation();
_startingScale = _recording->getFrame(0).getScale();
}
_timer.start();
}
}
void Player::stopPlaying() {
if (!isPlaying()) {
return;
}
_timer.invalidate();
_avatar->clearJointsData();
// Cleanup audio thread
_injector->stop();
QObject::connect(_injector.data(), &AudioInjector::finished,
_injector.data(), &AudioInjector::deleteLater);
QObject::connect(_injector.data(), &AudioInjector::destroyed,
_audioThread, &QThread::quit);
QObject::connect(_audioThread, &QThread::finished,
_audioThread, &QThread::deleteLater);
_injector.clear();
_audioThread = NULL;
// Turn off fake faceshift connection
_avatar->setForceFaceshiftConnected(false);
qDebug() << "Recorder::stopPlaying()";
}
void Player::loadFromFile(QString file) {
if (_recording) {
_recording->clear();
} else {
_recording = RecordingPointer(new Recording());
}
readRecordingFromFile(_recording, file);
}
void Player::loadRecording(RecordingPointer recording) {
_recording = recording;
}
void Player::play() {
computeCurrentFrame();
if (_currentFrame < 0 || (_currentFrame >= _recording->getFrameNumber() - 1)) {
// If it's the end of the recording, stop playing
stopPlaying();
if (_loop) {
startPlaying();
}
return;
}
if (_currentFrame == 0) {
// Don't play frame 0
// only meant to store absolute values
return;
}
_avatar->setPosition(_startingPosition +
glm::inverse(_recording->getFrame(0).getRotation()) * _startingRotation *
_recording->getFrame(_currentFrame).getTranslation());
_avatar->setOrientation(_startingRotation *
_recording->getFrame(_currentFrame).getRotation());
_avatar->setTargetScale(_startingScale *
_recording->getFrame(_currentFrame).getScale());
_avatar->setJointRotations(_recording->getFrame(_currentFrame).getJointRotations());
HeadData* head = const_cast<HeadData*>(_avatar->getHeadData());
if (head) {
head->setBlendshapeCoefficients(_recording->getFrame(_currentFrame).getBlendshapeCoefficients());
head->setLeanSideways(_recording->getFrame(_currentFrame).getLeanSideways());
head->setLeanForward(_recording->getFrame(_currentFrame).getLeanForward());
glm::vec3 eulers = glm::degrees(safeEulerAngles(_recording->getFrame(_currentFrame).getHeadRotation()));
head->setFinalPitch(eulers.x);
head->setFinalYaw(eulers.y);
head->setFinalRoll(eulers.z);
}
_options.setPosition(_avatar->getPosition());
_options.setOrientation(_avatar->getOrientation());
_injector->setOptions(_options);
}
void Player::setPlayFromCurrentLocation(bool playFromCurrentLocation) {
_playFromCurrentPosition = playFromCurrentLocation;
}
void Player::setLoop(bool loop) {
_loop = loop;
}
bool Player::computeCurrentFrame() {
if (!isPlaying()) {
_currentFrame = -1;
return false;
}
if (_currentFrame < 0) {
_currentFrame = 0;
}
while (_currentFrame < _recording->getFrameNumber() - 1 &&
_recording->getFrameTimestamp(_currentFrame) < _timer.elapsed()) {
++_currentFrame;
}
return true;
}
void writeRecordingToFile(RecordingPointer recording, QString filename) {
if (!recording || recording->getFrameNumber() < 1) {
qDebug() << "Can't save empty recording";
return;
}
qDebug() << "Writing recording to " << filename << ".";
QElapsedTimer timer;
QFile file(filename);
if (!file.open(QIODevice::WriteOnly)){
return;
}
timer.start();
QDataStream fileStream(&file);
fileStream << recording->_timestamps;
RecordingFrame& baseFrame = recording->_frames[0];
int totalLength = 0;
// Blendshape coefficients
fileStream << baseFrame._blendshapeCoefficients;
totalLength += baseFrame._blendshapeCoefficients.size();
// Joint Rotations
int jointRotationSize = baseFrame._jointRotations.size();
fileStream << jointRotationSize;
for (int i = 0; i < jointRotationSize; ++i) {
fileStream << baseFrame._jointRotations[i].x << baseFrame._jointRotations[i].y << baseFrame._jointRotations[i].z << baseFrame._jointRotations[i].w;
}
totalLength += jointRotationSize;
// Translation
fileStream << baseFrame._translation.x << baseFrame._translation.y << baseFrame._translation.z;
totalLength += 1;
// Rotation
fileStream << baseFrame._rotation.x << baseFrame._rotation.y << baseFrame._rotation.z << baseFrame._rotation.w;
totalLength += 1;
// Scale
fileStream << baseFrame._scale;
totalLength += 1;
// Head Rotation
fileStream << baseFrame._headRotation.x << baseFrame._headRotation.y << baseFrame._headRotation.z << baseFrame._headRotation.w;
totalLength += 1;
// Lean Sideways
fileStream << baseFrame._leanSideways;
totalLength += 1;
// Lean Forward
fileStream << baseFrame._leanForward;
totalLength += 1;
for (int i = 1; i < recording->_timestamps.size(); ++i) {
QBitArray mask(totalLength);
int maskIndex = 0;
QByteArray buffer;
QDataStream stream(&buffer, QIODevice::WriteOnly);
RecordingFrame& previousFrame = recording->_frames[i - 1];
RecordingFrame& frame = recording->_frames[i];
// Blendshape coefficients
for (int i = 0; i < frame._blendshapeCoefficients.size(); ++i) {
if (frame._blendshapeCoefficients[i] != previousFrame._blendshapeCoefficients[i]) {
stream << frame._blendshapeCoefficients[i];
mask.setBit(maskIndex);
}
maskIndex++;
}
// Joint Rotations
for (int i = 0; i < frame._jointRotations.size(); ++i) {
if (frame._jointRotations[i] != previousFrame._jointRotations[i]) {
stream << frame._jointRotations[i].x << frame._jointRotations[i].y << frame._jointRotations[i].z << frame._jointRotations[i].w;
mask.setBit(maskIndex);
}
maskIndex++;
}
// Translation
if (frame._translation != previousFrame._translation) {
stream << frame._translation.x << frame._translation.y << frame._translation.z;
mask.setBit(maskIndex);
}
maskIndex++;
// Rotation
if (frame._rotation != previousFrame._rotation) {
stream << frame._rotation.x << frame._rotation.y << frame._rotation.z << frame._rotation.w;
mask.setBit(maskIndex);
}
maskIndex++;
// Scale
if (frame._scale != previousFrame._scale) {
stream << frame._scale;
mask.setBit(maskIndex);
}
maskIndex++;
// Head Rotation
if (frame._headRotation != previousFrame._headRotation) {
stream << frame._headRotation.x << frame._headRotation.y << frame._headRotation.z << frame._headRotation.w;
mask.setBit(maskIndex);
}
maskIndex++;
// Lean Sideways
if (frame._leanSideways != previousFrame._leanSideways) {
stream << frame._leanSideways;
mask.setBit(maskIndex);
}
maskIndex++;
// Lean Forward
if (frame._leanForward != previousFrame._leanForward) {
stream << frame._leanForward;
mask.setBit(maskIndex);
}
maskIndex++;
fileStream << mask;
fileStream << buffer;
}
fileStream << (recording->_audio ? recording->_audio->getByteArray() : QByteArray());
qDebug() << "Wrote " << file.size() << " bytes in " << timer.elapsed() << " ms.";
}
RecordingPointer readRecordingFromFile(RecordingPointer recording, QString filename) {
qDebug() << "Reading recording from " << filename << ".";
if (!recording) {
recording.reset(new Recording());
}
QElapsedTimer timer;
QFile file(filename);
if (!file.open(QIODevice::ReadOnly)){
return recording;
}
timer.start();
QDataStream fileStream(&file);
fileStream >> recording->_timestamps;
RecordingFrame baseFrame;
// Blendshape coefficients
fileStream >> baseFrame._blendshapeCoefficients;
// Joint Rotations
int jointRotationSize;
fileStream >> jointRotationSize;
baseFrame._jointRotations.resize(jointRotationSize);
for (int i = 0; i < jointRotationSize; ++i) {
fileStream >> baseFrame._jointRotations[i].x >> baseFrame._jointRotations[i].y >> baseFrame._jointRotations[i].z >> baseFrame._jointRotations[i].w;
}
fileStream >> baseFrame._translation.x >> baseFrame._translation.y >> baseFrame._translation.z;
fileStream >> baseFrame._rotation.x >> baseFrame._rotation.y >> baseFrame._rotation.z >> baseFrame._rotation.w;
fileStream >> baseFrame._scale;
fileStream >> baseFrame._headRotation.x >> baseFrame._headRotation.y >> baseFrame._headRotation.z >> baseFrame._headRotation.w;
fileStream >> baseFrame._leanSideways;
fileStream >> baseFrame._leanForward;
recording->_frames << baseFrame;
for (int i = 1; i < recording->_timestamps.size(); ++i) {
QBitArray mask;
QByteArray buffer;
QDataStream stream(&buffer, QIODevice::ReadOnly);
RecordingFrame frame;
RecordingFrame& previousFrame = recording->_frames.last();
fileStream >> mask;
fileStream >> buffer;
int maskIndex = 0;
// Blendshape Coefficients
frame._blendshapeCoefficients.resize(baseFrame._blendshapeCoefficients.size());
for (int i = 0; i < baseFrame._blendshapeCoefficients.size(); ++i) {
if (mask[maskIndex++]) {
stream >> frame._blendshapeCoefficients[i];
} else {
frame._blendshapeCoefficients[i] = previousFrame._blendshapeCoefficients[i];
}
}
// Joint Rotations
frame._jointRotations.resize(baseFrame._jointRotations.size());
for (int i = 0; i < baseFrame._jointRotations.size(); ++i) {
if (mask[maskIndex++]) {
stream >> frame._jointRotations[i].x >> frame._jointRotations[i].y >> frame._jointRotations[i].z >> frame._jointRotations[i].w;
} else {
frame._jointRotations[i] = previousFrame._jointRotations[i];
}
}
if (mask[maskIndex++]) {
stream >> frame._translation.x >> frame._translation.y >> frame._translation.z;
} else {
frame._translation = previousFrame._translation;
}
if (mask[maskIndex++]) {
stream >> frame._rotation.x >> frame._rotation.y >> frame._rotation.z >> frame._rotation.w;
} else {
frame._rotation = previousFrame._rotation;
}
if (mask[maskIndex++]) {
stream >> frame._scale;
} else {
frame._scale = previousFrame._scale;
}
if (mask[maskIndex++]) {
stream >> frame._headRotation.x >> frame._headRotation.y >> frame._headRotation.z >> frame._headRotation.w;
} else {
frame._headRotation = previousFrame._headRotation;
}
if (mask[maskIndex++]) {
stream >> frame._leanSideways;
} else {
frame._leanSideways = previousFrame._leanSideways;
}
if (mask[maskIndex++]) {
stream >> frame._leanForward;
} else {
frame._leanForward = previousFrame._leanForward;
}
recording->_frames << frame;
}
QByteArray audioArray;
fileStream >> audioArray;
recording->addAudioPacket(audioArray);
qDebug() << "Read " << file.size() << " bytes in " << timer.elapsed() << " ms.";
return recording;
}
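writeRecordingToFile and readRecordingFromFile above implement a simple delta encoding: frame 0 is stored in full, and each later frame stores a QBitArray mask with one bit per field, followed by a buffer containing only the fields whose bits are set; on read, fields with a clear bit are copied from the previous frame. A compact sketch of the same scheme, reduced to a single float field:

// Sketch of the mask+buffer delta scheme used above, reduced to one field.
#include <QBitArray>
#include <QByteArray>
#include <QDataStream>

void writeDelta(QDataStream& fileStream, float previous, float current) {
    QBitArray mask(1);
    QByteArray buffer;
    QDataStream stream(&buffer, QIODevice::WriteOnly);
    if (current != previous) {
        stream << current;        // payload is only written when the field changed
        mask.setBit(0);
    }
    fileStream << mask << buffer; // mask always written, payload only if dirty
}

float readDelta(QDataStream& fileStream, float previous) {
    QBitArray mask;
    QByteArray buffer;
    fileStream >> mask >> buffer;
    QDataStream stream(&buffer, QIODevice::ReadOnly);
    float value = previous;       // default: carry the previous frame's value
    if (mask.size() > 0 && mask.testBit(0)) {
        stream >> value;
    }
    return value;
}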

Some files were not shown because too many files have changed in this diff.