Mirror of https://github.com/overte-org/overte.git (synced 2025-08-10 00:13:29 +02:00)

commit 21fa833f19

Merge branch 'master' of https://github.com/highfidelity/hifi into animate

Conflicts:
	interface/src/Application.cpp
	libraries/shared/src/SharedUtil.cpp
	libraries/shared/src/SharedUtil.h

40 changed files with 2580 additions and 142 deletions
domain-server/src/DomainServer.cpp

@@ -66,18 +66,6 @@ DomainServer::DomainServer(int argc, char* argv[]) :
 
     LimitedNodeList* nodeList = LimitedNodeList::getInstance();
 
-#if defined(IP_DONTFRAG) || defined(IP_MTU_DISCOVER)
-    qDebug() << "Making required DTLS changes to NodeList DTLS socket.";
-
-    int socketHandle = LimitedNodeList::getInstance()->getDTLSSocket().socketDescriptor();
-#if defined(IP_DONTFRAG)
-    int optValue = 1;
-    setsockopt(socketHandle, IPPROTO_IP, IP_DONTFRAG, (const void*) optValue, sizeof(optValue));
-#elif defined(IP_MTU_DISCOVER)
-    int optValue = 1;
-    setsockopt(socketHandle, IPPROTO_IP, IP_MTU_DISCOVER, (const void*) optValue, sizeof(optValue));
-#endif
-#endif
     // connect our socket to read datagrams received on the DTLS socket
     connect(&nodeList->getDTLSSocket(), &QUdpSocket::readyRead, this, &DomainServer::readAvailableDTLSDatagrams);
 }
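The deleted block had a latent bug worth noting: it passed the option value itself, cast to a pointer, as setsockopt's fourth argument, where the call expects the address of the value. For reference, a corrected sketch of the same DF-bit setup, assuming POSIX sockets (disableFragmentation is an illustrative helper, not a function in this codebase; on Linux, IP_MTU_DISCOVER takes an IP_PMTUDISC_* mode rather than a plain 1):

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    // Ask the kernel to set the DF (don't fragment) bit on this UDP socket.
    void disableFragmentation(int socketHandle) {
    #if defined(IP_DONTFRAG)
        int optValue = 1;
        setsockopt(socketHandle, IPPROTO_IP, IP_DONTFRAG, (const void*) &optValue, sizeof(optValue));
    #elif defined(IP_MTU_DISCOVER)
        int optValue = IP_PMTUDISC_DO;  // illustrative: sets DF and enables path-MTU discovery
        setsockopt(socketHandle, IPPROTO_IP, IP_MTU_DISCOVER, (const void*) &optValue, sizeof(optValue));
    #endif
    }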
@@ -311,8 +299,7 @@ const NodeSet STATICALLY_ASSIGNED_NODES = NodeSet() << NodeType::AudioMixer
                                                     << NodeType::MetavoxelServer;
 
 
-void DomainServer::addNodeToNodeListAndConfirmConnection(const QByteArray& packet, const HifiSockAddr& senderSockAddr,
-                                                         const QJsonObject& authJsonObject) {
+void DomainServer::addNodeToNodeListAndConfirmConnection(const QByteArray& packet, const HifiSockAddr& senderSockAddr) {
     
     NodeType_t nodeType;
     HifiSockAddr publicSockAddr, localSockAddr;
 
@@ -336,7 +323,8 @@ void DomainServer::addNodeToNodeListAndConfirmConnection(const QByteArray& packet
     // create a new session UUID for this node
     QUuid nodeUUID = QUuid::createUuid();
     
-    SharedNodePointer newNode = LimitedNodeList::getInstance()->addOrUpdateNode(nodeUUID, nodeType, publicSockAddr, localSockAddr);
+    SharedNodePointer newNode = LimitedNodeList::getInstance()->addOrUpdateNode(nodeUUID, nodeType,
+                                                                                publicSockAddr, localSockAddr);
     
     // when the newNode is created the linked data is also created
     // if this was a static assignment set the UUID, set the sendingSockAddr
 
@@ -345,12 +333,6 @@ void DomainServer::addNodeToNodeListAndConfirmConnection(const QByteArray& packet
         nodeData->setStaticAssignmentUUID(assignmentUUID);
         nodeData->setSendingSockAddr(senderSockAddr);
         
-        if (!authJsonObject.isEmpty()) {
-            // pull the connection secret from the authJsonObject and set it as the connection secret for this node
-            QUuid connectionSecret(authJsonObject["data"].toObject()["connection_secret"].toString());
-            newNode->setConnectionSecret(connectionSecret);
-        }
-        
         // reply back to the user with a PacketTypeDomainList
         sendDomainListToNode(newNode, senderSockAddr, nodeInterestListFromPacket(packet, numPreInterestBytes));
     }
@@ -361,18 +343,6 @@ int DomainServer::parseNodeDataFromByteArray(NodeType_t& nodeType, HifiSockAddr&
     QDataStream packetStream(packet);
     packetStream.skipRawData(numBytesForPacketHeader(packet));
     
-    if (packetTypeForPacket(packet) == PacketTypeDomainConnectRequest) {
-        // we need to skip a quint8 that indicates if there is a registration token
-        // and potentially the registration token itself
-        quint8 hasRegistrationToken;
-        packetStream >> hasRegistrationToken;
-        
-        if (hasRegistrationToken) {
-            QByteArray registrationToken;
-            packetStream >> registrationToken;
-        }
-    }
-    
     packetStream >> nodeType;
     packetStream >> publicSockAddr >> localSockAddr;
 
@@ -648,7 +618,11 @@ void DomainServer::processDatagram(const QByteArray& receivedPacket, const HifiSockAddr& senderSockAddr) {
     if (nodeList->packetVersionAndHashMatch(receivedPacket)) {
         PacketType requestType = packetTypeForPacket(receivedPacket);
         
-        if (requestType == PacketTypeDomainListRequest) {
+        if (requestType == PacketTypeDomainConnectRequest) {
+            // add this node to our NodeList
+            // and send back session UUID right away
+            addNodeToNodeListAndConfirmConnection(receivedPacket, senderSockAddr);
+        } else if (requestType == PacketTypeDomainListRequest) {
             QUuid nodeUUID = uuidFromPacketHeader(receivedPacket);
             
             if (!nodeUUID.isNull() && nodeList->nodeWithUUID(nodeUUID)) {
 
@@ -665,12 +639,7 @@ void DomainServer::processDatagram(const QByteArray& receivedPacket, const HifiSockAddr& senderSockAddr) {
                 checkInNode->setLastHeardMicrostamp(timeNow);
                 
                 sendDomainListToNode(checkInNode, senderSockAddr, nodeInterestListFromPacket(receivedPacket, numNodeInfoBytes));
-            } else {
-                // new node - add this node to our NodeList
-                // and send back session UUID right away
-                addNodeToNodeListAndConfirmConnection(receivedPacket, senderSockAddr);
             }
-            
         } else if (requestType == PacketTypeNodeJsonStats) {
             SharedNodePointer matchingNode = nodeList->sendingNodeForPacket(receivedPacket);
             if (matchingNode) {
domain-server/src/DomainServer.h

@@ -24,7 +24,7 @@
 
 #include <Assignment.h>
 #include <HTTPManager.h>
-#include <NodeList.h>
+#include <LimitedNodeList.h>
 
 #include "DTLSServerSession.h"
 
@@ -57,8 +57,7 @@ private:
 
     void processDatagram(const QByteArray& receivedPacket, const HifiSockAddr& senderSockAddr);
 
-    void addNodeToNodeListAndConfirmConnection(const QByteArray& packet, const HifiSockAddr& senderSockAddr,
-                                               const QJsonObject& authJsonObject = QJsonObject());
+    void addNodeToNodeListAndConfirmConnection(const QByteArray& packet, const HifiSockAddr& senderSockAddr);
     int parseNodeDataFromByteArray(NodeType_t& nodeType, HifiSockAddr& publicSockAddr,
                                    HifiSockAddr& localSockAddr, const QByteArray& packet, const HifiSockAddr& senderSockAddr);
     NodeSet nodeInterestListFromPacket(const QByteArray& packet, int numPreceedingBytes);
examples/audioReflectorTools.js (new file, 711 lines)

@@ -0,0 +1,711 @@
//
//  audioReflectorTools.js
//  hifi
//
//  Created by Brad Hefta-Gaub on 2/14/14.
//  Copyright (c) 2014 HighFidelity, Inc. All rights reserved.
//
//  Tools for manipulating the attributes of the AudioReflector behavior
//
//

var delayScale = 100.0;
var fanoutScale = 10.0;
var speedScale = 20;
var factorScale = 5.0;
var localFactorScale = 1.0;
var reflectiveScale = 100.0;
var diffusionScale = 100.0;
var absorptionScale = 100.0;
var combFilterScale = 50.0;

// these three properties are bound together, if you change one, the others will also change
var reflectiveRatio = AudioReflector.getReflectiveRatio();
var diffusionRatio = AudioReflector.getDiffusionRatio();
var absorptionRatio = AudioReflector.getAbsorptionRatio();

var reflectiveThumbX;
var diffusionThumbX;
var absorptionThumbX;

function setReflectiveRatio(reflective) {
    var total = diffusionRatio + absorptionRatio + (reflective / reflectiveScale);
    diffusionRatio = diffusionRatio / total;
    absorptionRatio = absorptionRatio / total;
    reflectiveRatio = (reflective / reflectiveScale) / total;
    updateRatioValues();
}

function setDiffusionRatio(diffusion) {
    var total = (diffusion / diffusionScale) + absorptionRatio + reflectiveRatio;
    diffusionRatio = (diffusion / diffusionScale) / total;
    absorptionRatio = absorptionRatio / total;
    reflectiveRatio = reflectiveRatio / total;
    updateRatioValues();
}

function setAbsorptionRatio(absorption) {
    var total = diffusionRatio + (absorption / absorptionScale) + reflectiveRatio;
    diffusionRatio = diffusionRatio / total;
    absorptionRatio = (absorption / absorptionScale) / total;
    reflectiveRatio = reflectiveRatio / total;
    updateRatioValues();
}

function updateRatioSliders() {
    reflectiveThumbX = reflectiveMinThumbX + ((reflectiveMaxThumbX - reflectiveMinThumbX) * reflectiveRatio);
    diffusionThumbX = diffusionMinThumbX + ((diffusionMaxThumbX - diffusionMinThumbX) * diffusionRatio);
    absorptionThumbX = absorptionMinThumbX + ((absorptionMaxThumbX - absorptionMinThumbX) * absorptionRatio);

    Overlays.editOverlay(reflectiveThumb, { x: reflectiveThumbX } );
    Overlays.editOverlay(diffusionThumb, { x: diffusionThumbX } );
    Overlays.editOverlay(absorptionThumb, { x: absorptionThumbX } );
}

function updateRatioValues() {
    AudioReflector.setReflectiveRatio(reflectiveRatio);
    AudioReflector.setDiffusionRatio(diffusionRatio);
    AudioReflector.setAbsorptionRatio(absorptionRatio);
}
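The three setters above keep the reflective, diffusion, and absorption ratios summing to 1 by dividing all three by the new total whenever one of them changes. The same invariant in a compact, self-contained form (a sketch with illustrative names, not code from the script):

    #include <cstdio>

    struct Ratios { float reflective, diffusion, absorption; };

    // Replace the reflective component, then renormalize so all three sum to 1.
    Ratios withReflective(Ratios r, float newReflective) {
        float total = newReflective + r.diffusion + r.absorption;
        return { newReflective / total, r.diffusion / total, r.absorption / total };
    }

    int main() {
        Ratios r = { 0.5f, 0.3f, 0.2f };
        r = withReflective(r, 0.9f);     // total = 1.4
        std::printf("%.3f %.3f %.3f\n", r.reflective, r.diffusion, r.absorption);
        // prints 0.643 0.214 0.143, which sums to 1
    }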
var topY = 250;
var sliderHeight = 35;

var delayY = topY;
topY += sliderHeight;
var delayLabel = Overlays.addOverlay("text", {
    x: 40,
    y: delayY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0},
    textColor: { red: 255, green: 255, blue: 255},
    topMargin: 12,
    leftMargin: 5,
    text: "Delay:"
});

var delaySlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: delayY, width: 150, height: sliderHeight},
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
    color: { red: 255, green: 255, blue: 255},
    alpha: 1
});

var delayMinThumbX = 110;
var delayMaxThumbX = delayMinThumbX + 110;
var delayThumbX = delayMinThumbX + ((delayMaxThumbX - delayMinThumbX) * (AudioReflector.getPreDelay() / delayScale));
var delayThumb = Overlays.addOverlay("image", {
    x: delayThumbX,
    y: delayY + 9,
    width: 18,
    height: 17,
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
    color: { red: 255, green: 0, blue: 0},
    alpha: 1
});

var fanoutY = topY;
topY += sliderHeight;

var fanoutLabel = Overlays.addOverlay("text", {
    x: 40,
    y: fanoutY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0},
    textColor: { red: 255, green: 255, blue: 255},
    topMargin: 12,
    leftMargin: 5,
    text: "Fanout:"
});

var fanoutSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: fanoutY, width: 150, height: sliderHeight},
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
    color: { red: 255, green: 255, blue: 255},
    alpha: 1
});

var fanoutMinThumbX = 110;
var fanoutMaxThumbX = fanoutMinThumbX + 110;
var fanoutThumbX = fanoutMinThumbX + ((fanoutMaxThumbX - fanoutMinThumbX) * (AudioReflector.getDiffusionFanout() / fanoutScale));
var fanoutThumb = Overlays.addOverlay("image", {
    x: fanoutThumbX,
    y: fanoutY + 9,
    width: 18,
    height: 17,
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
    color: { red: 255, green: 255, blue: 0},
    alpha: 1
});

var speedY = topY;
topY += sliderHeight;

var speedLabel = Overlays.addOverlay("text", {
    x: 40,
    y: speedY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0},
    textColor: { red: 255, green: 255, blue: 255},
    topMargin: 6,
    leftMargin: 5,
    text: "Speed\nin ms/m:"
});

var speedSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: speedY, width: 150, height: sliderHeight},
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
    color: { red: 255, green: 255, blue: 255},
    alpha: 1
});

var speedMinThumbX = 110;
var speedMaxThumbX = speedMinThumbX + 110;
var speedThumbX = speedMinThumbX + ((speedMaxThumbX - speedMinThumbX) * (AudioReflector.getSoundMsPerMeter() / speedScale));
var speedThumb = Overlays.addOverlay("image", {
    x: speedThumbX,
    y: speedY + 9,
    width: 18,
    height: 17,
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
    color: { red: 0, green: 255, blue: 0},
    alpha: 1
});

var factorY = topY;
topY += sliderHeight;

var factorLabel = Overlays.addOverlay("text", {
    x: 40,
    y: factorY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0},
    textColor: { red: 255, green: 255, blue: 255},
    topMargin: 6,
    leftMargin: 5,
    text: "Attenuation\nFactor:"
});

var factorSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: factorY, width: 150, height: sliderHeight},
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
    color: { red: 255, green: 255, blue: 255},
    alpha: 1
});

var factorMinThumbX = 110;
var factorMaxThumbX = factorMinThumbX + 110;
var factorThumbX = factorMinThumbX + ((factorMaxThumbX - factorMinThumbX) * (AudioReflector.getDistanceAttenuationScalingFactor() / factorScale));
var factorThumb = Overlays.addOverlay("image", {
    x: factorThumbX,
    y: factorY + 9,
    width: 18,
    height: 17,
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
    color: { red: 0, green: 0, blue: 255},
    alpha: 1
});

var localFactorY = topY;
topY += sliderHeight;

var localFactorLabel = Overlays.addOverlay("text", {
    x: 40,
    y: localFactorY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0},
    textColor: { red: 255, green: 255, blue: 255},
    topMargin: 6,
    leftMargin: 5,
    text: "Local\nFactor:"
});

var localFactorSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: localFactorY, width: 150, height: sliderHeight},
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
    color: { red: 255, green: 255, blue: 255},
    alpha: 1
});

var localFactorMinThumbX = 110;
var localFactorMaxThumbX = localFactorMinThumbX + 110;
var localFactorThumbX = localFactorMinThumbX + ((localFactorMaxThumbX - localFactorMinThumbX) * (AudioReflector.getLocalAudioAttenuationFactor() / localFactorScale));
var localFactorThumb = Overlays.addOverlay("image", {
    x: localFactorThumbX,
    y: localFactorY + 9,
    width: 18,
    height: 17,
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
    color: { red: 0, green: 128, blue: 128},
    alpha: 1
});

var combFilterY = topY;
topY += sliderHeight;

var combFilterLabel = Overlays.addOverlay("text", {
    x: 40,
    y: combFilterY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0},
    textColor: { red: 255, green: 255, blue: 255},
    topMargin: 6,
    leftMargin: 5,
    text: "Comb Filter\nWindow:"
});

var combFilterSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: combFilterY, width: 150, height: sliderHeight},
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
    color: { red: 255, green: 255, blue: 255},
    alpha: 1
});

var combFilterMinThumbX = 110;
var combFilterMaxThumbX = combFilterMinThumbX + 110;
var combFilterThumbX = combFilterMinThumbX + ((combFilterMaxThumbX - combFilterMinThumbX) * (AudioReflector.getCombFilterWindow() / combFilterScale));
var combFilterThumb = Overlays.addOverlay("image", {
    x: combFilterThumbX,
    y: combFilterY + 9,
    width: 18,
    height: 17,
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
    color: { red: 128, green: 128, blue: 0},
    alpha: 1
});

var reflectiveY = topY;
topY += sliderHeight;

var reflectiveLabel = Overlays.addOverlay("text", {
    x: 40,
    y: reflectiveY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0},
    textColor: { red: 255, green: 255, blue: 255},
    topMargin: 6,
    leftMargin: 5,
    text: "Reflective\nRatio:"
});

var reflectiveSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: reflectiveY, width: 150, height: sliderHeight},
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
    color: { red: 255, green: 255, blue: 255},
    alpha: 1
});

var reflectiveMinThumbX = 110;
var reflectiveMaxThumbX = reflectiveMinThumbX + 110;
reflectiveThumbX = reflectiveMinThumbX + ((reflectiveMaxThumbX - reflectiveMinThumbX) * AudioReflector.getReflectiveRatio());
var reflectiveThumb = Overlays.addOverlay("image", {
    x: reflectiveThumbX,
    y: reflectiveY + 9,
    width: 18,
    height: 17,
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
    color: { red: 255, green: 255, blue: 255},
    alpha: 1
});

var diffusionY = topY;
topY += sliderHeight;

var diffusionLabel = Overlays.addOverlay("text", {
    x: 40,
    y: diffusionY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0},
    textColor: { red: 255, green: 255, blue: 255},
    topMargin: 6,
    leftMargin: 5,
    text: "Diffusion\nRatio:"
});

var diffusionSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: diffusionY, width: 150, height: sliderHeight},
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
    color: { red: 255, green: 255, blue: 255},
    alpha: 1
});

var diffusionMinThumbX = 110;
var diffusionMaxThumbX = diffusionMinThumbX + 110;
diffusionThumbX = diffusionMinThumbX + ((diffusionMaxThumbX - diffusionMinThumbX) * AudioReflector.getDiffusionRatio());
var diffusionThumb = Overlays.addOverlay("image", {
    x: diffusionThumbX,
    y: diffusionY + 9,
    width: 18,
    height: 17,
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
    color: { red: 0, green: 255, blue: 255},
    alpha: 1
});

var absorptionY = topY;
topY += sliderHeight;

var absorptionLabel = Overlays.addOverlay("text", {
    x: 40,
    y: absorptionY,
    width: 60,
    height: sliderHeight,
    color: { red: 0, green: 0, blue: 0},
    textColor: { red: 255, green: 255, blue: 255},
    topMargin: 6,
    leftMargin: 5,
    text: "Absorption\nRatio:"
});

var absorptionSlider = Overlays.addOverlay("image", {
    // alternate form of expressing bounds
    bounds: { x: 100, y: absorptionY, width: 150, height: sliderHeight},
    subImage: { x: 46, y: 0, width: 200, height: 71 },
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png",
    color: { red: 255, green: 255, blue: 255},
    alpha: 1
});

var absorptionMinThumbX = 110;
var absorptionMaxThumbX = absorptionMinThumbX + 110;
absorptionThumbX = absorptionMinThumbX + ((absorptionMaxThumbX - absorptionMinThumbX) * AudioReflector.getAbsorptionRatio());
var absorptionThumb = Overlays.addOverlay("image", {
    x: absorptionThumbX,
    y: absorptionY + 9,
    width: 18,
    height: 17,
    imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png",
    color: { red: 255, green: 0, blue: 255},
    alpha: 1
});

// When our script shuts down, we should clean up all of our overlays
function scriptEnding() {
    Overlays.deleteOverlay(factorLabel);
    Overlays.deleteOverlay(factorThumb);
    Overlays.deleteOverlay(factorSlider);

    Overlays.deleteOverlay(combFilterLabel);
    Overlays.deleteOverlay(combFilterThumb);
    Overlays.deleteOverlay(combFilterSlider);

    Overlays.deleteOverlay(localFactorLabel);
    Overlays.deleteOverlay(localFactorThumb);
    Overlays.deleteOverlay(localFactorSlider);

    Overlays.deleteOverlay(speedLabel);
    Overlays.deleteOverlay(speedThumb);
    Overlays.deleteOverlay(speedSlider);

    Overlays.deleteOverlay(delayLabel);
    Overlays.deleteOverlay(delayThumb);
    Overlays.deleteOverlay(delaySlider);

    Overlays.deleteOverlay(fanoutLabel);
    Overlays.deleteOverlay(fanoutThumb);
    Overlays.deleteOverlay(fanoutSlider);

    Overlays.deleteOverlay(reflectiveLabel);
    Overlays.deleteOverlay(reflectiveThumb);
    Overlays.deleteOverlay(reflectiveSlider);

    Overlays.deleteOverlay(diffusionLabel);
    Overlays.deleteOverlay(diffusionThumb);
    Overlays.deleteOverlay(diffusionSlider);

    Overlays.deleteOverlay(absorptionLabel);
    Overlays.deleteOverlay(absorptionThumb);
    Overlays.deleteOverlay(absorptionSlider);
}
Script.scriptEnding.connect(scriptEnding);

var count = 0;

// Our update() function is called at approximately 60fps, and we will use it to animate our various overlays
function update(deltaTime) {
    count++;
}
Script.update.connect(update);

// The slider is handled in the mouse event callbacks.
var movingSliderDelay = false;
var movingSliderFanout = false;
var movingSliderSpeed = false;
var movingSliderFactor = false;
var movingSliderCombFilter = false;
var movingSliderLocalFactor = false;
var movingSliderReflective = false;
var movingSliderDiffusion = false;
var movingSliderAbsorption = false;

var thumbClickOffsetX = 0;
function mouseMoveEvent(event) {
    if (movingSliderDelay) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < delayMinThumbX) {
            newThumbX = delayMinThumbX;
        }
        if (newThumbX > delayMaxThumbX) {
            newThumbX = delayMaxThumbX;
        }
        Overlays.editOverlay(delayThumb, { x: newThumbX } );
        var delay = ((newThumbX - delayMinThumbX) / (delayMaxThumbX - delayMinThumbX)) * delayScale;
        AudioReflector.setPreDelay(delay);
    }
    if (movingSliderFanout) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < fanoutMinThumbX) {
            newThumbX = fanoutMinThumbX;
        }
        if (newThumbX > fanoutMaxThumbX) {
            newThumbX = fanoutMaxThumbX;
        }
        Overlays.editOverlay(fanoutThumb, { x: newThumbX } );
        var fanout = Math.round(((newThumbX - fanoutMinThumbX) / (fanoutMaxThumbX - fanoutMinThumbX)) * fanoutScale);
        AudioReflector.setDiffusionFanout(fanout);
    }
    if (movingSliderSpeed) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < speedMinThumbX) {
            newThumbX = speedMinThumbX;
        }
        if (newThumbX > speedMaxThumbX) {
            newThumbX = speedMaxThumbX;
        }
        Overlays.editOverlay(speedThumb, { x: newThumbX } );
        var speed = ((newThumbX - speedMinThumbX) / (speedMaxThumbX - speedMinThumbX)) * speedScale;
        AudioReflector.setSoundMsPerMeter(speed);
    }
    if (movingSliderFactor) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < factorMinThumbX) {
            newThumbX = factorMinThumbX;
        }
        if (newThumbX > factorMaxThumbX) {
            newThumbX = factorMaxThumbX;
        }
        Overlays.editOverlay(factorThumb, { x: newThumbX } );
        var factor = ((newThumbX - factorMinThumbX) / (factorMaxThumbX - factorMinThumbX)) * factorScale;
        AudioReflector.setDistanceAttenuationScalingFactor(factor);
    }
    if (movingSliderCombFilter) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < combFilterMinThumbX) {
            newThumbX = combFilterMinThumbX;
        }
        if (newThumbX > combFilterMaxThumbX) {
            newThumbX = combFilterMaxThumbX;
        }
        Overlays.editOverlay(combFilterThumb, { x: newThumbX } );
        var combFilter = ((newThumbX - combFilterMinThumbX) / (combFilterMaxThumbX - combFilterMinThumbX)) * combFilterScale;
        AudioReflector.setCombFilterWindow(combFilter);
    }

    if (movingSliderLocalFactor) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < localFactorMinThumbX) {
            newThumbX = localFactorMinThumbX;
        }
        if (newThumbX > localFactorMaxThumbX) {
            newThumbX = localFactorMaxThumbX;
        }
        Overlays.editOverlay(localFactorThumb, { x: newThumbX } );
        var localFactor = ((newThumbX - localFactorMinThumbX) / (localFactorMaxThumbX - localFactorMinThumbX)) * localFactorScale;
        AudioReflector.setLocalAudioAttenuationFactor(localFactor);
    }

    if (movingSliderAbsorption) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < absorptionMinThumbX) {
            newThumbX = absorptionMinThumbX;
        }
        if (newThumbX > absorptionMaxThumbX) {
            newThumbX = absorptionMaxThumbX;
        }
        Overlays.editOverlay(absorptionThumb, { x: newThumbX } );
        var absorption = ((newThumbX - absorptionMinThumbX) / (absorptionMaxThumbX - absorptionMinThumbX)) * absorptionScale;
        setAbsorptionRatio(absorption);
    }

    if (movingSliderReflective) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < reflectiveMinThumbX) {
            newThumbX = reflectiveMinThumbX;
        }
        if (newThumbX > reflectiveMaxThumbX) {
            newThumbX = reflectiveMaxThumbX;
        }
        Overlays.editOverlay(reflectiveThumb, { x: newThumbX } );
        var reflective = ((newThumbX - reflectiveMinThumbX) / (reflectiveMaxThumbX - reflectiveMinThumbX)) * reflectiveScale;
        setReflectiveRatio(reflective);
    }

    if (movingSliderDiffusion) {
        newThumbX = event.x - thumbClickOffsetX;
        if (newThumbX < diffusionMinThumbX) {
            newThumbX = diffusionMinThumbX;
        }
        if (newThumbX > diffusionMaxThumbX) {
            newThumbX = diffusionMaxThumbX;
        }
        Overlays.editOverlay(diffusionThumb, { x: newThumbX } );
        var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale;
        setDiffusionRatio(diffusion);
    }
}

// we also handle click detection in our mousePressEvent()
function mousePressEvent(event) {
    var clickedOverlay = Overlays.getOverlayAtPoint({x: event.x, y: event.y});
    if (clickedOverlay == delayThumb) {
        movingSliderDelay = true;
        thumbClickOffsetX = event.x - delayThumbX;
    }
    if (clickedOverlay == fanoutThumb) {
        movingSliderFanout = true;
        thumbClickOffsetX = event.x - fanoutThumbX;
    }
    if (clickedOverlay == speedThumb) {
        movingSliderSpeed = true;
        thumbClickOffsetX = event.x - speedThumbX;
    }
    if (clickedOverlay == factorThumb) {
        movingSliderFactor = true;
        thumbClickOffsetX = event.x - factorThumbX;
    }
    if (clickedOverlay == localFactorThumb) {
        movingSliderLocalFactor = true;
        thumbClickOffsetX = event.x - localFactorThumbX;
    }
    if (clickedOverlay == combFilterThumb) {
        movingSliderCombFilter = true;
        thumbClickOffsetX = event.x - combFilterThumbX;
    }
    if (clickedOverlay == diffusionThumb) {
        movingSliderDiffusion = true;
        thumbClickOffsetX = event.x - diffusionThumbX;
    }
    if (clickedOverlay == absorptionThumb) {
        movingSliderAbsorption = true;
        thumbClickOffsetX = event.x - absorptionThumbX;
    }
    if (clickedOverlay == reflectiveThumb) {
        movingSliderReflective = true;
        thumbClickOffsetX = event.x - reflectiveThumbX;
    }
}

function mouseReleaseEvent(event) {
    if (movingSliderDelay) {
        movingSliderDelay = false;
        var delay = ((newThumbX - delayMinThumbX) / (delayMaxThumbX - delayMinThumbX)) * delayScale;
        AudioReflector.setPreDelay(delay);
        delayThumbX = newThumbX;
    }
    if (movingSliderFanout) {
        movingSliderFanout = false;
        var fanout = Math.round(((newThumbX - fanoutMinThumbX) / (fanoutMaxThumbX - fanoutMinThumbX)) * fanoutScale);
        AudioReflector.setDiffusionFanout(fanout);
        fanoutThumbX = newThumbX;
    }
    if (movingSliderSpeed) {
        movingSliderSpeed = false;
        var speed = ((newThumbX - speedMinThumbX) / (speedMaxThumbX - speedMinThumbX)) * speedScale;
        AudioReflector.setSoundMsPerMeter(speed);
        speedThumbX = newThumbX;
    }
    if (movingSliderFactor) {
        movingSliderFactor = false;
        var factor = ((newThumbX - factorMinThumbX) / (factorMaxThumbX - factorMinThumbX)) * factorScale;
        AudioReflector.setDistanceAttenuationScalingFactor(factor);
        factorThumbX = newThumbX;
    }
    if (movingSliderCombFilter) {
        movingSliderCombFilter = false;
        var combFilter = ((newThumbX - combFilterMinThumbX) / (combFilterMaxThumbX - combFilterMinThumbX)) * combFilterScale;
        AudioReflector.setCombFilterWindow(combFilter);
        combFilterThumbX = newThumbX;
    }

    if (movingSliderLocalFactor) {
        movingSliderLocalFactor = false;
        var localFactor = ((newThumbX - localFactorMinThumbX) / (localFactorMaxThumbX - localFactorMinThumbX)) * localFactorScale;
        AudioReflector.setLocalAudioAttenuationFactor(localFactor);
        localFactorThumbX = newThumbX;
    }

    if (movingSliderReflective) {
        movingSliderReflective = false;
        var reflective = ((newThumbX - reflectiveMinThumbX) / (reflectiveMaxThumbX - reflectiveMinThumbX)) * reflectiveScale;
        setReflectiveRatio(reflective);
        reflectiveThumbX = newThumbX;
        updateRatioSliders();
    }

    if (movingSliderDiffusion) {
        movingSliderDiffusion = false;
        var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale;
        setDiffusionRatio(diffusion);
        diffusionThumbX = newThumbX;
        updateRatioSliders();
    }

    if (movingSliderAbsorption) {
        movingSliderAbsorption = false;
        var absorption = ((newThumbX - absorptionMinThumbX) / (absorptionMaxThumbX - absorptionMinThumbX)) * absorptionScale;
        setAbsorptionRatio(absorption);
        absorptionThumbX = newThumbX;
        updateRatioSliders();
    }
}

Controller.mouseMoveEvent.connect(mouseMoveEvent);
Controller.mousePressEvent.connect(mousePressEvent);
Controller.mouseReleaseEvent.connect(mouseReleaseEvent);
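Every handler above uses the same linear mapping between the thumb's pixel position and the underlying parameter: pixel position to value while dragging, and the inverse when the thumb is first placed. Factored out as a sketch (illustrative helper names, not code from the script):

    // Map a thumb position in [minX, maxX] to a parameter in [0, scale], and back.
    float valueFromThumb(float thumbX, float minX, float maxX, float scale) {
        return ((thumbX - minX) / (maxX - minX)) * scale;
    }

    float thumbFromValue(float value, float minX, float maxX, float scale) {
        return minX + (maxX - minX) * (value / scale);
    }
    // e.g. with minX = 110, maxX = 220, scale = 100: thumbX = 165 maps to value = 50.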
@@ -152,11 +152,10 @@ function particleCollisionWithVoxel(particle, voxel, collision) {
     var position = particleProperties.position;
     Particles.deleteParticle(particle);
     // Make a hole in this voxel
-    Vec3.print("penetration", collision.penetration);
-    Vec3.print("contactPoint", collision.contactPoint);
-    Voxels.eraseVoxel(contactPoint.x, contactPoint.y, contactPoint.z, HOLE_SIZE);
-    Voxels.eraseVoxel(position.x, position.y, position.z, HOLE_SIZE);
-    audioOptions.position = Vec3.sum(Camera.getPosition(), Quat.getFront(Camera.getOrientation()));
+    //Vec3.print("voxel penetration", collision.penetration);
+    //Vec3.print("voxel contactPoint", collision.contactPoint);
+    Voxels.eraseVoxel(collision.contactPoint.x, collision.contactPoint.y, collision.contactPoint.z, HOLE_SIZE);
+    audioOptions.position = collision.contactPoint;
     Audio.playSound(impactSound, audioOptions);
 }
 
@@ -171,9 +170,9 @@ function particleCollisionWithParticle(particle1, particle2, collision) {
     // Record shot time
     var endTime = new Date();
     var msecs = endTime.valueOf() - shotTime.valueOf();
-    print("hit, msecs = " + msecs);
-    Vec3.print("penetration = ", collision.penetration);
-    Vec3.print("contactPoint = ", collision.contactPoint);
+    //print("hit, msecs = " + msecs);
+    //Vec3.print("penetration = ", collision.penetration);
+    //Vec3.print("contactPoint = ", collision.contactPoint);
     Particles.deleteParticle(particle1);
     Particles.deleteParticle(particle2);
     // play the sound near the camera so the shooter can hear it
interface/resources/images/close_down.svg (new file, 79 lines; 44 x 44 px, 3 KiB)

@@ -0,0 +1,79 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
   xmlns:sketch="http://www.bohemiancoding.com/sketch/ns"
   xmlns:dc="http://purl.org/dc/elements/1.1/"
   xmlns:cc="http://creativecommons.org/ns#"
   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
   xmlns:svg="http://www.w3.org/2000/svg"
   xmlns="http://www.w3.org/2000/svg"
   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
   width="44px"
   height="44px"
   viewBox="0 0 44 44"
   version="1.1"
   id="svg3085"
   inkscape:version="0.48.2 r9819"
   sodipodi:docname="close_hover.svg">
  <metadata
     id="metadata3099">
    <rdf:RDF>
      <cc:Work
         rdf:about="">
        <dc:format>image/svg+xml</dc:format>
        <dc:type
           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
        <dc:title>Slice 1</dc:title>
      </cc:Work>
    </rdf:RDF>
  </metadata>
  <sodipodi:namedview
     pagecolor="#ffffff"
     bordercolor="#666666"
     borderopacity="1"
     objecttolerance="10"
     gridtolerance="10"
     guidetolerance="10"
     inkscape:pageopacity="0"
     inkscape:pageshadow="2"
     inkscape:window-width="814"
     inkscape:window-height="783"
     id="namedview3097"
     showgrid="false"
     inkscape:zoom="10.727273"
     inkscape:cx="14.784087"
     inkscape:cy="19.379049"
     inkscape:window-x="0"
     inkscape:window-y="0"
     inkscape:window-maximized="0"
     inkscape:current-layer="Page-1" />
  <title
     id="title3087">Slice 1</title>
  <description
     id="description3089">Created with Sketch (http://www.bohemiancoding.com/sketch)</description>
  <defs
     id="defs3091" />
  <g
     id="Page-1"
     sketch:type="MSPage"
     stroke-width="1"
     stroke="none"
     fill-rule="evenodd"
     fill="none">
    <g
       id="close"
       sketch:type="MSLayerGroup"
       fill="#CCCCCC">
      <path
         d="M0.286382588,32.6200761 L32.6200759,0.286382745 C33.0019275,-0.0954590326 33.6210173,-0.0954590326 34.0028688,0.286382745 L43.380667,9.66418097 C43.7625088,10.0460227 43.7625088,10.6651125 43.380667,11.0469543 L43.380667,11.0469543 L11.0469639,43.3806574 C10.6651221,43.7624992 10.0460226,43.7624992 9.66418081,43.3806574 L0.286382588,34.0028592 C-0.0954591894,33.6210076 -0.0954591894,33.0019179 0.286382588,32.6200761 L0.286382588,32.6200761 L0.286382588,32.6200761 Z"
         id="path16"
         sketch:type="MSShapeGroup"
         style="fill:#e6e6e6" />
      <path
         d="M32.6200759,43.3806574 L0.286382588,11.0469543 C-0.0954591894,10.6651125 -0.0954591894,10.0460227 0.286382588,9.66418097 L9.66418081,0.286382745 C10.0460226,-0.0954590326 10.6651221,-0.0954590326 11.0469639,0.286382745 L43.380667,32.6200761 C43.7625088,33.0019179 43.7625088,33.6210076 43.380667,34.0028592 L34.0028688,43.3806574 C33.6210173,43.7624992 33.0019275,43.7624992 32.6200759,43.3806574 L32.6200759,43.3806574 Z"
         id="path18"
         sketch:type="MSShapeGroup"
         style="fill:#e6e6e6" />
    </g>
  </g>
</svg>
@@ -11,7 +11,8 @@ QLabel#advancedTuningLabel {
 }
 
 QPushButton#buttonBrowseHead,
-QPushButton#buttonBrowseBody {
+QPushButton#buttonBrowseBody,
+QPushButton#buttonBrowseLocation {
     background-image: url(styles/search.svg);
     background-repeat: no-repeat;
     background-position: center center;
interface/src/Application.cpp

@@ -1659,16 +1659,12 @@ void Application::init() {
 
     _particleCollisionSystem.init(&_particleEditSender, _particles.getTree(), _voxels.getTree(), &_audio, &_avatarManager);
 
-    // connect the _particleCollisionSystem to our script engine's ParticleScriptingInterface
-    connect(&_particleCollisionSystem,
-            SIGNAL(particleCollisionWithVoxel(const ParticleID&, const VoxelDetail&, const CollisionInfo&)),
-            ScriptEngine::getParticlesScriptingInterface(),
-            SIGNAL(particleCollisionWithVoxels(const ParticleID&, const VoxelDetail&, const CollisionInfo&)));
-
-    connect(&_particleCollisionSystem,
-            SIGNAL(particleCollisionWithParticle(const ParticleID&, const ParticleID&, const CollisionInfo&)),
-            ScriptEngine::getParticlesScriptingInterface(),
-            SIGNAL(particleCollisionWithParticle(const ParticleID&, const ParticleID&, const CollisionInfo&)));
+    // connect the _particleCollisionSystem to our script engine's ParticlesScriptingInterface
+    connect(&_particleCollisionSystem, &ParticleCollisionSystem::particleCollisionWithVoxel,
+            ScriptEngine::getParticlesScriptingInterface(), &ParticlesScriptingInterface::particleCollisionWithVoxel);
+
+    connect(&_particleCollisionSystem, &ParticleCollisionSystem::particleCollisionWithParticle,
+            ScriptEngine::getParticlesScriptingInterface(), &ParticlesScriptingInterface::particleCollisionWithParticle);
 
     _audio.init(_glWidget);
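The hunk above moves these connections from the string-based SIGNAL()/SLOT() macros to Qt 5's pointer-to-member overload of QObject::connect, which is checked at compile time instead of failing silently at runtime; notably, the old code targeted a signal name, particleCollisionWithVoxels, that did not match the interface's actual signal. A minimal sketch of the two styles, using a hypothetical Emitter/Receiver pair (not classes from this codebase):

    #include <QObject>

    class Emitter : public QObject {
        Q_OBJECT
    signals:
        void valueChanged(int value);
    };

    class Receiver : public QObject {
        Q_OBJECT
    public slots:
        void onValueChanged(int value) { /* react to the new value */ }
    };

    void wire(Emitter* e, Receiver* r) {
        // Qt 4 style: signatures are matched by string at runtime; a typo only
        // shows up as a console warning when the connection is attempted.
        QObject::connect(e, SIGNAL(valueChanged(int)), r, SLOT(onValueChanged(int)));

        // Qt 5 style: pointer-to-member, verified by the compiler.
        QObject::connect(e, &Emitter::valueChanged, r, &Receiver::onValueChanged);
    }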
@@ -1678,7 +1674,16 @@ void Application::init() {
     connect(_rearMirrorTools, SIGNAL(restoreView()), SLOT(restoreMirrorView()));
     connect(_rearMirrorTools, SIGNAL(shrinkView()), SLOT(shrinkMirrorView()));
     connect(_rearMirrorTools, SIGNAL(resetView()), SLOT(resetSensors()));
-    connect(_myAvatar, SIGNAL(transformChanged()), this, SLOT(bumpSettings()));
+
+    // set up our audio reflector
+    _audioReflector.setMyAvatar(getAvatar());
+    _audioReflector.setVoxels(_voxels.getTree());
+    _audioReflector.setAudio(getAudio());
+    connect(getAudio(), &Audio::processInboundAudio, &_audioReflector, &AudioReflector::processInboundAudio, Qt::DirectConnection);
+    connect(getAudio(), &Audio::processLocalAudio, &_audioReflector, &AudioReflector::processLocalAudio, Qt::DirectConnection);
+
+    // save settings when avatar changes
+    connect(_myAvatar, &MyAvatar::transformChanged, this, &Application::bumpSettings);
 }
 
 void Application::closeMirrorView() {
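Both AudioReflector connections above are made with Qt::DirectConnection, so the reflector's slots run synchronously on whichever thread emits the audio signals rather than being queued onto the receiver's event loop; that keeps per-buffer processing in step with the audio callback. A sketch of the pattern using the classes wired above (the helper name is illustrative):

    void wireReflector(Audio* audio, AudioReflector* reflector) {
        // DirectConnection invokes the slot immediately on the emitting (audio)
        // thread instead of queueing it, so each buffer is processed inline.
        // The slot must therefore be safe to run off the reflector's own thread.
        QObject::connect(audio, &Audio::processLocalAudio,
                         reflector, &AudioReflector::processLocalAudio,
                         Qt::DirectConnection);
    }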
@@ -2450,6 +2455,9 @@ void Application::displaySide(Camera& whichCamera, bool selfAvatarOnly) {
 
     // disable specular lighting for ground and voxels
     glMaterialfv(GL_FRONT, GL_SPECULAR, NO_SPECULAR_COLOR);
 
+    // draw the audio reflector overlay
+    _audioReflector.render();
+
     // Draw voxels
     if (Menu::getInstance()->isOptionChecked(MenuOption::Voxels)) {
 
@@ -3284,6 +3292,7 @@ void Application::stopAllScripts() {
     }
     _scriptEnginesHash.clear();
     _runningScriptsWidget->setRunningScripts(getRunningScripts());
+    bumpSettings();
 }
 
 void Application::stopScript(const QString &scriptName)
 
@@ -3292,6 +3301,7 @@ void Application::stopScript(const QString &scriptName)
     qDebug() << "stopping script..." << scriptName;
     _scriptEnginesHash.remove(scriptName);
     _runningScriptsWidget->setRunningScripts(getRunningScripts());
+    bumpSettings();
 }
 
 void Application::reloadAllScripts() {
 
@@ -3388,6 +3398,7 @@ void Application::loadScript(const QString& scriptName) {
     scriptEngine->registerGlobalObject("Settings", SettingsScriptingInterface::getInstance());
     scriptEngine->registerGlobalObject("AudioDevice", AudioDeviceScriptingInterface::getInstance());
     scriptEngine->registerGlobalObject("AnimationCache", &_animationCache);
+    scriptEngine->registerGlobalObject("AudioReflector", &_audioReflector);
 
     QThread* workerThread = new QThread(this);
 
@@ -3408,6 +3419,7 @@ void Application::loadScript(const QString& scriptName) {
 
     // restore the main window's active state
     _window->activateWindow();
+    bumpSettings();
 }
 
 void Application::loadDialog() {
interface/src/Application.h

@@ -38,6 +38,7 @@
 #include <VoxelEditPacketSender.h>
 
 #include "Audio.h"
+#include "AudioReflector.h"
 #include "BuckyBalls.h"
 #include "Camera.h"
 #include "DatagramProcessor.h"
 
@@ -162,6 +163,7 @@ public:
     bool isThrottleRendering() const { return _glWidget->isThrottleRendering(); }
     MyAvatar* getAvatar() { return _myAvatar; }
     Audio* getAudio() { return &_audio; }
+    const AudioReflector* getAudioReflector() const { return &_audioReflector; }
     Camera* getCamera() { return &_myCamera; }
     ViewFrustum* getViewFrustum() { return &_viewFrustum; }
     ViewFrustum* getShadowViewFrustum() { return &_shadowViewFrustum; }
 
@@ -514,7 +516,7 @@ private:
     TouchEvent _lastTouchEvent;
 
     Overlays _overlays;
+    AudioReflector _audioReflector;
     RunningScriptsWidget* _runningScriptsWidget;
     QHash<QString, ScriptEngine*> _scriptEnginesHash;
 };
@ -37,6 +37,7 @@
|
||||||
#include <SharedUtil.h>
|
#include <SharedUtil.h>
|
||||||
#include <StdDev.h>
|
#include <StdDev.h>
|
||||||
#include <UUID.h>
|
#include <UUID.h>
|
||||||
|
#include <glm/glm.hpp>
|
||||||
|
|
||||||
#include "Application.h"
|
#include "Application.h"
|
||||||
#include "Audio.h"
|
#include "Audio.h"
|
||||||
|
@ -87,7 +88,11 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
|
||||||
_collisionSoundDuration(0.0f),
|
_collisionSoundDuration(0.0f),
|
||||||
_proceduralEffectSample(0),
|
_proceduralEffectSample(0),
|
||||||
_numFramesDisplayStarve(0),
|
_numFramesDisplayStarve(0),
|
||||||
_muted(false)
|
_muted(false),
|
||||||
|
_processSpatialAudio(false),
|
||||||
|
_spatialAudioStart(0),
|
||||||
|
_spatialAudioFinish(0),
|
||||||
|
_spatialAudioRingBuffer(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL, true) // random access mode
|
||||||
{
|
{
|
||||||
// clear the array of locally injected samples
|
// clear the array of locally injected samples
|
||||||
memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
|
memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
|
||||||
|
@ -398,7 +403,7 @@ void Audio::handleAudioInput() {
|
||||||
unsigned int inputSamplesRequired = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * inputToNetworkInputRatio;
|
unsigned int inputSamplesRequired = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * inputToNetworkInputRatio;
|
||||||
|
|
||||||
QByteArray inputByteArray = _inputDevice->readAll();
|
QByteArray inputByteArray = _inputDevice->readAll();
|
||||||
|
|
||||||
if (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio) && !_muted && _audioOutput) {
|
if (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio) && !_muted && _audioOutput) {
|
||||||
// if this person wants local loopback add that to the locally injected audio
|
// if this person wants local loopback add that to the locally injected audio
|
||||||
|
|
||||||
|
@ -406,7 +411,7 @@ void Audio::handleAudioInput() {
|
||||||
// we didn't have the loopback output device going so set that up now
|
// we didn't have the loopback output device going so set that up now
|
||||||
_loopbackOutputDevice = _loopbackAudioOutput->start();
|
_loopbackOutputDevice = _loopbackAudioOutput->start();
|
||||||
}
|
}
|
||||||
|
|
||||||
if (_inputFormat == _outputFormat) {
|
if (_inputFormat == _outputFormat) {
|
||||||
if (_loopbackOutputDevice) {
|
if (_loopbackOutputDevice) {
|
||||||
_loopbackOutputDevice->write(inputByteArray);
|
_loopbackOutputDevice->write(inputByteArray);
|
||||||
|
@ -524,7 +529,7 @@ void Audio::handleAudioInput() {
|
||||||
_noiseSampleFrames[_noiseGateSampleCounter++] = _lastInputLoudness;
|
_noiseSampleFrames[_noiseGateSampleCounter++] = _lastInputLoudness;
|
||||||
if (_noiseGateSampleCounter == NUMBER_OF_NOISE_SAMPLE_FRAMES) {
|
if (_noiseGateSampleCounter == NUMBER_OF_NOISE_SAMPLE_FRAMES) {
|
||||||
float smallestSample = FLT_MAX;
|
float smallestSample = FLT_MAX;
|
||||||
for (int i = 0; i <= NUMBER_OF_NOISE_SAMPLE_FRAMES - NOISE_GATE_FRAMES_TO_AVERAGE; i+= NOISE_GATE_FRAMES_TO_AVERAGE) {
|
for (int i = 0; i <= NUMBER_OF_NOISE_SAMPLE_FRAMES - NOISE_GATE_FRAMES_TO_AVERAGE; i += NOISE_GATE_FRAMES_TO_AVERAGE) {
|
||||||
float thisAverage = 0.0f;
|
float thisAverage = 0.0f;
|
||||||
for (int j = i; j < i + NOISE_GATE_FRAMES_TO_AVERAGE; j++) {
|
for (int j = i; j < i + NOISE_GATE_FRAMES_TO_AVERAGE; j++) {
|
||||||
thisAverage += _noiseSampleFrames[j];
|
thisAverage += _noiseSampleFrames[j];
|
||||||
|
@ -559,6 +564,13 @@ void Audio::handleAudioInput() {
|
||||||
_lastInputLoudness = 0;
|
_lastInputLoudness = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// at this point we have clean monoAudioSamples, which match our target output...
|
||||||
|
// this is what we should send to our interested listeners
|
||||||
|
if (_processSpatialAudio && !_muted && _audioOutput) {
|
||||||
|
QByteArray monoInputData((char*)monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * sizeof(int16_t));
|
||||||
|
emit processLocalAudio(_spatialAudioStart, monoInputData, _desiredInputFormat);
|
||||||
|
}
|
||||||
|
|
||||||
if (_proceduralAudioOutput) {
|
if (_proceduralAudioOutput) {
|
||||||
processProceduralAudio(monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
|
processProceduralAudio(monoAudioSamples, NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
|
||||||
}
|
}
|
||||||
|
@ -622,7 +634,7 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
    _totalPacketsReceived++;

    double timeDiff = diffclock(&_lastReceiveTime, &currentReceiveTime);

    // Discard first few received packets for computing jitter (often they pile up on start)
    if (_totalPacketsReceived > NUM_INITIAL_PACKETS_DISCARD) {
        _stdev.addValue(timeDiff);
@ -650,6 +662,69 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
    _lastReceiveTime = currentReceiveTime;
}

+// NOTE: numSamples is the total number of single channel samples; since callers will always call this with stereo
+// data we know that we will have 2x samples for each stereo time sample at the format's sample rate
+void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples) {
+    // Calculate the number of remaining samples available. The source spatial audio buffer will get
+    // clipped if there are insufficient samples available in the accumulation buffer.
+    unsigned int remaining = _spatialAudioRingBuffer.getSampleCapacity() - _spatialAudioRingBuffer.samplesAvailable();
+
+    // Locate where in the accumulation buffer the new samples need to go
+    if (sampleTime >= _spatialAudioFinish) {
+        if (_spatialAudioStart == _spatialAudioFinish) {
+            // Nothing in the spatial audio ring buffer yet; just do a straight copy, clipping if necessary
+            unsigned int sampleCount = (remaining < numSamples) ? remaining : numSamples;
+            if (sampleCount) {
+                _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data(), sampleCount);
+            }
+            _spatialAudioFinish = _spatialAudioStart + sampleCount / _desiredOutputFormat.channelCount();
+        } else {
+            // Spatial audio ring buffer already has data, but there is no overlap with the new sample.
+            // Compute the appropriate time delay and pad with silence until the new start time.
+            unsigned int delay = sampleTime - _spatialAudioFinish;
+            unsigned int delayCount = delay * _desiredOutputFormat.channelCount();
+            unsigned int silentCount = (remaining < delayCount) ? remaining : delayCount;
+            if (silentCount) {
+                _spatialAudioRingBuffer.addSilentFrame(silentCount);
+            }
+
+            // Recalculate the number of remaining samples
+            remaining -= silentCount;
+            unsigned int sampleCount = (remaining < numSamples) ? remaining : numSamples;
+
+            // Copy the new spatial audio to the accumulation ring buffer
+            if (sampleCount) {
+                _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data(), sampleCount);
+            }
+            _spatialAudioFinish += (sampleCount + silentCount) / _desiredOutputFormat.channelCount();
+        }
+    } else {
+        // There is overlap between the spatial audio buffer and the new sample; mix the overlap.
+        // Calculate the offset from the buffer's current read position, which should be located at _spatialAudioStart
+        unsigned int offset = (sampleTime - _spatialAudioStart) * _desiredOutputFormat.channelCount();
+        unsigned int mixedSamplesCount = (_spatialAudioFinish - sampleTime) * _desiredOutputFormat.channelCount();
+        mixedSamplesCount = (mixedSamplesCount < numSamples) ? mixedSamplesCount : numSamples;
+
+        const int16_t* spatial = reinterpret_cast<const int16_t*>(spatialAudio.data());
+        for (unsigned int i = 0; i < mixedSamplesCount; i++) {
+            int existingSample = _spatialAudioRingBuffer[i + offset];
+            int newSample = spatial[i];
+            int sumOfSamples = existingSample + newSample;
+            _spatialAudioRingBuffer[i + offset] = static_cast<int16_t>(glm::clamp<int>(sumOfSamples,
+                                                    std::numeric_limits<short>::min(), std::numeric_limits<short>::max()));
+        }
+
+        // Copy the remaining unoverlapped spatial audio to the spatial audio buffer, if any
+        unsigned int nonMixedSampleCount = numSamples - mixedSamplesCount;
+        nonMixedSampleCount = (remaining < nonMixedSampleCount) ? remaining : nonMixedSampleCount;
+        if (nonMixedSampleCount) {
+            _spatialAudioRingBuffer.writeSamples((int16_t*)spatialAudio.data() + mixedSamplesCount, nonMixedSampleCount);
+            // Extend the finish time by the amount of unoverlapped samples
+            _spatialAudioFinish += nonMixedSampleCount / _desiredOutputFormat.channelCount();
+        }
+    }
+}
+
bool Audio::mousePressEvent(int x, int y) {
    if (_iconBounds.contains(x, y)) {
        toggleMute();
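The interval arithmetic in addSpatialAudioToBuffer() is easier to trace with the ring buffer abstracted away. Below is a minimal standalone sketch, not code from this commit, of the same three cases (empty buffer, gap padded with silence, overlap mixed with clamping); the SpatialAccumulator type and its names are hypothetical, a std::vector stands in for AudioRingBuffer, and capacity clipping is omitted for brevity.

// Sketch only: mirrors the bookkeeping of Audio::addSpatialAudioToBuffer().
#include <algorithm>
#include <cstdint>
#include <vector>

struct SpatialAccumulator {
    std::vector<std::int16_t> buffer; // accumulated samples; index 0 corresponds to 'start'
    unsigned int start = 0;           // interval start, in per-channel frames
    unsigned int finish = 0;          // interval end, in per-channel frames
    unsigned int channels = 2;

    void add(unsigned int sampleTime, const std::vector<std::int16_t>& samples) {
        unsigned int frames = samples.size() / channels;
        if (sampleTime >= finish) {
            if (start == finish) {
                // buffer empty: straight copy; the new audio is anchored at 'start'
                buffer.assign(samples.begin(), samples.end());
                finish = start + frames;
            } else {
                // gap: pad with silence from 'finish' to the new start time, then append
                unsigned int silentFrames = sampleTime - finish;
                buffer.insert(buffer.end(), silentFrames * channels, 0);
                buffer.insert(buffer.end(), samples.begin(), samples.end());
                finish += silentFrames + frames;
            }
        } else {
            // overlap: mix the region [sampleTime, finish) with clamping, append any tail
            size_t offset = (sampleTime - start) * channels;
            size_t overlap = std::min<size_t>((finish - sampleTime) * channels, samples.size());
            for (size_t i = 0; i < overlap; i++) {
                int sum = buffer[offset + i] + samples[i];
                buffer[offset + i] = static_cast<std::int16_t>(std::max(-32768, std::min(32767, sum)));
            }
            buffer.insert(buffer.end(), samples.begin() + overlap, samples.end());
            finish += frames - overlap / channels;
        }
    }
};

For instance, calling add(0, a) and then add(120, b) with 240-frame packets would mix the second half of a against the first half of b and append the rest, which is exactly the overlap branch above.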
@ -669,7 +744,7 @@ void Audio::toggleAudioNoiseReduction() {

void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
    _ringBuffer.parseData(audioByteArray);

    float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate())
        * (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount());
@ -700,13 +775,32 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
        //qDebug() << "pushing " << numNetworkOutputSamples;
        _ringBuffer.setIsStarved(false);

-        // copy the samples we'll resample from the ring buffer - this also
-        // pushes the read pointer of the ring buffer forwards
-        int16_t* ringBufferSamples= new int16_t[numNetworkOutputSamples];
-        _ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
-
-        // add the next numNetworkOutputSamples from each QByteArray
-        // in our _localInjectionByteArrays QVector to the localInjectedSamples
+        int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples];
+        if (_processSpatialAudio) {
+            unsigned int sampleTime = _spatialAudioStart;
+            QByteArray buffer;
+            buffer.resize(numNetworkOutputSamples * sizeof(int16_t));
+
+            _ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
+            // Accumulate direct transmission of audio from sender to receiver
+            if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
+                addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
+            }
+
+            // Send audio off for spatial processing
+            emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat);
+
+            // copy the samples we'll resample from the spatial audio ring buffer - this also
+            // pushes the read pointer of the spatial audio ring buffer forwards
+            _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
+
+            // Advance the start point for the next packet of audio to arrive
+            _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
+        } else {
+            // copy the samples we'll resample from the ring buffer - this also
+            // pushes the read pointer of the ring buffer forwards
+            _ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
+        }

        // copy the packet from the RB to the output
        linearResampling(ringBufferSamples,
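For orientation: the processInboundAudio signal emitted above, and the processLocalAudio signal emitted from handleAudioInput(), are presumably connected to the matching AudioReflector methods during application setup. The connect calls are not part of this diff; a plausible Qt5-style wiring sketch, with hypothetical variable names, would be:

// Hypothetical wiring; the actual connect calls live elsewhere in the
// application and are not shown in this commit.
QObject::connect(&audio, &Audio::processInboundAudio,
                 &audioReflector, &AudioReflector::processInboundAudio);
QObject::connect(&audio, &Audio::processLocalAudio,
                 &audioReflector, &AudioReflector::processLocalAudio);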
@ -756,6 +850,15 @@ void Audio::toggleToneInjection() {
    _toneInjectionEnabled = !_toneInjectionEnabled;
}

+void Audio::toggleAudioSpatialProcessing() {
+    _processSpatialAudio = !_processSpatialAudio;
+    if (_processSpatialAudio) {
+        _spatialAudioStart = 0;
+        _spatialAudioFinish = 0;
+        _spatialAudioRingBuffer.reset();
+    }
+}
+
// Take a pointer to the acquired microphone input samples and add procedural sounds
void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) {
    float sample;
@ -996,6 +1099,12 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo)
        _proceduralAudioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);

        gettimeofday(&_lastReceiveTime, NULL);

+        // setup spatial audio ringbuffer
+        int numFrameSamples = _outputFormat.sampleRate() * _desiredOutputFormat.channelCount();
+        _spatialAudioRingBuffer.resizeForFrameSize(numFrameSamples);
+        _spatialAudioStart = _spatialAudioFinish = 0;
+
        supportedFormat = true;
    }
}
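For scale, an illustrative calculation (not from the commit): with a 48000 Hz device output rate and a stereo desired output format, the frame size requested above is 48000 x 2 = 96000 samples, roughly one second of stereo audio. The actual capacity depends on how AudioRingBuffer::resizeForFrameSize() multiplies that frame size internally, which is outside this diff.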
@ -54,8 +54,6 @@ public:
    void setJitterBufferSamples(int samples) { _jitterBufferSamples = samples; }
    int getJitterBufferSamples() { return _jitterBufferSamples; }

-    void lowPassFilter(int16_t* inputBuffer);
-
    virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen);
    virtual void startDrumSound(float volume, float frequency, float duration, float decay);
@ -73,15 +71,19 @@ public:
    int getNetworkSampleRate() { return SAMPLE_RATE; }
    int getNetworkBufferLengthSamplesPerChannel() { return NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; }

+    bool getProcessSpatialAudio() const { return _processSpatialAudio; }
+
public slots:
    void start();
    void stop();
    void addReceivedAudioToBuffer(const QByteArray& audioByteArray);
+    void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
    void handleAudioInput();
    void reset();
    void toggleMute();
    void toggleAudioNoiseReduction();
    void toggleToneInjection();
+    void toggleAudioSpatialProcessing();

    virtual void handleAudioByteArray(const QByteArray& audioByteArray);
@ -97,6 +99,8 @@ public slots:

signals:
    bool muteToggled();
+    void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
+    void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);

private:
@ -162,9 +166,15 @@ private:
    GLuint _boxTextureId;
    QRect _iconBounds;

-    // Audio callback in class context.
+    /// Audio callback in class context.
    inline void performIO(int16_t* inputLeft, int16_t* outputLeft, int16_t* outputRight);

+    bool _processSpatialAudio; /// Process received audio by spatial audio hooks
+    unsigned int _spatialAudioStart; /// Start of spatial audio interval (in sample rate time base)
+    unsigned int _spatialAudioFinish; /// End of spatial audio interval (in sample rate time base)
+    AudioRingBuffer _spatialAudioRingBuffer; /// Spatially processed audio
+
    // Process procedural audio by
    // 1. Echo to the local procedural output device
    // 2. Mix with the audio input
762  interface/src/AudioReflector.cpp  Normal file

@ -0,0 +1,762 @@
//
//  AudioReflector.cpp
//  interface
//
//  Created by Brad Hefta-Gaub on 4/2/2014
//  Copyright (c) 2014 High Fidelity, Inc. All rights reserved.
//

#include <QMutexLocker>

#include "AudioReflector.h"
#include "Menu.h"

const float DEFAULT_PRE_DELAY = 20.0f; // this delay in msecs will always be added to all reflections
const float DEFAULT_MS_DELAY_PER_METER = 3.0f;
const float MINIMUM_ATTENUATION_TO_REFLECT = 1.0f / 256.0f;
const float DEFAULT_DISTANCE_SCALING_FACTOR = 2.0f;
const float MAXIMUM_DELAY_MS = 1000.0 * 20.0f; // stop reflecting after path is this long
const int DEFAULT_DIFFUSION_FANOUT = 5;
const int ABSOLUTE_MAXIMUM_BOUNCE_COUNT = 10;
const float DEFAULT_LOCAL_ATTENUATION_FACTOR = 0.125;
const float DEFAULT_COMB_FILTER_WINDOW = 0.05f; // ms delay differential to avoid

const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on the inside of the reflection point

const float DEFAULT_ABSORPTION_RATIO = 0.125; // 12.5% is absorbed
const float DEFAULT_DIFFUSION_RATIO = 0.125; // 12.5% is diffused

AudioReflector::AudioReflector(QObject* parent) :
    QObject(parent),
    _preDelay(DEFAULT_PRE_DELAY),
    _soundMsPerMeter(DEFAULT_MS_DELAY_PER_METER),
    _distanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR),
    _localAudioAttenuationFactor(DEFAULT_LOCAL_ATTENUATION_FACTOR),
    _combFilterWindow(DEFAULT_COMB_FILTER_WINDOW),
    _diffusionFanout(DEFAULT_DIFFUSION_FANOUT),
    _absorptionRatio(DEFAULT_ABSORPTION_RATIO),
    _diffusionRatio(DEFAULT_DIFFUSION_RATIO),
    _withDiffusion(false),
    _lastPreDelay(DEFAULT_PRE_DELAY),
    _lastSoundMsPerMeter(DEFAULT_MS_DELAY_PER_METER),
    _lastDistanceAttenuationScalingFactor(DEFAULT_DISTANCE_SCALING_FACTOR),
    _lastLocalAudioAttenuationFactor(DEFAULT_LOCAL_ATTENUATION_FACTOR),
    _lastDiffusionFanout(DEFAULT_DIFFUSION_FANOUT),
    _lastAbsorptionRatio(DEFAULT_ABSORPTION_RATIO),
    _lastDiffusionRatio(DEFAULT_DIFFUSION_RATIO)
{
    _reflections = 0;
    _diffusionPathCount = 0;
    _averageAttenuation = 0.0f;
    _maxAttenuation = 0.0f;
    _minAttenuation = 0.0f;
    _averageDelay = 0;
    _maxDelay = 0;
    _minDelay = 0;
}
bool AudioReflector::haveAttributesChanged() {
    bool withDiffusion = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);

    bool attributesChange = (_withDiffusion != withDiffusion
        || _lastPreDelay != _preDelay
        || _lastSoundMsPerMeter != _soundMsPerMeter
        || _lastDistanceAttenuationScalingFactor != _distanceAttenuationScalingFactor
        || _lastDiffusionFanout != _diffusionFanout
        || _lastAbsorptionRatio != _absorptionRatio
        || _lastDiffusionRatio != _diffusionRatio);

    if (attributesChange) {
        _withDiffusion = withDiffusion;
        _lastPreDelay = _preDelay;
        _lastSoundMsPerMeter = _soundMsPerMeter;
        _lastDistanceAttenuationScalingFactor = _distanceAttenuationScalingFactor;
        _lastDiffusionFanout = _diffusionFanout;
        _lastAbsorptionRatio = _absorptionRatio;
        _lastDiffusionRatio = _diffusionRatio;
    }

    return attributesChange;
}
void AudioReflector::render() {

    // if we're not set up yet, or we're not processing spatial audio, then exit early
    if (!_myAvatar || !_audio->getProcessSpatialAudio()) {
        return;
    }

    // use this opportunity to calculate our reflections
    calculateAllReflections();

    // only render if we've been asked to do so
    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingRenderPaths)) {
        drawRays();
    }
}
// delay = 1ms per foot
//       = 3ms per meter
float AudioReflector::getDelayFromDistance(float distance) {
    float delay = (_soundMsPerMeter * distance);
    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay)) {
        delay += _preDelay;
    }
    return delay;
}

// attenuation = from the Audio Mixer
float AudioReflector::getDistanceAttenuationCoefficient(float distance) {
    const float DISTANCE_SCALE = 2.5f;
    const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
    const float DISTANCE_LOG_BASE = 2.5f;
    const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE);

    float distanceSquareToSource = distance * distance;

    // calculate the distance coefficient using the distance to this node
    float distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR,
                                     DISTANCE_SCALE_LOG +
                                     (0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);

    distanceCoefficient = std::min(1.0f, distanceCoefficient * getDistanceAttenuationScalingFactor());

    return distanceCoefficient;
}
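Worked algebra for the constants above (a reader-side simplification, not code from the commit, assuming powf/logf behave as real-valued pow/log): DISTANCE_SCALE_LOG = logf(2.5) / logf(2.5) = 1, and 0.5 * logf(d * d) / logf(2.5) is log base 2.5 of d. The exponent therefore reduces to log2.5(d), so before scaling and clamping the coefficient is 0.3^(log2.5(d)) = d^(ln 0.3 / ln 2.5), approximately d^-1.31: unity at one meter, falling slightly faster than 1/d.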
glm::vec3 AudioReflector::getFaceNormal(BoxFace face) {
    bool wantSlightRandomness = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces);
    glm::vec3 faceNormal;
    const float MIN_RANDOM_LENGTH = 0.99f;
    const float MAX_RANDOM_LENGTH = 1.0f;
    const float NON_RANDOM_LENGTH = 1.0f;
    float normalLength = wantSlightRandomness ? randFloatInRange(MIN_RANDOM_LENGTH, MAX_RANDOM_LENGTH) : NON_RANDOM_LENGTH;
    float remainder = (1.0f - normalLength) / 2.0f;
    float remainderSignA = randomSign();
    float remainderSignB = randomSign();

    if (face == MIN_X_FACE) {
        faceNormal = glm::vec3(-normalLength, remainder * remainderSignA, remainder * remainderSignB);
    } else if (face == MAX_X_FACE) {
        faceNormal = glm::vec3(normalLength, remainder * remainderSignA, remainder * remainderSignB);
    } else if (face == MIN_Y_FACE) {
        faceNormal = glm::vec3(remainder * remainderSignA, -normalLength, remainder * remainderSignB);
    } else if (face == MAX_Y_FACE) {
        faceNormal = glm::vec3(remainder * remainderSignA, normalLength, remainder * remainderSignB);
    } else if (face == MIN_Z_FACE) {
        faceNormal = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, -normalLength);
    } else if (face == MAX_Z_FACE) {
        faceNormal = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, normalLength);
    }
    return faceNormal;
}
// set up our buffers for our attenuated and delayed samples
const int NUMBER_OF_CHANNELS = 2;

void AudioReflector::injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint,
                                        const QByteArray& samples, unsigned int sampleTime, int sampleRate) {

    bool wantEarSeparation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars);
    bool wantStereo = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource);
    glm::vec3 rightEarPosition = wantEarSeparation ? _myAvatar->getHead()->getRightEarPosition() :
                                    _myAvatar->getHead()->getPosition();
    glm::vec3 leftEarPosition = wantEarSeparation ? _myAvatar->getHead()->getLeftEarPosition() :
                                    _myAvatar->getHead()->getPosition();

    int totalNumberOfSamples = samples.size() / sizeof(int16_t);
    int totalNumberOfStereoSamples = samples.size() / (sizeof(int16_t) * NUMBER_OF_CHANNELS);

    const int16_t* originalSamplesData = (const int16_t*)samples.constData();
    QByteArray attenuatedLeftSamples;
    QByteArray attenuatedRightSamples;
    attenuatedLeftSamples.resize(samples.size());
    attenuatedRightSamples.resize(samples.size());

    int16_t* attenuatedLeftSamplesData = (int16_t*)attenuatedLeftSamples.data();
    int16_t* attenuatedRightSamplesData = (int16_t*)attenuatedRightSamples.data();

    // calculate the distance to the ears
    float rightEarDistance = glm::distance(audiblePoint.location, rightEarPosition);
    float leftEarDistance = glm::distance(audiblePoint.location, leftEarPosition);

    float rightEarDelayMsecs = getDelayFromDistance(rightEarDistance) + audiblePoint.delay;
    float leftEarDelayMsecs = getDelayFromDistance(leftEarDistance) + audiblePoint.delay;
    float averageEarDelayMsecs = (leftEarDelayMsecs + rightEarDelayMsecs) / 2.0f;

    bool safeToInject = true; // assume the best

    // check to see if this new injection point would be within the comb filter
    // suppression window for any of the existing known delays
    QMap<float, float>& knownDelays = (source == INBOUND_AUDIO) ? _inboundAudioDelays : _localAudioDelays;
    QMap<float, float>::const_iterator lowerBound = knownDelays.lowerBound(averageEarDelayMsecs - _combFilterWindow);
    if (lowerBound != knownDelays.end()) {
        float closestFound = lowerBound.value();
        float deltaToClosest = (averageEarDelayMsecs - closestFound);
        if (deltaToClosest > -_combFilterWindow && deltaToClosest < _combFilterWindow) {
            safeToInject = false;
        }
    }

    // keep track of any of our suppressed echoes so we can report them in our statistics
    if (!safeToInject) {
        QVector<float>& suppressedEchoes = (source == INBOUND_AUDIO) ? _inboundEchoesSuppressed : _localEchoesSuppressed;
        suppressedEchoes << averageEarDelayMsecs;
    } else {
        knownDelays[averageEarDelayMsecs] = averageEarDelayMsecs;

        _totalDelay += rightEarDelayMsecs + leftEarDelayMsecs;
        _delayCount += 2;
        _maxDelay = std::max(_maxDelay, rightEarDelayMsecs);
        _maxDelay = std::max(_maxDelay, leftEarDelayMsecs);
        _minDelay = std::min(_minDelay, rightEarDelayMsecs);
        _minDelay = std::min(_minDelay, leftEarDelayMsecs);

        int rightEarDelay = rightEarDelayMsecs * sampleRate / MSECS_PER_SECOND;
        int leftEarDelay = leftEarDelayMsecs * sampleRate / MSECS_PER_SECOND;

        float rightEarAttenuation = audiblePoint.attenuation *
                                        getDistanceAttenuationCoefficient(rightEarDistance + audiblePoint.distance);

        float leftEarAttenuation = audiblePoint.attenuation *
                                        getDistanceAttenuationCoefficient(leftEarDistance + audiblePoint.distance);

        _totalAttenuation += rightEarAttenuation + leftEarAttenuation;
        _attenuationCount += 2;
        _maxAttenuation = std::max(_maxAttenuation, rightEarAttenuation);
        _maxAttenuation = std::max(_maxAttenuation, leftEarAttenuation);
        _minAttenuation = std::min(_minAttenuation, rightEarAttenuation);
        _minAttenuation = std::min(_minAttenuation, leftEarAttenuation);

        // run through the samples, and attenuate them
        for (int sample = 0; sample < totalNumberOfStereoSamples; sample++) {
            int16_t leftSample = originalSamplesData[sample * NUMBER_OF_CHANNELS];
            int16_t rightSample = leftSample;
            if (wantStereo) {
                rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1];
            }

            attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation;
            attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0;

            attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0;
            attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation;
        }

        // now inject the attenuated array with the appropriate delay
        unsigned int sampleTimeLeft = sampleTime + leftEarDelay;
        unsigned int sampleTimeRight = sampleTime + rightEarDelay;

        _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples);
        _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples);
    }
}
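The suppression test above leans on QMap::lowerBound over a map keyed by delay. A minimal standalone illustration of the same lookup, with hypothetical values chosen for the example, is:

// Sketch: would a new echo at 42.03 ms be suppressed, given existing echoes
// at 40.0 ms and 42.0 ms and a 0.05 ms comb filter window?
#include <QMap>
#include <QDebug>

int main() {
    const float combFilterWindow = 0.05f;
    QMap<float, float> knownDelays;
    knownDelays[40.0f] = 40.0f;
    knownDelays[42.0f] = 42.0f;

    float candidate = 42.03f;
    bool safeToInject = true;
    // first entry with key >= candidate - window; here that is 42.0
    QMap<float, float>::iterator lowerBound = knownDelays.lowerBound(candidate - combFilterWindow);
    if (lowerBound != knownDelays.end()) {
        float deltaToClosest = candidate - lowerBound.value();
        if (deltaToClosest > -combFilterWindow && deltaToClosest < combFilterWindow) {
            safeToInject = false; // 42.03 - 42.0 = 0.03 falls inside the window
        }
    }
    qDebug() << "safeToInject =" << safeToInject; // prints: safeToInject = false
    return 0;
}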
void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)) {
        const int NUM_CHANNELS_INPUT = 1;
        const int NUM_CHANNELS_OUTPUT = 2;
        const int EXPECTED_SAMPLE_RATE = 24000;
        if (format.channelCount() == NUM_CHANNELS_INPUT && format.sampleRate() == EXPECTED_SAMPLE_RATE) {
            QAudioFormat outputFormat = format;
            outputFormat.setChannelCount(NUM_CHANNELS_OUTPUT);
            QByteArray stereoInputData(samples.size() * NUM_CHANNELS_OUTPUT, 0);
            int numberOfSamples = (samples.size() / sizeof(int16_t));
            int16_t* monoSamples = (int16_t*)samples.data();
            int16_t* stereoSamples = (int16_t*)stereoInputData.data();

            for (int i = 0; i < numberOfSamples; i++) {
                stereoSamples[i * NUM_CHANNELS_OUTPUT] = monoSamples[i] * _localAudioAttenuationFactor;
                stereoSamples[(i * NUM_CHANNELS_OUTPUT) + 1] = monoSamples[i] * _localAudioAttenuationFactor;
            }
            _localAudioDelays.clear();
            _localEchoesSuppressed.clear();
            echoAudio(LOCAL_AUDIO, sampleTime, stereoInputData, outputFormat);
        }
    }
}

void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
    _inboundAudioDelays.clear();
    _inboundEchoesSuppressed.clear();
    echoAudio(INBOUND_AUDIO, sampleTime, samples, format);
}
void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
    _maxDelay = 0;
    _maxAttenuation = 0.0f;
    _minDelay = std::numeric_limits<int>::max();
    _minAttenuation = std::numeric_limits<float>::max();
    _totalDelay = 0.0f;
    _delayCount = 0;
    _totalAttenuation = 0.0f;
    _attenuationCount = 0;

    QMutexLocker locker(&_mutex);

    // depending on if we're processing local or external audio, pick the correct points vector
    QVector<AudiblePoint>& audiblePoints = source == INBOUND_AUDIO ? _inboundAudiblePoints : _localAudiblePoints;

    foreach(const AudiblePoint& audiblePoint, audiblePoints) {
        injectAudiblePoint(source, audiblePoint, samples, sampleTime, format.sampleRate());
    }

    _averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount;
    _averageAttenuation = _attenuationCount == 0 ? 0 : _totalAttenuation / _attenuationCount;

    if (_reflections == 0) {
        _minDelay = 0.0f;
        _minAttenuation = 0.0f;
    }
}
void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color) {
    glDisable(GL_LIGHTING);
    glLineWidth(2.0);

    // Draw the vector itself
    glBegin(GL_LINES);
    glColor3f(color.x, color.y, color.z);
    glVertex3f(start.x, start.y, start.z);
    glVertex3f(end.x, end.y, end.z);
    glEnd();

    glEnable(GL_LIGHTING);
}
AudioPath::AudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& direction,
        float attenuation, float delay, float distance, bool isDiffusion, int bounceCount) :

    source(source),
    isDiffusion(isDiffusion),
    startPoint(origin),
    startDirection(direction),
    startDelay(delay),
    startAttenuation(attenuation),

    lastPoint(origin),
    lastDirection(direction),
    lastDistance(distance),
    lastDelay(delay),
    lastAttenuation(attenuation),
    bounceCount(bounceCount),

    finalized(false),
    reflections()
{
}

void AudioReflector::addAudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& initialDirection,
                                  float initialAttenuation, float initialDelay, float initialDistance, bool isDiffusion) {

    AudioPath* path = new AudioPath(source, origin, initialDirection, initialAttenuation, initialDelay,
                                    initialDistance, isDiffusion, 0);

    QVector<AudioPath*>& audioPaths = source == INBOUND_AUDIO ? _inboundAudioPaths : _localAudioPaths;

    audioPaths.push_back(path);
}
void AudioReflector::calculateAllReflections() {
    // only recalculate when we've moved, or if the attributes have changed
    // TODO: what about case where new voxels are added in front of us???
    bool wantHeadOrientation = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingHeadOriented);
    glm::quat orientation = wantHeadOrientation ? _myAvatar->getHead()->getFinalOrientation() : _myAvatar->getOrientation();
    glm::vec3 origin = _myAvatar->getHead()->getPosition();
    glm::vec3 listenerPosition = _myAvatar->getHead()->getPosition();

    bool shouldRecalc = _reflections == 0
        || !isSimilarPosition(origin, _origin)
        || !isSimilarOrientation(orientation, _orientation)
        || !isSimilarPosition(listenerPosition, _listenerPosition)
        || haveAttributesChanged();

    if (shouldRecalc) {
        QMutexLocker locker(&_mutex);
        quint64 start = usecTimestampNow();
        _origin = origin;
        _orientation = orientation;
        _listenerPosition = listenerPosition;
        analyzePaths(); // actually does the work
        quint64 end = usecTimestampNow();
        const bool wantDebugging = false;
        if (wantDebugging) {
            qDebug() << "newCalculateAllReflections() elapsed=" << (end - start);
        }
    }
}
void AudioReflector::drawRays() {
    const glm::vec3 RED(1,0,0);
    const glm::vec3 GREEN(0,1,0);
    const glm::vec3 BLUE(0,0,1);
    const glm::vec3 CYAN(0,1,1);

    int diffusionNumber = 0;

    QMutexLocker locker(&_mutex);

    // draw the paths for inbound audio
    foreach(AudioPath* const& path, _inboundAudioPaths) {
        // if this is an original reflection, draw it in RED
        if (path->isDiffusion) {
            diffusionNumber++;
            drawPath(path, GREEN);
        } else {
            drawPath(path, RED);
        }
    }

    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)) {
        // draw the paths for local audio
        foreach(AudioPath* const& path, _localAudioPaths) {
            // if this is an original reflection, draw it in BLUE
            if (path->isDiffusion) {
                diffusionNumber++;
                drawPath(path, CYAN);
            } else {
                drawPath(path, BLUE);
            }
        }
    }
}
void AudioReflector::drawPath(AudioPath* path, const glm::vec3& originalColor) {
    glm::vec3 start = path->startPoint;
    glm::vec3 color = originalColor;
    const float COLOR_ADJUST_PER_BOUNCE = 0.75f;

    foreach (glm::vec3 end, path->reflections) {
        drawVector(start, end, color);
        start = end;
        color = color * COLOR_ADJUST_PER_BOUNCE;
    }
}
void AudioReflector::clearPaths() {
    // clear our inbound audio paths
    foreach(AudioPath* const& path, _inboundAudioPaths) {
        delete path;
    }
    _inboundAudioPaths.clear();
    _inboundAudiblePoints.clear(); // clear our inbound audible points

    // clear our local audio paths
    foreach(AudioPath* const& path, _localAudioPaths) {
        delete path;
    }
    _localAudioPaths.clear();
    _localAudiblePoints.clear(); // clear our local audible points
}
// Here's how this works: we have an array of AudioPaths, we loop on all of our currently calculating audio
// paths, and calculate one ray per path. If that ray doesn't reflect, or reaches a max distance/attenuation, then it
// is considered finalized.
// If the ray hits a surface, then, based on the characteristics of that surface, it will calculate the new
// attenuation, path length, and delay for the primary path. For surfaces that have diffusion, it will also create
// fanout number of new paths; those new paths will have an origin of the reflection point, and an initial attenuation
// of their diffusion ratio. Those new paths will be added to the active audio paths, and be analyzed for the next loop.
void AudioReflector::analyzePaths() {
    clearPaths();

    // add our initial paths
    glm::vec3 right = glm::normalize(_orientation * IDENTITY_RIGHT);
    glm::vec3 up = glm::normalize(_orientation * IDENTITY_UP);
    glm::vec3 front = glm::normalize(_orientation * IDENTITY_FRONT);
    glm::vec3 left = -right;
    glm::vec3 down = -up;
    glm::vec3 back = -front;
    glm::vec3 frontRightUp = glm::normalize(front + right + up);
    glm::vec3 frontLeftUp = glm::normalize(front + left + up);
    glm::vec3 backRightUp = glm::normalize(back + right + up);
    glm::vec3 backLeftUp = glm::normalize(back + left + up);
    glm::vec3 frontRightDown = glm::normalize(front + right + down);
    glm::vec3 frontLeftDown = glm::normalize(front + left + down);
    glm::vec3 backRightDown = glm::normalize(back + right + down);
    glm::vec3 backLeftDown = glm::normalize(back + left + down);

    float initialAttenuation = 1.0f;

    float preDelay = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) ? _preDelay : 0.0f;

    // NOTE: we're still calculating our initial paths based on the listener's position. But the analysis code has been
    // updated to support individual sound sources (which is how we support diffusion), so we can use this new paradigm
    // to add support for individual sound sources, and more directional sound sources.

    addAudioPath(INBOUND_AUDIO, _origin, front, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, right, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, up, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, down, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, back, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, left, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, frontRightUp, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, frontLeftUp, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, backRightUp, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, backLeftUp, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, frontRightDown, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, frontLeftDown, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, backRightDown, initialAttenuation, preDelay);
    addAudioPath(INBOUND_AUDIO, _origin, backLeftDown, initialAttenuation, preDelay);

    // the original paths for the local audio are directional to the front of the origin
    addAudioPath(LOCAL_AUDIO, _origin, front, initialAttenuation, preDelay);
    addAudioPath(LOCAL_AUDIO, _origin, frontRightUp, initialAttenuation, preDelay);
    addAudioPath(LOCAL_AUDIO, _origin, frontLeftUp, initialAttenuation, preDelay);
    addAudioPath(LOCAL_AUDIO, _origin, frontRightDown, initialAttenuation, preDelay);
    addAudioPath(LOCAL_AUDIO, _origin, frontLeftDown, initialAttenuation, preDelay);

    // loop through all our audio paths and keep analyzing them until they complete
    int steps = 0;
    int activePaths = _inboundAudioPaths.size() + _localAudioPaths.size(); // when we start, all paths are active
    while (activePaths > 0) {
        activePaths = analyzePathsSingleStep();
        steps++;
    }
    _reflections = _inboundAudiblePoints.size() + _localAudiblePoints.size();
    _diffusionPathCount = countDiffusionPaths();
}
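A quick sanity check on why the while loop above terminates, using plain arithmetic on the constants defined at the top of this file: each diffusion ray starts at totalDiffusionAttenuation / fanout of its parent's attenuation, at most 0.125 / 5 = 0.025 with the defaults. A second generation of diffusion is then capped at 0.025 * 0.125 / 5 = 0.000625, already below MINIMUM_ATTENUATION_TO_REFLECT = 1/256 (about 0.0039) even before distance attenuation, so diffusion chains die after roughly two diffuse bounces. Non-diffuse paths are separately capped by ABSOLUTE_MAXIMUM_BOUNCE_COUNT and MAXIMUM_DELAY_MS.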
int AudioReflector::countDiffusionPaths() {
    int diffusionCount = 0;

    foreach(AudioPath* const& path, _inboundAudioPaths) {
        if (path->isDiffusion) {
            diffusionCount++;
        }
    }
    foreach(AudioPath* const& path, _localAudioPaths) {
        if (path->isDiffusion) {
            diffusionCount++;
        }
    }
    return diffusionCount;
}
int AudioReflector::analyzePathsSingleStep() {
    // iterate all the active sound paths, calculate one step per active path
    int activePaths = 0;

    QVector<AudioPath*>* pathsLists[] = { &_inboundAudioPaths, &_localAudioPaths };

    for (unsigned int i = 0; i < sizeof(pathsLists) / sizeof(pathsLists[0]); i++) {

        QVector<AudioPath*>& pathList = *pathsLists[i];

        foreach(AudioPath* const& path, pathList) {

            glm::vec3 start = path->lastPoint;
            glm::vec3 direction = path->lastDirection;
            OctreeElement* elementHit; // output from findRayIntersection
            float distance; // output from findRayIntersection
            BoxFace face; // output from findRayIntersection

            if (!path->finalized) {
                activePaths++;

                if (path->bounceCount > ABSOLUTE_MAXIMUM_BOUNCE_COUNT) {
                    path->finalized = true;
                } else if (_voxels->findRayIntersection(start, direction, elementHit, distance, face)) {
                    // TODO: we need to decide how we want to handle locking on the ray intersection; if we force lock,
                    // we get an accurate picture, but it could prevent rendering of the voxels. If we trylock (default),
                    // we might not get ray intersections where they may exist, but we can't really detect that case...
                    // add last parameter of Octree::Lock to force locking
                    handlePathPoint(path, distance, elementHit, face);

                } else {
                    // If we didn't intersect, but this was a diffusion ray, then we will go ahead and cast a short ray out
                    // from our last known point, in the last known direction, and leave that sound source hanging there
                    if (path->isDiffusion) {
                        const float MINIMUM_RANDOM_DISTANCE = 0.25f;
                        const float MAXIMUM_RANDOM_DISTANCE = 0.5f;
                        float distance = randFloatInRange(MINIMUM_RANDOM_DISTANCE, MAXIMUM_RANDOM_DISTANCE);
                        handlePathPoint(path, distance, NULL, UNKNOWN_FACE);
                    } else {
                        path->finalized = true; // if it doesn't intersect, then it is finished
                    }
                }
            }
        }
    }
    return activePaths;
}
void AudioReflector::handlePathPoint(AudioPath* path, float distance, OctreeElement* elementHit, BoxFace face) {
    glm::vec3 start = path->lastPoint;
    glm::vec3 direction = path->lastDirection;
    glm::vec3 end = start + (direction * (distance * SLIGHTLY_SHORT));

    float currentReflectiveAttenuation = path->lastAttenuation; // only the reflective components
    float currentDelay = path->lastDelay; // start with our delay so far
    float pathDistance = path->lastDistance;

    pathDistance += glm::distance(start, end);

    float toListenerDistance = glm::distance(end, _listenerPosition);

    // adjust our current delay by just the delay from the most recent ray
    currentDelay += getDelayFromDistance(distance);

    // now we know the current attenuation for the "perfect" reflection case, but we now incorporate
    // our surface materials to determine how much of this ray is absorbed, reflected, and diffused
    SurfaceCharacteristics material = getSurfaceCharacteristics(elementHit);

    float reflectiveAttenuation = currentReflectiveAttenuation * material.reflectiveRatio;
    float totalDiffusionAttenuation = currentReflectiveAttenuation * material.diffusionRatio;

    bool wantDiffusions = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
    int fanout = wantDiffusions ? _diffusionFanout : 0;

    float partialDiffusionAttenuation = fanout < 1 ? 0.0f : totalDiffusionAttenuation / (float)fanout;

    // total delay includes the bounce back to listener
    float totalDelay = currentDelay + getDelayFromDistance(toListenerDistance);
    float toListenerAttenuation = getDistanceAttenuationCoefficient(toListenerDistance + pathDistance);

    // if our resulting partial diffusion attenuation is still above our minimum attenuation
    // then we add new paths for each diffusion point
    if ((partialDiffusionAttenuation * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT
            && totalDelay < MAXIMUM_DELAY_MS) {

        // diffusions fan out from random places on the hemisphere around the collision point
        for (int i = 0; i < fanout; i++) {
            glm::vec3 diffusion;

            // We're creating a random normal here. But we want it to be relatively dramatic compared to how we handle
            // our slightly random surface normals.
            const float MINIMUM_RANDOM_LENGTH = 0.5f;
            const float MAXIMUM_RANDOM_LENGTH = 1.0f;
            float randomness = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
            float remainder = (1.0f - randomness) / 2.0f;
            float remainderSignA = randomSign();
            float remainderSignB = randomSign();

            if (face == MIN_X_FACE) {
                diffusion = glm::vec3(-randomness, remainder * remainderSignA, remainder * remainderSignB);
            } else if (face == MAX_X_FACE) {
                diffusion = glm::vec3(randomness, remainder * remainderSignA, remainder * remainderSignB);
            } else if (face == MIN_Y_FACE) {
                diffusion = glm::vec3(remainder * remainderSignA, -randomness, remainder * remainderSignB);
            } else if (face == MAX_Y_FACE) {
                diffusion = glm::vec3(remainder * remainderSignA, randomness, remainder * remainderSignB);
            } else if (face == MIN_Z_FACE) {
                diffusion = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, -randomness);
            } else if (face == MAX_Z_FACE) {
                diffusion = glm::vec3(remainder * remainderSignA, remainder * remainderSignB, randomness);
            } else if (face == UNKNOWN_FACE) {
                float randomnessX = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
                float randomnessY = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
                float randomnessZ = randFloatInRange(MINIMUM_RANDOM_LENGTH, MAXIMUM_RANDOM_LENGTH);
                diffusion = glm::vec3(direction.x * randomnessX, direction.y * randomnessY, direction.z * randomnessZ);
            }

            diffusion = glm::normalize(diffusion);

            // add new audio path for these diffusions; the new path's source is the same as the original source
            addAudioPath(path->source, end, diffusion, partialDiffusionAttenuation, currentDelay, pathDistance, true);
        }
    } else {
        const bool wantDebugging = false;
        if (wantDebugging) {
            if ((partialDiffusionAttenuation * toListenerAttenuation) <= MINIMUM_ATTENUATION_TO_REFLECT) {
                qDebug() << "too quiet to diffuse";
                qDebug() << "    partialDiffusionAttenuation=" << partialDiffusionAttenuation;
                qDebug() << "    toListenerAttenuation=" << toListenerAttenuation;
                qDebug() << "    result=" << (partialDiffusionAttenuation * toListenerAttenuation);
                qDebug() << "    MINIMUM_ATTENUATION_TO_REFLECT=" << MINIMUM_ATTENUATION_TO_REFLECT;
            }
            if (totalDelay > MAXIMUM_DELAY_MS) {
                qDebug() << "too delayed to diffuse";
                qDebug() << "    totalDelay=" << totalDelay;
                qDebug() << "    MAXIMUM_DELAY_MS=" << MAXIMUM_DELAY_MS;
            }
        }
    }

    // if our reflective attenuation is above our minimum, then add our reflection point and
    // allow our path to continue
    if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) > MINIMUM_ATTENUATION_TO_REFLECT
            && totalDelay < MAXIMUM_DELAY_MS) {

        // add this location, as the reflective attenuation as well as the total diffusion attenuation
        // NOTE: we add the delay to the audible point, not back to the listener. The additional delay
        // and attenuation to the listener is recalculated at the point where we actually inject the
        // audio so that it can be adjusted to ear position
        AudiblePoint point = {end, currentDelay, (reflectiveAttenuation + totalDiffusionAttenuation), pathDistance};

        QVector<AudiblePoint>& audiblePoints = path->source == INBOUND_AUDIO ? _inboundAudiblePoints : _localAudiblePoints;

        audiblePoints.push_back(point);

        // add this location to the path points, so we can visualize it
        path->reflections.push_back(end);

        // now, if our reflective attenuation is over our minimum then keep going...
        if (reflectiveAttenuation * toListenerAttenuation > MINIMUM_ATTENUATION_TO_REFLECT) {
            glm::vec3 faceNormal = getFaceNormal(face);
            path->lastDirection = glm::normalize(glm::reflect(direction, faceNormal));
            path->lastPoint = end;
            path->lastAttenuation = reflectiveAttenuation;
            path->lastDelay = currentDelay;
            path->lastDistance = pathDistance;
            path->bounceCount++;
        } else {
            path->finalized = true; // if we're too quiet, then we're done
        }
    } else {
        const bool wantDebugging = false;
        if (wantDebugging) {
            if (((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation) <= MINIMUM_ATTENUATION_TO_REFLECT) {
                qDebug() << "too quiet to add audible point";
                qDebug() << "    reflectiveAttenuation + totalDiffusionAttenuation=" << (reflectiveAttenuation + totalDiffusionAttenuation);
                qDebug() << "    toListenerAttenuation=" << toListenerAttenuation;
                qDebug() << "    result=" << ((reflectiveAttenuation + totalDiffusionAttenuation) * toListenerAttenuation);
                qDebug() << "    MINIMUM_ATTENUATION_TO_REFLECT=" << MINIMUM_ATTENUATION_TO_REFLECT;
            }
            if (totalDelay > MAXIMUM_DELAY_MS) {
                qDebug() << "too delayed to add audible point";
                qDebug() << "    totalDelay=" << totalDelay;
                qDebug() << "    MAXIMUM_DELAY_MS=" << MAXIMUM_DELAY_MS;
            }
        }
        path->finalized = true; // if we're too quiet, then we're done
    }
}
// TODO: eventually we will add support for different surface characteristics based on the element
// that is hit, which is why we pass in the elementHit to this helper function. But for now, all
// surfaces have the same characteristics
SurfaceCharacteristics AudioReflector::getSurfaceCharacteristics(OctreeElement* elementHit) {
    SurfaceCharacteristics result = { getReflectiveRatio(), _absorptionRatio, _diffusionRatio };
    return result;
}

void AudioReflector::setReflectiveRatio(float ratio) {
    float safeRatio = std::max(0.0f, std::min(ratio, 1.0f));
    float currentReflectiveRatio = (1.0f - (_absorptionRatio + _diffusionRatio));
    float halfDifference = (safeRatio - currentReflectiveRatio) / 2.0f;

    // evenly distribute the difference between the two other ratios
    _absorptionRatio -= halfDifference;
    _diffusionRatio -= halfDifference;
}

void AudioReflector::setAbsorptionRatio(float ratio) {
    float safeRatio = std::max(0.0f, std::min(ratio, 1.0f));
    _absorptionRatio = safeRatio;
    const float MAX_COMBINED_RATIO = 1.0f;
    if (_absorptionRatio + _diffusionRatio > MAX_COMBINED_RATIO) {
        _diffusionRatio = MAX_COMBINED_RATIO - _absorptionRatio;
    }
}

void AudioReflector::setDiffusionRatio(float ratio) {
    float safeRatio = std::max(0.0f, std::min(ratio, 1.0f));
    _diffusionRatio = safeRatio;
    const float MAX_COMBINED_RATIO = 1.0f;
    if (_absorptionRatio + _diffusionRatio > MAX_COMBINED_RATIO) {
        _absorptionRatio = MAX_COMBINED_RATIO - _diffusionRatio;
    }
}
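A worked example of the redistribution in setReflectiveRatio() above (plain arithmetic on the defaults, not code from the commit): with absorption and diffusion both at 0.125, the implied reflective ratio is 0.75. Calling setReflectiveRatio(0.5) gives halfDifference = (0.5 - 0.75) / 2 = -0.125, so absorption and diffusion each rise to 0.25 and the three ratios again sum to 1.0.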
222  interface/src/AudioReflector.h  Normal file

@ -0,0 +1,222 @@
//
//  AudioReflector.h
//  interface
//
//  Created by Brad Hefta-Gaub on 4/2/2014
//  Copyright (c) 2014 High Fidelity, Inc. All rights reserved.
//

#ifndef interface_AudioReflector_h
#define interface_AudioReflector_h

#include <QMutex>

#include <VoxelTree.h>

#include "Audio.h"
#include "avatar/MyAvatar.h"

enum AudioSource {
    LOCAL_AUDIO,
    INBOUND_AUDIO
};

class AudioPath {
public:
    AudioPath(AudioSource source = INBOUND_AUDIO, const glm::vec3& origin = glm::vec3(0.0f),
              const glm::vec3& direction = glm::vec3(0.0f), float attenuation = 1.0f,
              float delay = 0.0f, float distance = 0.0f, bool isDiffusion = false, int bounceCount = 0);

    AudioSource source;
    bool isDiffusion;
    glm::vec3 startPoint;
    glm::vec3 startDirection;
    float startDelay;
    float startAttenuation;

    glm::vec3 lastPoint;
    glm::vec3 lastDirection;
    float lastDistance;
    float lastDelay;
    float lastAttenuation;
    unsigned int bounceCount;

    bool finalized;
    QVector<glm::vec3> reflections;
};

class AudiblePoint {
public:
    glm::vec3 location; /// location of the audible point
    float delay; /// total delay, including pre delay, to the point of the audible location, not to the listener's ears
    float attenuation; /// only the reflective & diffusive portion of attenuation, doesn't include distance attenuation
    float distance; /// total distance to the point of the audible location, not to the listener's ears
};

class SurfaceCharacteristics {
public:
    float reflectiveRatio;
    float absorptionRatio;
    float diffusionRatio;
};

class AudioReflector : public QObject {
    Q_OBJECT
public:
    AudioReflector(QObject* parent = NULL);

    // setup functions to configure the resources used by the AudioReflector
    void setVoxels(VoxelTree* voxels) { _voxels = voxels; }
    void setMyAvatar(MyAvatar* myAvatar) { _myAvatar = myAvatar; }
    void setAudio(Audio* audio) { _audio = audio; }

    void render(); /// must be called in the application render loop

    void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
    void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);

public slots:
    // statistics
    int getReflections() const { return _reflections; }
    float getAverageDelayMsecs() const { return _averageDelay; }
    float getAverageAttenuation() const { return _averageAttenuation; }
    float getMaxDelayMsecs() const { return _maxDelay; }
    float getMaxAttenuation() const { return _maxAttenuation; }
    float getMinDelayMsecs() const { return _minDelay; }
    float getMinAttenuation() const { return _minAttenuation; }
    float getDelayFromDistance(float distance);
    int getDiffusionPathCount() const { return _diffusionPathCount; }
    int getEchoesInjected() const { return _inboundAudioDelays.size() + _localAudioDelays.size(); }
    int getEchoesSuppressed() const { return _inboundEchoesSuppressed.size() + _localEchoesSuppressed.size(); }

    /// ms of delay added to all echoes
    float getPreDelay() const { return _preDelay; }
    void setPreDelay(float preDelay) { _preDelay = preDelay; }

    /// ms per meter that sound travels; larger means slower, which sounds bigger
    float getSoundMsPerMeter() const { return _soundMsPerMeter; }
    void setSoundMsPerMeter(float soundMsPerMeter) { _soundMsPerMeter = soundMsPerMeter; }

    /// scales attenuation to be louder or softer than the default distance attenuation
    float getDistanceAttenuationScalingFactor() const { return _distanceAttenuationScalingFactor; }
    void setDistanceAttenuationScalingFactor(float factor) { _distanceAttenuationScalingFactor = factor; }

    /// scales attenuation of local audio to be louder or softer than the default attenuation
    float getLocalAudioAttenuationFactor() const { return _localAudioAttenuationFactor; }
    void setLocalAudioAttenuationFactor(float factor) { _localAudioAttenuationFactor = factor; }

    /// ms window in which we will suppress echoes to reduce comb filter effects
    float getCombFilterWindow() const { return _combFilterWindow; }
    void setCombFilterWindow(float value) { _combFilterWindow = value; }

    /// number of points of diffusion from each reflection point; as fanout increases there are more chances for secondary
    /// echoes, but each diffusion ray is quieter and therefore more likely to be below the sound floor
    int getDiffusionFanout() const { return _diffusionFanout; }
    void setDiffusionFanout(int fanout) { _diffusionFanout = fanout; }

    /// ratio 0.0 - 1.0 of amount of each ray that is absorbed upon hitting a surface
    float getAbsorptionRatio() const { return _absorptionRatio; }
    void setAbsorptionRatio(float ratio);

    // ratio 0.0 - 1.0 of amount of each ray that is diffused upon hitting a surface
    float getDiffusionRatio() const { return _diffusionRatio; }
    void setDiffusionRatio(float ratio);

    // remaining ratio 0.0 - 1.0 of amount of each ray that is cleanly reflected upon hitting a surface
    float getReflectiveRatio() const { return (1.0f - (_absorptionRatio + _diffusionRatio)); }
    void setReflectiveRatio(float ratio);

signals:

private:
    VoxelTree* _voxels; // used to access voxel scene
    MyAvatar* _myAvatar; // access to listener
    Audio* _audio; // access to audio API

    // Helpers for drawing
    void drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color);

    // helper for generically calculating attenuation based on distance
    float getDistanceAttenuationCoefficient(float distance);

    // statistics
    int _reflections;
    int _diffusionPathCount;
    int _delayCount;
    float _totalDelay;
    float _averageDelay;
    float _maxDelay;
    float _minDelay;
    int _attenuationCount;
    float _totalAttenuation;
    float _averageAttenuation;
    float _maxAttenuation;
    float _minAttenuation;
||||||
|
glm::vec3 _listenerPosition;
|
||||||
|
glm::vec3 _origin;
|
||||||
|
glm::quat _orientation;
|
||||||
|
|
||||||
|
QVector<AudioPath*> _inboundAudioPaths; /// audio paths we're processing for inbound audio
|
||||||
|
QVector<AudiblePoint> _inboundAudiblePoints; /// the audible points that have been calculated from the inbound audio paths
|
||||||
|
QMap<float, float> _inboundAudioDelays; /// delay times for currently injected audio points
|
||||||
|
QVector<float> _inboundEchoesSuppressed; /// delay times for currently injected audio points
|
||||||
|
|
||||||
|
QVector<AudioPath*> _localAudioPaths; /// audio paths we're processing for local audio
|
||||||
|
QVector<AudiblePoint> _localAudiblePoints; /// the audible points that have been calculated from the local audio paths
|
||||||
|
QMap<float, float> _localAudioDelays; /// delay times for currently injected audio points
|
||||||
|
QVector<float> _localEchoesSuppressed; /// delay times for currently injected audio points
|
||||||
|
|
||||||
|
// adds a sound source to begin an audio path trace, these can be the initial sound sources with their directional properties,
|
||||||
|
// as well as diffusion sound sources
|
||||||
|
void addAudioPath(AudioSource source, const glm::vec3& origin, const glm::vec3& initialDirection, float initialAttenuation,
|
||||||
|
float initialDelay, float initialDistance = 0.0f, bool isDiffusion = false);
|
||||||
|
|
||||||
|
// helper that handles audioPath analysis
|
||||||
|
int analyzePathsSingleStep();
|
||||||
|
void handlePathPoint(AudioPath* path, float distance, OctreeElement* elementHit, BoxFace face);
|
||||||
|
void clearPaths();
|
||||||
|
void analyzePaths();
|
||||||
|
void drawRays();
|
||||||
|
void drawPath(AudioPath* path, const glm::vec3& originalColor);
|
||||||
|
void calculateAllReflections();
|
||||||
|
int countDiffusionPaths();
|
||||||
|
glm::vec3 getFaceNormal(BoxFace face);
|
||||||
|
|
||||||
|
void injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate);
|
||||||
|
void echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
|
||||||
|
|
||||||
|
// return the surface characteristics of the element we hit
|
||||||
|
SurfaceCharacteristics getSurfaceCharacteristics(OctreeElement* elementHit = NULL);
|
||||||
|
|
||||||
|
|
||||||
|
QMutex _mutex;
|
||||||
|
|
||||||
|
float _preDelay;
|
||||||
|
float _soundMsPerMeter;
|
||||||
|
float _distanceAttenuationScalingFactor;
|
||||||
|
float _localAudioAttenuationFactor;
|
||||||
|
float _combFilterWindow;
|
||||||
|
|
||||||
|
int _diffusionFanout; // number of points of diffusion from each reflection point
|
||||||
|
|
||||||
|
// all elements have the same material for now...
|
||||||
|
float _absorptionRatio;
|
||||||
|
float _diffusionRatio;
|
||||||
|
float _reflectiveRatio;
|
||||||
|
|
||||||
|
// remember the last known values at calculation
|
||||||
|
bool haveAttributesChanged();
|
||||||
|
|
||||||
|
bool _withDiffusion;
|
||||||
|
float _lastPreDelay;
|
||||||
|
float _lastSoundMsPerMeter;
|
||||||
|
float _lastDistanceAttenuationScalingFactor;
|
||||||
|
float _lastLocalAudioAttenuationFactor;
|
||||||
|
int _lastDiffusionFanout;
|
||||||
|
float _lastAbsorptionRatio;
|
||||||
|
float _lastDiffusionRatio;
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
#endif // interface_AudioReflector_h
|
|
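Note on the surface ratios: getReflectiveRatio() above is defined as 1.0 - (absorption + diffusion), so the three setters have to renormalize each other to keep the sum at 1.0. The committed implementations live in AudioReflector.cpp, which is not part of this diff; the following is only a hypothetical sketch of one way to maintain that invariant (assumes <algorithm> for std::min/std::max).

// Hypothetical sketch only -- not the committed AudioReflector.cpp.
// Invariant: _absorptionRatio + _diffusionRatio + reflective == 1.0f, each in [0.0, 1.0].
void AudioReflector::setAbsorptionRatio(float ratio) {
    _absorptionRatio = std::max(0.0f, std::min(ratio, 1.0f));
    // if absorption + diffusion now exceeds 1, shrink diffusion so reflection stays >= 0
    if (_absorptionRatio + _diffusionRatio > 1.0f) {
        _diffusionRatio = 1.0f - _absorptionRatio;
    }
}

void AudioReflector::setReflectiveRatio(float ratio) {
    float safeRatio = std::max(0.0f, std::min(ratio, 1.0f));
    // split the non-reflective remainder between absorption and diffusion
    // in their existing proportion
    float remainder = 1.0f - safeRatio;
    float oldTotal = _absorptionRatio + _diffusionRatio;
    float absorptionShare = (oldTotal > 0.0f) ? _absorptionRatio / oldTotal : 0.5f;
    _absorptionRatio = remainder * absorptionShare;
    _diffusionRatio = remainder - _absorptionRatio;
}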
Menu.cpp

@@ -25,7 +25,6 @@
 #include <QMessageBox>
 #include <QShortcut>
 #include <QSlider>
-#include <QStandardPaths>
 #include <QUuid>
 #include <QHBoxLayout>

@@ -90,7 +89,8 @@ Menu::Menu() :
     _fpsAverage(FIVE_SECONDS_OF_FRAMES),
     _fastFPSAverage(ONE_SECOND_OF_FRAMES),
     _loginAction(NULL),
-    _preferencesDialog(NULL)
+    _preferencesDialog(NULL),
+    _snapshotsLocation()
 {
     Application *appInstance = Application::getInstance();

@@ -349,8 +349,8 @@ Menu::Menu() :
     QMenu* renderDebugMenu = developerMenu->addMenu("Render Debugging Tools");
-    addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::PipelineWarnings, Qt::CTRL | Qt::SHIFT | Qt::Key_P);
-    addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::SuppressShortTimings, Qt::CTRL | Qt::SHIFT | Qt::Key_S);
+    addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::PipelineWarnings);
+    addCheckableActionToQMenuAndActionHash(renderDebugMenu, MenuOption::SuppressShortTimings);

     addCheckableActionToQMenuAndActionHash(renderDebugMenu,
                                            MenuOption::CullSharedFaces,

@@ -361,7 +361,7 @@ Menu::Menu() :
     addCheckableActionToQMenuAndActionHash(renderDebugMenu,
                                            MenuOption::ShowCulledSharedFaces,
-                                           Qt::CTRL | Qt::SHIFT | Qt::Key_X,
+                                           0,
                                            false,
                                            appInstance->getVoxels(),
                                            SLOT(showCulledSharedFaces()));

@@ -385,6 +385,50 @@ Menu::Menu() :
                                   appInstance->getAudio(),
                                   SLOT(toggleToneInjection()));

+    QMenu* spatialAudioMenu = audioDebugMenu->addMenu("Spatial Audio");
+
+    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessing,
+                                           Qt::CTRL | Qt::SHIFT | Qt::Key_M,
+                                           false,
+                                           appInstance->getAudio(),
+                                           SLOT(toggleAudioSpatialProcessing()));
+
+    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingIncludeOriginal,
+                                           Qt::CTRL | Qt::SHIFT | Qt::Key_O,
+                                           true);
+
+    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingSeparateEars,
+                                           Qt::CTRL | Qt::SHIFT | Qt::Key_E,
+                                           true);
+
+    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingPreDelay,
+                                           Qt::CTRL | Qt::SHIFT | Qt::Key_D,
+                                           true);
+
+    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingStereoSource,
+                                           Qt::CTRL | Qt::SHIFT | Qt::Key_S,
+                                           true);
+
+    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingHeadOriented,
+                                           Qt::CTRL | Qt::SHIFT | Qt::Key_H,
+                                           true);
+
+    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingWithDiffusions,
+                                           Qt::CTRL | Qt::SHIFT | Qt::Key_W,
+                                           true);
+
+    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingRenderPaths,
+                                           Qt::CTRL | Qt::SHIFT | Qt::Key_R,
+                                           true);
+
+    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces,
+                                           Qt::CTRL | Qt::SHIFT | Qt::Key_X,
+                                           true);
+
+    addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingProcessLocalAudio,
+                                           Qt::CTRL | Qt::SHIFT | Qt::Key_A,
+                                           true);
+
     addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel,
                                   Qt::CTRL | Qt::SHIFT | Qt::Key_V,
                                   this,

@@ -423,6 +467,8 @@ void Menu::loadSettings(QSettings* settings) {
     _avatarLODDistanceMultiplier = loadSetting(settings, "avatarLODDistanceMultiplier",
                                                DEFAULT_AVATAR_LOD_DISTANCE_MULTIPLIER);
     _boundaryLevelAdjust = loadSetting(settings, "boundaryLevelAdjust", 0);
+    _snapshotsLocation = settings->value("snapshotsLocation",
+                                         QStandardPaths::writableLocation(QStandardPaths::DesktopLocation)).toString();

     settings->beginGroup("View Frustum Offset Camera");
     // in case settings is corrupt or missing loadSetting() will check for NaN

@@ -466,6 +512,7 @@ void Menu::saveSettings(QSettings* settings) {
     settings->setValue("avatarLODIncreaseFPS", _avatarLODIncreaseFPS);
     settings->setValue("avatarLODDistanceMultiplier", _avatarLODDistanceMultiplier);
     settings->setValue("boundaryLevelAdjust", _boundaryLevelAdjust);
+    settings->setValue("snapshotsLocation", _snapshotsLocation);

     settings->beginGroup("View Frustum Offset Camera");
     settings->setValue("viewFrustumOffsetYaw", _viewFrustumOffset.yaw);
     settings->setValue("viewFrustumOffsetPitch", _viewFrustumOffset.pitch);

@@ -1074,18 +1121,12 @@ void Menu::showChat() {
         mainWindow->addDockWidget(Qt::RightDockWidgetArea, _chatWindow = new ChatWindow());
     }
     if (!_chatWindow->toggleViewAction()->isChecked()) {
-        int width = _chatWindow->width();
-        int y = qMax((mainWindow->height() - _chatWindow->height()) / 2, 0);
-        _chatWindow->move(mainWindow->width(), y);
+        const QRect& windowGeometry = mainWindow->geometry();
+        _chatWindow->move(windowGeometry.topRight().x() - _chatWindow->width(),
+                          windowGeometry.topRight().y() + (windowGeometry.height() / 2) - (_chatWindow->height() / 2));

         _chatWindow->resize(0, _chatWindow->height());
         _chatWindow->toggleViewAction()->trigger();
-
-        QPropertyAnimation* slideAnimation = new QPropertyAnimation(_chatWindow, "geometry", _chatWindow);
-        slideAnimation->setStartValue(_chatWindow->geometry());
-        slideAnimation->setEndValue(QRect(mainWindow->width() - width, _chatWindow->y(),
-                                          width, _chatWindow->height()));
-        slideAnimation->setDuration(250);
-        slideAnimation->start(QAbstractAnimation::DeleteWhenStopped);
     }
 }

@@ -1519,3 +1560,10 @@ void Menu::removeMenuItem(const QString& menu, const QString& menuitem) {
     QMenuBar::repaint();
 };
+
+QString Menu::getSnapshotsLocation() const {
+    if (_snapshotsLocation.isNull() || _snapshotsLocation.isEmpty() || QDir(_snapshotsLocation).exists() == false) {
+        return QStandardPaths::writableLocation(QStandardPaths::DesktopLocation);
+    }
+    return _snapshotsLocation;
+}
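All of the spatial-audio menu items above go through Menu's addCheckableActionToQMenuAndActionHash() helper, whose definition is outside this diff. Judging from the call sites, it takes the destination menu, the MenuOption name, an optional shortcut, the initial checked state, and an optional receiver/slot pair. A hypothetical sketch of such a helper follows -- the signature and the _actionHash member are inferred from the call sites, not the committed definition.

// Assumed shape of the helper, inferred from the call sites above.
QAction* Menu::addCheckableActionToQMenuAndActionHash(QMenu* destinationMenu,
                                                      const QString& actionName,
                                                      const QKeySequence& shortcut = 0,
                                                      const bool checked = false,
                                                      const QObject* receiver = NULL,
                                                      const char* member = NULL) {
    QAction* action = destinationMenu->addAction(actionName);
    action->setShortcut(shortcut);
    action->setCheckable(true);
    action->setChecked(checked);
    if (receiver && member) {
        // toggling the menu item invokes the receiver's slot
        connect(action, SIGNAL(triggered()), receiver, member);
    }
    _actionHash.insert(actionName, action); // so isOptionChecked(actionName) can look it up later
    return action;
}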
Menu.h

@@ -12,10 +12,12 @@
 #ifndef hifi_Menu_h
 #define hifi_Menu_h

+#include <QDir>
 #include <QMenuBar>
 #include <QHash>
 #include <QKeySequence>
 #include <QPointer>
+#include <QStandardPaths>

 #include <EventTypes.h>
 #include <MenuItemProperties.h>

@@ -83,6 +85,9 @@ public:
     void setFieldOfView(float fieldOfView) { _fieldOfView = fieldOfView; }
     float getFaceshiftEyeDeflection() const { return _faceshiftEyeDeflection; }
     void setFaceshiftEyeDeflection(float faceshiftEyeDeflection) { _faceshiftEyeDeflection = faceshiftEyeDeflection; }
+    QString getSnapshotsLocation() const;
+    void setSnapshotsLocation(QString snapshotsLocation) { _snapshotsLocation = snapshotsLocation; }
+
     BandwidthDialog* getBandwidthDialog() const { return _bandwidthDialog; }
     FrustumDrawMode getFrustumDrawMode() const { return _frustumDrawMode; }
     ViewFrustumOffset getViewFrustumOffset() const { return _viewFrustumOffset; }

@@ -243,6 +248,7 @@ private:
     QAction* _loginAction;
     QPointer<PreferencesDialog> _preferencesDialog;
     QAction* _chatAction;
+    QString _snapshotsLocation;
 };

 namespace MenuOption {

@@ -251,6 +257,18 @@ namespace MenuOption {
     const QString Atmosphere = "Atmosphere";
     const QString AudioNoiseReduction = "Audio Noise Reduction";
     const QString AudioToneInjection = "Inject Test Tone";
+
+    const QString AudioSpatialProcessing = "Audio Spatial Processing";
+    const QString AudioSpatialProcessingHeadOriented = "Head Oriented";
+    const QString AudioSpatialProcessingIncludeOriginal = "Includes Network Original";
+    const QString AudioSpatialProcessingPreDelay = "Add Pre-Delay";
+    const QString AudioSpatialProcessingProcessLocalAudio = "Process Local Audio";
+    const QString AudioSpatialProcessingRenderPaths = "Render Paths";
+    const QString AudioSpatialProcessingSeparateEars = "Separate Ears";
+    const QString AudioSpatialProcessingSlightlyRandomSurfaces = "Slightly Random Surfaces";
+    const QString AudioSpatialProcessingStereoSource = "Stereo Source";
+    const QString AudioSpatialProcessingWithDiffusions = "With Diffusions";
+
     const QString Avatars = "Avatars";
     const QString Bandwidth = "Bandwidth Display";
     const QString BandwidthDetails = "Bandwidth Details";
Head.h (avatar)

@@ -30,6 +30,8 @@ enum eyeContactTargets {
     MOUTH
 };

+const float EYE_EAR_GAP = 0.08f;
+
 class Avatar;
 class ProgramObject;

@@ -73,6 +75,11 @@ public:
     glm::quat getEyeRotation(const glm::vec3& eyePosition) const;

+    const glm::vec3& getRightEyePosition() const { return _rightEyePosition; }
+    const glm::vec3& getLeftEyePosition() const { return _leftEyePosition; }
+    glm::vec3 getRightEarPosition() const { return _rightEyePosition + (getRightDirection() * EYE_EAR_GAP) + (getFrontDirection() * -EYE_EAR_GAP); }
+    glm::vec3 getLeftEarPosition() const { return _leftEyePosition + (getRightDirection() * -EYE_EAR_GAP) + (getFrontDirection() * -EYE_EAR_GAP); }
+
     FaceModel& getFaceModel() { return _faceModel; }
     const FaceModel& getFaceModel() const { return _faceModel; }
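The new ear getters place each ear EYE_EAR_GAP meters outboard of and behind the corresponding eye, which is what the "Separate Ears" spatial-audio option needs: each reflection gets a delay and attenuation per ear rather than once per head. A minimal sketch of that per-ear delay computation, using the AudiblePoint fields and AudioReflector accessors shown earlier; this is illustrative only, the committed AudioReflector.cpp is not in this diff.

// Illustrative sketch, not committed code: per-ear delay for one audible point.
float delayForEar(const AudiblePoint& point, const glm::vec3& earPosition,
                  float preDelayMs, float soundMsPerMeter) {
    // total path: the traced distance to the audible point, plus the last leg to this ear
    float distanceToEar = glm::distance(point.location, earPosition);
    float totalDistance = point.distance + distanceToEar;
    return preDelayMs + totalDistance * soundMsPerMeter;
}

// usage: one delay per ear when "Separate Ears" is enabled
// float leftDelay = delayForEar(p, head.getLeftEarPosition(), reflector.getPreDelay(), reflector.getSoundMsPerMeter());
// float rightDelay = delayForEar(p, head.getRightEarPosition(), reflector.getPreDelay(), reflector.getSoundMsPerMeter());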
interface/src/ui/ChatMessageArea.cpp (new file, 44 lines)

//
//  ChatMessageArea.cpp
//  interface/src/ui
//
//  Created by Ryan Huffman on 4/11/14.
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "ChatMessageArea.h"
#include <QAbstractTextDocumentLayout>
#include <QWheelEvent>

ChatMessageArea::ChatMessageArea() : QTextBrowser() {
    connect(document()->documentLayout(), &QAbstractTextDocumentLayout::documentSizeChanged,
            this, &ChatMessageArea::updateLayout);
}

void ChatMessageArea::setHtml(const QString& html) {
    // Create format with updated line height
    QTextBlockFormat format;
    format.setLineHeight(CHAT_MESSAGE_LINE_HEIGHT, QTextBlockFormat::ProportionalHeight);

    // Possibly a bug in Qt: the format won't take effect if `insertHtml` is used first. Inserting a space and
    // deleting it afterwards ensures the format is applied.
    QTextCursor cursor = textCursor();
    cursor.setBlockFormat(format);
    cursor.insertText(" ");
    cursor.insertHtml(html);
    cursor.setPosition(0);
    cursor.deleteChar();
}

void ChatMessageArea::updateLayout() {
    setFixedHeight(document()->size().height());
    updateGeometry();
}

void ChatMessageArea::wheelEvent(QWheelEvent* event) {
    // Capture wheel events to stop Ctrl-WheelUp/Down zooming
    event->ignore();
}
interface/src/ui/ChatMessageArea.h (new file, 33 lines)

//
//  ChatMessageArea.h
//  interface/src/ui
//
//  Created by Ryan Huffman on 4/11/14.
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_ChatMessageArea_h
#define hifi_ChatMessageArea_h

#include <QTextBrowser>

const int CHAT_MESSAGE_LINE_HEIGHT = 130;

class ChatMessageArea : public QTextBrowser {
    Q_OBJECT
public:
    ChatMessageArea();
    virtual void setHtml(const QString& html);

public slots:
    void updateLayout();

protected:
    virtual void wheelEvent(QWheelEvent* event);

};

#endif // hifi_ChatMessageArea_h
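Because updateLayout() pins the widget's height to its document height whenever the layout reports a size change, the widget behaves like a rich-text label that grows with its content; callers are expected to turn its own scrollbars off and let an outer scroll area handle overflow, as the ChatWindow change below does. A minimal usage sketch (the enclosing `layout` is an assumed QLayout, not part of this diff):

// Minimal usage sketch mirroring the ChatWindow change below.
ChatMessageArea* area = new ChatMessageArea();
area->setVerticalScrollBarPolicy(Qt::ScrollBarAlwaysOff);   // height tracks content instead
area->setHorizontalScrollBarPolicy(Qt::ScrollBarAlwaysOff);
area->setHtml("hello <a href=\"http://example.com\">world</a>");
layout->addWidget(area); // grows to fit; the outer scroll area handles overflow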
ChatWindow.cpp

@@ -24,6 +24,7 @@
 #include "qtimespan.h"
 #include "ui_chatWindow.h"
 #include "XmppClient.h"
+#include "ChatMessageArea.h"

 #include "ChatWindow.h"

@@ -33,7 +34,9 @@ const QRegularExpression regexLinks("((?:(?:ftp)|(?:https?))://\\S+)");
 ChatWindow::ChatWindow() :
     ui(new Ui::ChatWindow),
-    numMessagesAfterLastTimeStamp(0)
+    numMessagesAfterLastTimeStamp(0),
+    _mousePressed(false),
+    _mouseStartPosition()
 {
     ui->setupUi(this);

@@ -86,6 +89,25 @@ ChatWindow::~ChatWindow() {
     delete ui;
 }

+void ChatWindow::mousePressEvent(QMouseEvent *e) {
+    if (e->button() == Qt::LeftButton && isFloating()) {
+        _mousePressed = true;
+        _mouseStartPosition = e->pos();
+    }
+}
+
+void ChatWindow::mouseMoveEvent(QMouseEvent *e) {
+    if (_mousePressed) {
+        move(mapToParent(e->pos() - _mouseStartPosition));
+    }
+}
+
+void ChatWindow::mouseReleaseEvent(QMouseEvent *e) {
+    if (e->button() == Qt::LeftButton) {
+        _mousePressed = false;
+    }
+}
+
 void ChatWindow::keyPressEvent(QKeyEvent* event) {
     QDockWidget::keyPressEvent(event);
     if (event->key() == Qt::Key_Escape) {

@@ -158,8 +180,18 @@ void ChatWindow::addTimeStamp() {
                                  "padding: 4px;");
         timeLabel->setSizePolicy(QSizePolicy::Expanding, QSizePolicy::Preferred);
         timeLabel->setAlignment(Qt::AlignHCenter);
+
+        bool atBottom = isAtBottom();
+
         ui->messagesGridLayout->addWidget(timeLabel, ui->messagesGridLayout->rowCount(), 0, 1, 2);
+        ui->messagesGridLayout->parentWidget()->updateGeometry();
+
+        Application::processEvents();
         numMessagesAfterLastTimeStamp = 0;
+
+        if (atBottom) {
+            scrollToBottom();
+        }
     }
 }

@@ -235,25 +267,41 @@ void ChatWindow::messageReceived(const QXmppMessage& message) {
     userLabel->setStyleSheet("padding: 2px; font-weight: bold");
     userLabel->setAlignment(Qt::AlignTop | Qt::AlignRight);

-    QLabel* messageLabel = new QLabel(message.body().replace(regexLinks, "<a href=\"\\1\">\\1</a>"));
-    messageLabel->setWordWrap(true);
-    messageLabel->setTextInteractionFlags(Qt::TextBrowserInteraction);
-    messageLabel->setOpenExternalLinks(true);
-    messageLabel->setStyleSheet("padding-bottom: 2px; padding-left: 2px; padding-top: 2px; padding-right: 20px");
-    messageLabel->setAlignment(Qt::AlignTop | Qt::AlignLeft);
-
-    if (getParticipantName(message.from()) == AccountManager::getInstance().getUsername()) {
+    ChatMessageArea* messageArea = new ChatMessageArea();
+    messageArea->setOpenLinks(true);
+    messageArea->setOpenExternalLinks(true);
+    messageArea->setWordWrapMode(QTextOption::WrapAtWordBoundaryOrAnywhere);
+    messageArea->setTextInteractionFlags(Qt::TextBrowserInteraction);
+    messageArea->setVerticalScrollBarPolicy(Qt::ScrollBarAlwaysOff);
+    messageArea->setHorizontalScrollBarPolicy(Qt::ScrollBarAlwaysOff);
+    messageArea->setReadOnly(true);
+
+    messageArea->setStyleSheet("padding-bottom: 2px;"
+                               "padding-left: 2px;"
+                               "padding-top: 2px;"
+                               "padding-right: 20px;"
+                               "background-color: rgba(0, 0, 0, 0%);"
+                               "border: 0;");
+
+    bool fromSelf = getParticipantName(message.from()) == AccountManager::getInstance().getUsername();
+    if (fromSelf) {
         userLabel->setStyleSheet(userLabel->styleSheet() + "; background-color: #e1e8ea");
-        messageLabel->setStyleSheet(messageLabel->styleSheet() + "; background-color: #e1e8ea");
+        messageArea->setStyleSheet(messageArea->styleSheet() + "; background-color: #e1e8ea");
     }

+    messageArea->setHtml(message.body().replace(regexLinks, "<a href=\"\\1\">\\1</a>"));
+
+    bool atBottom = isAtBottom();
     ui->messagesGridLayout->addWidget(userLabel, ui->messagesGridLayout->rowCount(), 0);
-    ui->messagesGridLayout->addWidget(messageLabel, ui->messagesGridLayout->rowCount() - 1, 1);
+    ui->messagesGridLayout->addWidget(messageArea, ui->messagesGridLayout->rowCount() - 1, 1);
+
     ui->messagesGridLayout->parentWidget()->updateGeometry();
     Application::processEvents();
-    QScrollBar* verticalScrollBar = ui->messagesScrollArea->verticalScrollBar();
-    verticalScrollBar->setSliderPosition(verticalScrollBar->maximum());
-    messageLabel->updateGeometry();
+
+    if (atBottom || fromSelf) {
+        scrollToBottom();
+    }
+
     ++numMessagesAfterLastTimeStamp;
     if (message.stamp().isValid()) {

@@ -265,6 +313,17 @@ void ChatWindow::messageReceived(const QXmppMessage& message) {
 #endif

+bool ChatWindow::isAtBottom() {
+    QScrollBar* verticalScrollBar = ui->messagesScrollArea->verticalScrollBar();
+    return verticalScrollBar->sliderPosition() == verticalScrollBar->maximum();
+}
+
+// Scroll chat message area to bottom.
+void ChatWindow::scrollToBottom() {
+    QScrollBar* verticalScrollBar = ui->messagesScrollArea->verticalScrollBar();
+    verticalScrollBar->setSliderPosition(verticalScrollBar->maximum());
+}
+
 void ChatWindow::togglePinned() {
     QMainWindow* mainWindow = Application::getInstance()->getWindow();
     mainWindow->removeDockWidget(this);

@@ -276,4 +335,4 @@ void ChatWindow::togglePinned() {
     }
     this->setFloating(!ui->togglePinnedButton->isChecked());
     setTitleBarWidget(ui->togglePinnedButton->isChecked()?new QWidget():titleBar);
 }
ChatWindow.h

@@ -39,6 +39,10 @@ public:
     virtual void keyPressEvent(QKeyEvent *event);
     virtual void showEvent(QShowEvent* event);

+    virtual void mousePressEvent(QMouseEvent *e);
+    virtual void mouseMoveEvent(QMouseEvent *e);
+    virtual void mouseReleaseEvent(QMouseEvent *e);
+
 protected:
     bool eventFilter(QObject* sender, QEvent* event);

@@ -48,11 +52,15 @@ private:
 #endif
     void startTimerForTimeStamps();
     void addTimeStamp();
+    bool isAtBottom();
+    void scrollToBottom();

     Ui::ChatWindow* ui;
     QWidget* titleBar;
     int numMessagesAfterLastTimeStamp;
     QDateTime lastMessageStamp;
+    bool _mousePressed;
+    QPoint _mouseStartPosition;

 private slots:
     void connected();
PreferencesDialog.cpp

@@ -28,6 +28,7 @@ PreferencesDialog::PreferencesDialog(QWidget* parent, Qt::WindowFlags flags) : F
     connect(ui.buttonBrowseHead, &QPushButton::clicked, this, &PreferencesDialog::openHeadModelBrowser);
     connect(ui.buttonBrowseBody, &QPushButton::clicked, this, &PreferencesDialog::openBodyModelBrowser);
+    connect(ui.buttonBrowseLocation, &QPushButton::clicked, this, &PreferencesDialog::openSnapshotLocationBrowser);
 }

 void PreferencesDialog::accept() {

@@ -59,6 +60,17 @@ void PreferencesDialog::openBodyModelBrowser() {
     modelBrowser.browse();
 }

+void PreferencesDialog::openSnapshotLocationBrowser() {
+    setWindowFlags(windowFlags() & ~Qt::WindowStaysOnTopHint);
+    QString dir = QFileDialog::getExistingDirectory(this, tr("Snapshots Location"),
+                                                    QStandardPaths::writableLocation(QStandardPaths::DesktopLocation),
+                                                    QFileDialog::ShowDirsOnly | QFileDialog::DontResolveSymlinks);
+    if (!dir.isNull() && !dir.isEmpty()) {
+        ui.snapshotLocationEdit->setText(dir);
+    }
+    setWindowFlags(windowFlags() | Qt::WindowStaysOnTopHint);
+}
+
 void PreferencesDialog::resizeEvent(QResizeEvent *resizeEvent) {

     // keep buttons panel at the bottom

@@ -94,6 +106,8 @@ void PreferencesDialog::loadPreferences() {
     _skeletonURLString = myAvatar->getSkeletonModel().getURL().toString();
     ui.skeletonURLEdit->setText(_skeletonURLString);

+    ui.snapshotLocationEdit->setText(menuInstance->getSnapshotsLocation());
+
     ui.pupilDilationSlider->setValue(myAvatar->getHead()->getPupilDilation() *
                                      ui.pupilDilationSlider->maximum());

@@ -143,6 +157,10 @@ void PreferencesDialog::savePreferences() {
         Application::getInstance()->bumpSettings();
     }

+    if (!ui.snapshotLocationEdit->text().isEmpty() && QDir(ui.snapshotLocationEdit->text()).exists()) {
+        Menu::getInstance()->setSnapshotsLocation(ui.snapshotLocationEdit->text());
+    }
+
     myAvatar->getHead()->setPupilDilation(ui.pupilDilationSlider->value() / (float)ui.pupilDilationSlider->maximum());
     myAvatar->setLeanScale(ui.leanScaleSpin->value());
     myAvatar->setClampedTargetScale(ui.avatarScaleSpin->value());
PreferencesDialog.h

@@ -41,6 +41,7 @@ private slots:
     void accept();
     void setHeadUrl(QString modelUrl);
     void setSkeletonUrl(QString modelUrl);
+    void openSnapshotLocationBrowser();

 };
Snapshot.cpp

@@ -16,6 +16,7 @@
 #include <FileUtils.h>

 #include "Snapshot.h"
+#include "Menu.h"

 // filename format: hifi-snap-by-%username%-on-%date%_%time%_@-%location%.jpg
 // %1 <= username, %2 <= date and time, %3 <= current location

@@ -90,8 +91,12 @@ void Snapshot::saveSnapshot(QGLWidget* widget, Avatar* avatar) {
     username.replace(QRegExp("[^A-Za-z0-9_]"), "-");

     QDateTime now = QDateTime::currentDateTime();
-    QString fileName = FileUtils::standardPath(SNAPSHOTS_DIRECTORY);
+    QString fileName = Menu::getInstance()->getSnapshotsLocation();
+
+    if (!fileName.endsWith(QDir::separator())) {
+        fileName.append(QDir::separator());
+    }
+
     fileName.append(QString(FILENAME_PATH_FORMAT.arg(username, now.toString(DATETIME_FORMAT), formattedLocation)));
     shot.save(fileName, 0, 100);
 }
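With this change the snapshot directory comes from the user-configurable menu setting instead of a hard-coded standard path; the file name itself is unchanged. As a worked example of the format comment above, with hypothetical values and an assumed FILENAME_PATH_FORMAT of "hifi-snap-by-%1-on-%2_@-%3.jpg" (the real constant is defined elsewhere in Snapshot.cpp and is not part of this diff):

// Hypothetical worked example of the filename construction above.
QString fileName = "/home/user/Snapshots/"; // getSnapshotsLocation() plus a trailing separator
fileName.append(QString("hifi-snap-by-%1-on-%2_@-%3.jpg")
                    .arg("alice", "2014-04-11_17-30-05", "root"));
// => /home/user/Snapshots/hifi-snap-by-alice-on-2014-04-11_17-30-05_@-root.jpg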
Stats.cpp

@@ -293,6 +293,7 @@ void Stats::display(
     glm::vec3 avatarPos = myAvatar->getPosition();
+
     lines = _expanded ? 5 : 3;

     drawBackground(backgroundColor, horizontalOffset, 0, _geoStatsWidth, lines * STATS_PELS_PER_LINE + 10);
     horizontalOffset += 5;

@@ -341,6 +342,10 @@ void Stats::display(
     VoxelSystem* voxels = Application::getInstance()->getVoxels();

     lines = _expanded ? 12 : 3;
+    if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) {
+        lines += 8; // spatial audio processing adds 1 spacing line and 7 extra lines of info
+    }

     drawBackground(backgroundColor, horizontalOffset, 0, glWidget->width() - horizontalOffset, lines * STATS_PELS_PER_LINE + 10);
     horizontalOffset += 5;

@@ -497,5 +502,89 @@ void Stats::display(
         voxelStats << "LOD: You can see " << qPrintable(displayLODDetails.trimmed());
         verticalOffset += STATS_PELS_PER_LINE;
         drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, (char*)voxelStats.str().c_str(), color);
     }
+
+    if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) {
+        verticalOffset += STATS_PELS_PER_LINE; // space one line...
+
+        const AudioReflector* audioReflector = Application::getInstance()->getAudioReflector();
+
+        // add some reflection stats
+        char reflectionsStatus[128];
+
+        sprintf(reflectionsStatus, "Reflections: %d, Original: %s, Ears: %s, Source: %s, Normals: %s",
+                audioReflector->getReflections(),
+                (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)
+                    ? "included" : "silent"),
+                (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSeparateEars)
+                    ? "two" : "one"),
+                (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingStereoSource)
+                    ? "stereo" : "mono"),
+                (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingSlightlyRandomSurfaces)
+                    ? "random" : "regular")
+                );
+
+        verticalOffset += STATS_PELS_PER_LINE;
+        drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
+
+        float preDelay = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingPreDelay) ?
+                                        audioReflector->getPreDelay() : 0.0f;
+
+        sprintf(reflectionsStatus, "Delay: pre: %6.3f, average %6.3f, max %6.3f, min %6.3f, speed: %6.3f",
+                preDelay,
+                audioReflector->getAverageDelayMsecs(),
+                audioReflector->getMaxDelayMsecs(),
+                audioReflector->getMinDelayMsecs(),
+                audioReflector->getSoundMsPerMeter());
+
+        verticalOffset += STATS_PELS_PER_LINE;
+        drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
+
+        sprintf(reflectionsStatus, "Attenuation: average %5.3f, max %5.3f, min %5.3f, Factor: %5.3f",
+                audioReflector->getAverageAttenuation(),
+                audioReflector->getMaxAttenuation(),
+                audioReflector->getMinAttenuation(),
+                audioReflector->getDistanceAttenuationScalingFactor());
+
+        verticalOffset += STATS_PELS_PER_LINE;
+        drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
+
+        sprintf(reflectionsStatus, "Local Audio: %s Attenuation: %5.3f",
+                (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)
+                    ? "yes" : "no"),
+                audioReflector->getLocalAudioAttenuationFactor());
+
+        verticalOffset += STATS_PELS_PER_LINE;
+        drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
+
+        bool diffusionEnabled = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions);
+        int fanout = diffusionEnabled ? audioReflector->getDiffusionFanout() : 0;
+        int diffusionPaths = diffusionEnabled ? audioReflector->getDiffusionPathCount() : 0;
+        sprintf(reflectionsStatus, "Diffusion: %s, Fanout: %d, Paths: %d",
+                (diffusionEnabled ? "yes" : "no"), fanout, diffusionPaths);
+
+        verticalOffset += STATS_PELS_PER_LINE;
+        drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
+
+        const float AS_PERCENT = 100.0f;
+        float reflectiveRatio = audioReflector->getReflectiveRatio() * AS_PERCENT;
+        float diffusionRatio = audioReflector->getDiffusionRatio() * AS_PERCENT;
+        float absorptionRatio = audioReflector->getAbsorptionRatio() * AS_PERCENT;
+        sprintf(reflectionsStatus, "Ratios: Reflective: %5.3f, Diffusion: %5.3f, Absorption: %5.3f",
+                reflectiveRatio, diffusionRatio, absorptionRatio);
+
+        verticalOffset += STATS_PELS_PER_LINE;
+        drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
+
+        sprintf(reflectionsStatus, "Comb Filter Window: %5.3f ms, Allowed: %d, Suppressed: %d",
+                audioReflector->getCombFilterWindow(),
+                audioReflector->getEchoesInjected(),
+                audioReflector->getEchoesSuppressed());
+
+        verticalOffset += STATS_PELS_PER_LINE;
+        drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color);
+    }
 }
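One thing worth watching in the block above: every stats line is formatted with sprintf() into a fixed 128-byte reflectionsStatus buffer, and several of the format strings can get close to that limit once the numbers are expanded. A bounded variant of the same pattern is safer; a sketch of one of the calls rewritten (same variables as in the block above):

// Safer variant of the pattern above: bound the write to the buffer size.
char reflectionsStatus[128];
snprintf(reflectionsStatus, sizeof(reflectionsStatus),
         "Delay: pre: %6.3f, average %6.3f, max %6.3f, min %6.3f, speed: %6.3f",
         preDelay,
         audioReflector->getAverageDelayMsecs(),
         audioReflector->getMaxDelayMsecs(),
         audioReflector->getMinDelayMsecs(),
         audioReflector->getSoundMsPerMeter());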
chatWindow.ui

@@ -135,13 +135,23 @@
       <property name="focusPolicy">
        <enum>Qt::NoFocus</enum>
       </property>
+      <property name="styleSheet">
+       <string notr="true">QPushButton {
+    background-color: rgba( 0, 0, 0, 0% );
+    border: none;
+    image: url(../resources/images/close.svg)
+}
+
+QPushButton:pressed {
+    background-color: rgba( 0, 0, 0, 0% );
+    border: none;
+    image: url(../resources/images/close_down.svg)
+}</string>
+      </property>
       <property name="text">
        <string/>
       </property>
-      <property name="icon">
-       <iconset>
-        <normaloff>../resources/images/close.svg</normaloff>../resources/images/close.svg</iconset>
-      </property>
       <property name="flat">
        <bool>true</bool>
       </property>
preferencesDialog.ui

@@ -156,7 +156,7 @@ color: #0e7077</string>
        <x>0</x>
        <y>0</y>
        <width>615</width>
-       <height>833</height>
+       <height>936</height>
      </rect>
     </property>
     <layout class="QVBoxLayout" name="verticalLayout_2">

@@ -300,7 +300,7 @@ color: #0e7077</string>
       <number>0</number>
      </property>
      <property name="buddy">
-      <cstring>faceURLEdit</cstring>
+      <cstring>snapshotLocationEdit</cstring>
      </property>
     </widget>
    </item>

@@ -476,6 +476,145 @@ color: #0e7077</string>
     </item>
    </layout>
   </item>
+  <item>
+   <widget class="QLabel" name="avatarTitleLabel_2">
+    <property name="sizePolicy">
+     <sizepolicy hsizetype="Fixed" vsizetype="Fixed">
+      <horstretch>0</horstretch>
+      <verstretch>0</verstretch>
+     </sizepolicy>
+    </property>
+    <property name="minimumSize">
+     <size>
+      <width>0</width>
+      <height>40</height>
+     </size>
+    </property>
+    <property name="font">
+     <font>
+      <family>Arial</family>
+      <pointsize>20</pointsize>
+      <weight>50</weight>
+      <bold>false</bold>
+     </font>
+    </property>
+    <property name="styleSheet">
+     <string notr="true">color: #0e7077</string>
+    </property>
+    <property name="text">
+     <string>Snapshots</string>
+    </property>
+    <property name="alignment">
+     <set>Qt::AlignBottom|Qt::AlignLeading|Qt::AlignLeft</set>
+    </property>
+   </widget>
+  </item>
+  <item>
+   <widget class="QLabel" name="headLabel">
+    <property name="sizePolicy">
+     <sizepolicy hsizetype="Fixed" vsizetype="Fixed">
+      <horstretch>0</horstretch>
+      <verstretch>0</verstretch>
+     </sizepolicy>
+    </property>
+    <property name="minimumSize">
+     <size>
+      <width>0</width>
+      <height>30</height>
+     </size>
+    </property>
+    <property name="font">
+     <font>
+      <family>Arial</family>
+      <pointsize>16</pointsize>
+     </font>
+    </property>
+    <property name="styleSheet">
+     <string notr="true">color: #0e7077</string>
+    </property>
+    <property name="text">
+     <string>Place my Snapshots here:</string>
+    </property>
+    <property name="alignment">
+     <set>Qt::AlignBottom|Qt::AlignLeading|Qt::AlignLeft</set>
+    </property>
+    <property name="margin">
+     <number>0</number>
+    </property>
+    <property name="buddy">
+     <cstring>snapshotLocationEdit</cstring>
+    </property>
+   </widget>
+  </item>
+  <item>
+   <layout class="QHBoxLayout" name="horizontalLayout">
+    <item>
+     <widget class="QLineEdit" name="snapshotLocationEdit">
+      <property name="sizePolicy">
+       <sizepolicy hsizetype="Expanding" vsizetype="Fixed">
+        <horstretch>0</horstretch>
+        <verstretch>0</verstretch>
+       </sizepolicy>
+      </property>
+      <property name="font">
+       <font>
+        <family>Arial</family>
+       </font>
+      </property>
+     </widget>
+    </item>
+    <item>
+     <spacer name="horizontalSpacer">
+      <property name="orientation">
+       <enum>Qt::Horizontal</enum>
+      </property>
+      <property name="sizeType">
+       <enum>QSizePolicy::Fixed</enum>
+      </property>
+      <property name="sizeHint" stdset="0">
+       <size>
+        <width>20</width>
+        <height>20</height>
+       </size>
+      </property>
+     </spacer>
+    </item>
+    <item>
+     <widget class="QPushButton" name="buttonBrowseLocation">
+      <property name="sizePolicy">
+       <sizepolicy hsizetype="Fixed" vsizetype="Fixed">
+        <horstretch>0</horstretch>
+        <verstretch>0</verstretch>
+       </sizepolicy>
+      </property>
+      <property name="minimumSize">
+       <size>
+        <width>30</width>
+        <height>30</height>
+       </size>
+      </property>
+      <property name="maximumSize">
+       <size>
+        <width>30</width>
+        <height>30</height>
+       </size>
+      </property>
+      <property name="styleSheet">
+       <string notr="true"/>
+      </property>
+      <property name="text">
+       <string/>
+      </property>
+      <property name="iconSize">
+       <size>
+        <width>30</width>
+        <height>30</height>
+       </size>
+      </property>
+     </widget>
+    </item>
+   </layout>
+  </item>
   <item>
    <spacer name="verticalSpacer">
     <property name="orientation">
AudioRingBuffer.cpp

@@ -18,15 +18,19 @@
 #include "AudioRingBuffer.h"

-AudioRingBuffer::AudioRingBuffer(int numFrameSamples) :
+AudioRingBuffer::AudioRingBuffer(int numFrameSamples, bool randomAccessMode) :
     NodeData(),
     _sampleCapacity(numFrameSamples * RING_BUFFER_LENGTH_FRAMES),
     _numFrameSamples(numFrameSamples),
     _isStarved(true),
-    _hasStarted(false)
+    _hasStarted(false),
+    _randomAccessMode(randomAccessMode)
 {
     if (numFrameSamples) {
         _buffer = new int16_t[_sampleCapacity];
+        if (_randomAccessMode) {
+            memset(_buffer, 0, _sampleCapacity * sizeof(int16_t));
+        }
         _nextOutput = _buffer;
         _endOfLastWrite = _buffer;
     } else {

@@ -50,6 +54,9 @@ void AudioRingBuffer::resizeForFrameSize(qint64 numFrameSamples) {
     delete[] _buffer;
     _sampleCapacity = numFrameSamples * RING_BUFFER_LENGTH_FRAMES;
     _buffer = new int16_t[_sampleCapacity];
+    if (_randomAccessMode) {
+        memset(_buffer, 0, _sampleCapacity * sizeof(int16_t));
+    }
     _nextOutput = _buffer;
     _endOfLastWrite = _buffer;
 }

@@ -68,18 +75,34 @@ qint64 AudioRingBuffer::readData(char *data, qint64 maxSize) {
     // only copy up to the number of samples we have available
     int numReadSamples = std::min((unsigned) (maxSize / sizeof(int16_t)), samplesAvailable());

+    // If we're in random access mode, then we consider our number of available read samples slightly
+    // differently. Namely, if anything has been written, we say we have as many samples as they ask for,
+    // otherwise we say we have nothing available.
+    if (_randomAccessMode) {
+        numReadSamples = _endOfLastWrite ? (maxSize / sizeof(int16_t)) : 0;
+    }
+
     if (_nextOutput + numReadSamples > _buffer + _sampleCapacity) {
         // we're going to need to do two reads to get this data, it wraps around the edge

         // read to the end of the buffer
         int numSamplesToEnd = (_buffer + _sampleCapacity) - _nextOutput;
         memcpy(data, _nextOutput, numSamplesToEnd * sizeof(int16_t));
+        if (_randomAccessMode) {
+            memset(_nextOutput, 0, numSamplesToEnd * sizeof(int16_t)); // clear it
+        }

         // read the rest from the beginning of the buffer
         memcpy(data + (numSamplesToEnd * sizeof(int16_t)), _buffer, (numReadSamples - numSamplesToEnd) * sizeof(int16_t));
+        if (_randomAccessMode) {
+            memset(_buffer, 0, (numReadSamples - numSamplesToEnd) * sizeof(int16_t)); // clear it
+        }
     } else {
         // read the data
         memcpy(data, _nextOutput, numReadSamples * sizeof(int16_t));
+        if (_randomAccessMode) {
+            memset(_nextOutput, 0, numReadSamples * sizeof(int16_t)); // clear it
+        }
     }

     // push the position of _nextOutput by the number of samples read

@@ -128,6 +151,10 @@ int16_t& AudioRingBuffer::operator[](const int index) {
     return *shiftedPositionAccomodatingWrap(_nextOutput, index);
 }

+const int16_t& AudioRingBuffer::operator[] (const int index) const {
+    return *shiftedPositionAccomodatingWrap(_nextOutput, index);
+}
+
 void AudioRingBuffer::shiftReadPosition(unsigned int numSamples) {
     _nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numSamples);
 }
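The random-access changes above give the ring buffer "read once, then silence" semantics: in that mode the buffer starts zeroed, reads always report the requested sample count, and every region handed to the reader is immediately cleared, so samples mixed in at arbitrary offsets play exactly once and are never replayed as stale data. A small usage sketch of that mode; the constants and the mixing loop are illustrative assumptions, not part of this diff:

// Illustrative only; constants and mixing code are assumptions, not committed code.
const int FRAME_SAMPLES = 512;
AudioRingBuffer echoBuffer(FRAME_SAMPLES, true); // randomAccessMode = true: buffer starts zeroed

int delayInSamples = 240;                // assumed echo delay
int16_t echoSamples[FRAME_SAMPLES] = {}; // assumed source samples

// a writer mixes delayed echo samples into the buffer at an offset (operator[] wraps)
for (int i = 0; i < FRAME_SAMPLES; i++) {
    echoBuffer[delayInSamples + i] += echoSamples[i];
}

// the audio callback drains a frame; in random access mode readData() returns the
// requested amount and zeroes what it read, so each echo is injected exactly once
int16_t output[FRAME_SAMPLES];
echoBuffer.readData(reinterpret_cast<char*>(output), sizeof(output));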
@@ -39,7 +39,7 @@ const int MIN_SAMPLE_VALUE = std::numeric_limits<int16_t>::min();
 class AudioRingBuffer : public NodeData {
     Q_OBJECT
 public:
-    AudioRingBuffer(int numFrameSamples);
+    AudioRingBuffer(int numFrameSamples, bool randomAccessMode = false);
     ~AudioRingBuffer();

     void reset();

@@ -50,8 +50,8 @@ public:
     int parseData(const QByteArray& packet);

     // assume callers using this will never wrap around the end
-    const int16_t* getNextOutput() { return _nextOutput; }
-    const int16_t* getBuffer() { return _buffer; }
+    const int16_t* getNextOutput() const { return _nextOutput; }
+    const int16_t* getBuffer() const { return _buffer; }

     qint64 readSamples(int16_t* destination, qint64 maxSamples);
     qint64 writeSamples(const int16_t* source, qint64 maxSamples);

@@ -60,6 +60,7 @@ public:
     qint64 writeData(const char* data, qint64 maxSize);

     int16_t& operator[](const int index);
+    const int16_t& operator[] (const int index) const;

     void shiftReadPosition(unsigned int numSamples);

@@ -87,6 +88,7 @@ protected:
     int16_t* _buffer;
     bool _isStarved;
     bool _hasStarted;
+    bool _randomAccessMode; /// will this ringbuffer be used for random access? if so, do some special processing
 };

 #endif // hifi_AudioRingBuffer_h
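These header changes pair each accessor with a const-qualified twin (getNextOutput(), getBuffer(), operator[]). That is the standard const-correctness idiom: without the const overload, code holding only a const AudioRingBuffer& could not index or inspect the buffer at all. A generic sketch of the pattern:

    #include <cstdint>

    struct SampleView {
        int16_t* _data;

        // mutable access for writers
        int16_t& operator[](int i) { return _data[i]; }

        // read-only access, callable through const references
        const int16_t& operator[](int i) const { return _data[i]; }
    };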
@@ -105,7 +105,21 @@ QUdpSocket& LimitedNodeList::getDTLSSocket() {
         _dtlsSocket = new QUdpSocket(this);

         _dtlsSocket->bind(QHostAddress::AnyIPv4, 0, QAbstractSocket::DontShareAddress);
-        qDebug() << "NodeList DTLS socket is listening on" << _dtlsSocket->localPort();
+
+#if defined(IP_DONTFRAG) || defined(IP_MTU_DISCOVER)
+        qDebug() << "Making required DTLS changes to LimitedNodeList DTLS socket.";
+
+        int socketHandle = _dtlsSocket->socketDescriptor();
+#if defined(IP_DONTFRAG)
+        int optValue = 1;
+        setsockopt(socketHandle, IPPROTO_IP, IP_DONTFRAG, reinterpret_cast<const void*>(&optValue), sizeof(optValue));
+#elif defined(IP_MTU_DISCOVER)
+        int optValue = 1;
+        setsockopt(socketHandle, IPPROTO_IP, IP_MTU_DISCOVER, reinterpret_cast<const void*>(&optValue), sizeof(optValue));
+#endif
+#endif
+
+        qDebug() << "LimitedNodeList DTLS socket is listening on" << _dtlsSocket->localPort();
     }

     return *_dtlsSocket;
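Note that the option value is passed by address (&optValue), which is what setsockopt requires; casting the value itself to a pointer would make the kernel read from a bogus address. One caveat worth recording: on Linux, IP_MTU_DISCOVER takes one of the IP_PMTUDISC_* modes rather than a boolean, so a value of 1 selects IP_PMTUDISC_WANT (fragment locally when needed) rather than a hard don't-fragment. A Linux-specific sketch of the stricter setting, offered as an aside rather than what this commit does:

    #include <netinet/in.h>
    #include <sys/socket.h>

    // Request full path-MTU discovery: set the DF bit and never fragment
    // locally. Returns true on success.
    bool enablePathMTUDiscovery(int socketHandle) {
    #if defined(IP_MTU_DISCOVER)
        int optValue = IP_PMTUDISC_DO;
        return setsockopt(socketHandle, IPPROTO_IP, IP_MTU_DISCOVER,
                          &optValue, sizeof(optValue)) == 0;
    #else
        (void) socketHandle;
        return false; // platform exposes IP_DONTFRAG or nothing
    #endif
    }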
@@ -331,7 +345,7 @@ void LimitedNodeList::processKillNode(const QByteArray& dataByteArray) {
     killNodeWithUUID(nodeUUID);
 }

-SharedNodePointer LimitedNodeList::addOrUpdateNode(const QUuid& uuid, char nodeType,
+SharedNodePointer LimitedNodeList::addOrUpdateNode(const QUuid& uuid, NodeType_t nodeType,
                                                    const HifiSockAddr& publicSocket, const HifiSockAddr& localSocket) {
     _nodeHashMutex.lock();

@@ -78,7 +78,7 @@ public:
     SharedNodePointer nodeWithUUID(const QUuid& nodeUUID, bool blockingLock = true);
     SharedNodePointer sendingNodeForPacket(const QByteArray& packet);

-    SharedNodePointer addOrUpdateNode(const QUuid& uuid, char nodeType,
+    SharedNodePointer addOrUpdateNode(const QUuid& uuid, NodeType_t nodeType,
                                       const HifiSockAddr& publicSocket, const HifiSockAddr& localSocket);
     SharedNodePointer updateSocketsForNode(const QUuid& uuid,
                                            const HifiSockAddr& publicSocket, const HifiSockAddr& localSocket);

@@ -42,7 +42,7 @@ const QString& NodeType::getNodeTypeName(NodeType_t nodeType) {
     return matchedTypeName != TypeNameHash.end() ? matchedTypeName.value() : UNKNOWN_NodeType_t_NAME;
 }

-Node::Node(const QUuid& uuid, char type, const HifiSockAddr& publicSocket, const HifiSockAddr& localSocket) :
+Node::Node(const QUuid& uuid, NodeType_t type, const HifiSockAddr& publicSocket, const HifiSockAddr& localSocket) :
     _type(type),
     _uuid(uuid),
     _wakeTimestamp(QDateTime::currentMSecsSinceEpoch()),

@@ -58,6 +58,7 @@ Node::Node(const QUuid& uuid, char type, const HifiSockAddr& publicSocket, const
     _clockSkewUsec(0),
     _mutex()
 {
 }

 Node::~Node() {

@@ -45,7 +45,7 @@ namespace NodeType {
 class Node : public QObject {
     Q_OBJECT
 public:
-    Node(const QUuid& uuid, char type, const HifiSockAddr& publicSocket, const HifiSockAddr& localSocket);
+    Node(const QUuid& uuid, NodeType_t type, const HifiSockAddr& publicSocket, const HifiSockAddr& localSocket);
     ~Node();

     bool operator==(const Node& otherNode) const { return _uuid == otherNode._uuid; }
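The char → NodeType_t renaming in addOrUpdateNode and the Node constructor is purely about intent: if NodeType_t is a char-sized typedef (an assumption here; the typedef itself is outside this diff), nothing changes at the ABI level, but signatures become self-documenting and there is one place to widen the type later. A hypothetical sketch of the shape involved; the real definitions live in NodeType and are not part of this diff:

    #include <QtGlobal>

    // Hypothetical typedef and constants for illustration only.
    typedef quint8 NodeType_t;

    namespace NodeType {
        const NodeType_t AudioMixer = 'M';
        const NodeType_t Agent = 'I';
    }

    // 'char nodeType' says nothing; 'NodeType_t nodeType' names the contract.
    void handleNode(NodeType_t nodeType);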
@@ -209,8 +209,10 @@ void NodeList::reset() {
     // clear the domain connection information
     _domainHandler.clearConnectionInfo();

-    // also disconnect from the DTLS socket readyRead() so it can handle handshaking
-    disconnect(_dtlsSocket, 0, this, 0);
+    // if we setup the DTLS socket, also disconnect from the DTLS socket readyRead() so it can handle handshaking
+    if (_dtlsSocket) {
+        disconnect(_dtlsSocket, 0, this, 0);
+    }
 }

 void NodeList::addNodeTypeToInterestSet(NodeType_t nodeTypeToAdd) {

@@ -376,10 +378,14 @@ void NodeList::sendDomainServerCheckIn() {
         }
     }

-    // construct the DS check in packet
-    QUuid packetUUID = (!_sessionUUID.isNull() ? _sessionUUID : _domainHandler.getAssignmentUUID());
-
-    QByteArray domainServerPacket = byteArrayWithPopulatedHeader(PacketTypeDomainListRequest, packetUUID);
+    PacketType domainPacketType = _sessionUUID.isNull()
+        ? PacketTypeDomainConnectRequest : PacketTypeDomainListRequest;
+
+    // construct the DS check in packet
+    QUuid packetUUID = (domainPacketType == PacketTypeDomainListRequest
+                        ? _sessionUUID : _domainHandler.getAssignmentUUID());
+
+    QByteArray domainServerPacket = byteArrayWithPopulatedHeader(domainPacketType, packetUUID);
     QDataStream packetStream(&domainServerPacket, QIODevice::Append);

     // pack our data to send to the domain-server
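The check-in rework splits the handshake in two: while the node has no session UUID it sends PacketTypeDomainConnectRequest stamped with its assignment UUID, and once the domain-server has answered with a session UUID it switches to PacketTypeDomainListRequest. Condensed into a hypothetical helper (names follow the diff):

    PacketType checkInPacketType(const QUuid& sessionUUID) {
        // no session yet -> ask to connect; otherwise just refresh the node list
        return sessionUUID.isNull() ? PacketTypeDomainConnectRequest
                                    : PacketTypeDomainListRequest;
    }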
@@ -55,7 +55,7 @@ PacketVersion versionForPacketType(PacketType type) {
             return 1;
         case PacketTypeDomainList:
         case PacketTypeDomainListRequest:
-            return 2;
+            return 3;
         case PacketTypeCreateAssignment:
         case PacketTypeRequestAssignment:
             return 2;

@@ -58,7 +58,7 @@ enum PacketType {
     PacketTypeMetavoxelData,
     PacketTypeAvatarIdentity,
     PacketTypeAvatarBillboard,
-    PacketTypeDomainConnectRequest, // reusable
+    PacketTypeDomainConnectRequest,
     PacketTypeDomainServerRequireDTLS,
     PacketTypeNodeJsonStats,
 };

@@ -66,7 +66,8 @@ enum PacketType {
 typedef char PacketVersion;

 const QSet<PacketType> NON_VERIFIED_PACKETS = QSet<PacketType>()
-    << PacketTypeDomainServerRequireDTLS << PacketTypeDomainList << PacketTypeDomainListRequest
+    << PacketTypeDomainServerRequireDTLS << PacketTypeDomainConnectRequest
+    << PacketTypeDomainList << PacketTypeDomainListRequest
     << PacketTypeCreateAssignment << PacketTypeRequestAssignment << PacketTypeStunResponse
     << PacketTypeNodeJsonStats;
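Adding PacketTypeDomainConnectRequest to NON_VERIFIED_PACKETS follows from the handshake above: a connect request is sent before any session UUID exists, so there is nothing yet to verify it against. A sketch of how such a set is typically consulted (the gate function is hypothetical; only the set comes from the diff):

    bool packetNeedsVerification(PacketType type) {
        return !NON_VERIFIED_PACKETS.contains(type);
    }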
@@ -23,7 +23,8 @@ enum BoxFace {
     MIN_Y_FACE,
     MAX_Y_FACE,
     MIN_Z_FACE,
-    MAX_Z_FACE
+    MAX_Z_FACE,
+    UNKNOWN_FACE
 };

 enum BoxVertex {
@@ -425,10 +425,6 @@ bool ViewFrustum::matches(const ViewFrustum& compareTo, bool debug) const {
     return result;
 }

-bool isNaN(float f) {
-    return f != f;
-}
-
 bool ViewFrustum::isVerySimilar(const ViewFrustum& compareTo, bool debug) const {

     // Compute distance between the two positions

@@ -450,7 +446,7 @@ bool ViewFrustum::isVerySimilar(const ViewFrustum& compareTo, bool debug) const
     float angleEyeOffsetOrientation = compareTo._eyeOffsetOrientation == _eyeOffsetOrientation
         ? 0.0f : glm::degrees(glm::angle(dQEyeOffsetOrientation));
     if (isNaN(angleEyeOffsetOrientation)) {
-        angleOrientation = 0.0f;
+        angleEyeOffsetOrientation = 0.0f;
     }

     bool result =
@@ -61,8 +61,7 @@ void FileUtils::locateFile(QString filePath) {
 QString FileUtils::standardPath(QString subfolder) {
     // standard path
     // Mac: ~/Library/Application Support/Interface
-    QString path = QStandardPaths::writableLocation(QStandardPaths::GenericDataLocation);
-    path.append("/Interface");
+    QString path = QStandardPaths::writableLocation(QStandardPaths::DataLocation);

     if (!subfolder.startsWith("/")) {
         subfolder.prepend("/");
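The QStandardPaths switch also explains the dropped path.append("/Interface"): GenericDataLocation is the shared data root, while DataLocation already includes the organization and application name, so the manual suffix becomes redundant. Roughly, on macOS (exact paths vary by platform and application setup):

    // GenericDataLocation (shared root):  ~/Library/Application Support
    // DataLocation (per-application):     ~/Library/Application Support/<Org>/Interface
    QString path = QStandardPaths::writableLocation(QStandardPaths::DataLocation);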
@@ -51,6 +51,10 @@ float randFloatInRange (float min,float max) {
     return min + ((rand() % 10000)/10000.f * (max-min));
 }

+float randomSign() {
+    return randomBoolean() ? -1.0 : 1.0;
+}
+
 unsigned char randomColorValue(int miniumum) {
     return miniumum + (rand() % (256 - miniumum));
 }
@@ -762,3 +766,22 @@ float extractUniformScale(const glm::mat4& matrix) {
 float extractUniformScale(const glm::vec3& scale) {
     return (scale.x + scale.y + scale.z) / 3.0f;
 }
+
+bool isNaN(float value) {
+    return value != value;
+}
+
+bool isSimilarOrientation(const glm::quat& orientionA, const glm::quat& orientionB, float similarEnough) {
+    // Compute the angular distance between the two orientations
+    float angleOrientation = orientionA == orientionB ? 0.0f : glm::degrees(glm::angle(orientionA * glm::inverse(orientionB)));
+    if (isNaN(angleOrientation)) {
+        angleOrientation = 0.0f;
+    }
+    return (angleOrientation <= similarEnough);
+}
+
+bool isSimilarPosition(const glm::vec3& positionA, const glm::vec3& positionB, float similarEnough) {
+    // Compute the distance between the two points
+    float positionDistance = glm::distance(positionA, positionB);
+    return (positionDistance <= similarEnough);
+}
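isNaN relies on the IEEE 754 property that NaN compares unequal to everything, including itself; it is the only float for which value != value holds. The standard library spells the same test as std::isnan, and either form assumes default floating-point semantics, since flags like -ffast-math let the compiler assume NaNs never occur and fold the test away. An equivalent spelling, assuming <cmath> is acceptable in this codebase:

    #include <cmath>

    // Same result as the self-comparison above under default IEEE 754 semantics.
    bool isNaNStd(float value) {
        return std::isnan(value);
    }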
@@ -73,6 +73,7 @@ void usecTimestampNowForceClockSkew(int clockSkew);
 float randFloat();
 int randIntInRange (int min, int max);
 float randFloatInRange (float min,float max);
+float randomSign(); /// \return -1.0 or 1.0
 unsigned char randomColorValue(int minimum);
 bool randomBoolean();
@@ -182,4 +183,14 @@ float extractUniformScale(const glm::mat4& matrix);

 float extractUniformScale(const glm::vec3& scale);

+/// \return bool are two orientations similar to each other
+const float ORIENTATION_SIMILAR_ENOUGH = 5.0f; // 10 degrees in any direction
+bool isSimilarOrientation(const glm::quat& orientionA, const glm::quat& orientionB,
+                          float similarEnough = ORIENTATION_SIMILAR_ENOUGH);
+const float POSITION_SIMILAR_ENOUGH = 0.1f; // 0.1 meter
+bool isSimilarPosition(const glm::vec3& positionA, const glm::vec3& positionB, float similarEnough = POSITION_SIMILAR_ENOUGH);
+
+/// \return bool is the float NaN
+bool isNaN(float value);
+
 #endif // hifi_SharedUtil_h
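With the default arguments above, callers get "close enough" checks at 5 degrees and 0.1 m without repeating thresholds at every call site. A hypothetical caller, not taken from the diff:

    #include <glm/gtc/quaternion.hpp>
    #include <glm/vec3.hpp>

    // Hypothetical use: skip sending an update when an object has barely moved.
    bool barelyMoved(const glm::quat& lastOrientation, const glm::quat& orientation,
                     const glm::vec3& lastPosition, const glm::vec3& position) {
        return isSimilarOrientation(lastOrientation, orientation)  // <= 5 degrees
            && isSimilarPosition(lastPosition, position);          // <= 0.1 m
    }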
@@ -75,6 +75,9 @@ QScriptValue rayToVoxelIntersectionResultToScriptValue(QScriptEngine* engine, co
         case MAX_Z_FACE:
             faceName = "MAX_Z_FACE";
             break;
+        case UNKNOWN_FACE:
+            faceName = "UNKNOWN_FACE";
+            break;
     }
     obj.setProperty("face", faceName);
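Handling the new UNKNOWN_FACE enumerator keeps this switch exhaustive, so faceName is always assigned and compilers that warn on unhandled enum values (-Wswitch in GCC/Clang) stay quiet; a default: branch would also compile, but it would hide any future BoxFace additions from that warning.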