working on selected audio

parent: c301b799c5
commit: c969570e8c
8 changed files with 233 additions and 145 deletions
@@ -32,6 +32,32 @@ PositionalAudioRingBuffer::~PositionalAudioRingBuffer() {
    }
}

bool PositionalAudioRingBuffer::isListeningToSource(PositionalAudioRingBuffer* other) {
    switch (_listenMode) {
        default:
        case AudioRingBuffer::NORMAL:
            return true;
            break;

        case AudioRingBuffer::OMNI_DIRECTIONAL_POINT: {
            float distance = glm::distance(_position, other->_position);
            return distance <= _listenRadius;
            break;
        }
        case AudioRingBuffer::SELECTED_SOURCES:
            if (_listenSources) {
                for (int i = 0; i < _listenSourceCount; i++) {
                    if (other->_sourceID == _listenSources[i]) {
                        return true;
                    }
                }
            }
            return false;
            break;
    }
}

int PositionalAudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes) {
    unsigned char* currentBuffer = sourceBuffer + numBytesForPacketHeader(sourceBuffer);
    currentBuffer += parseSourceData(currentBuffer, numBytes - (currentBuffer - sourceBuffer));
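The new isListeningToSource() is the gate the audio mixer uses to decide whether another node's stream should be mixed in at all. As a self-contained illustration of the three listen modes (the Listener and Source types below are stand-ins invented for this sketch, not types from the codebase):

// Illustrative sketch only - mirrors the switch above with stand-in types.
#include <cmath>
#include <cstdio>
#include <vector>

enum ListenMode { NORMAL, OMNI_DIRECTIONAL_POINT, SELECTED_SOURCES };

struct Source { int id; float x, y, z; };

struct Listener {
    ListenMode mode;
    float x, y, z;
    float radius;                      // only used by OMNI_DIRECTIONAL_POINT
    std::vector<int> selectedSources;  // only used by SELECTED_SOURCES

    bool isListeningTo(const Source& s) const {
        switch (mode) {
            default:
            case NORMAL:
                return true;                                   // mix everyone
            case OMNI_DIRECTIONAL_POINT: {
                float dx = s.x - x, dy = s.y - y, dz = s.z - z;
                return std::sqrt(dx * dx + dy * dy + dz * dz) <= radius;
            }
            case SELECTED_SOURCES:
                for (int id : selectedSources) {
                    if (id == s.id) {
                        return true;                           // explicitly picked
                    }
                }
                return false;
        }
    }
};

int main() {
    Listener listener = { OMNI_DIRECTIONAL_POINT, 0, 0, 0, 1.0f, {} };
    Source nearSrc = { 7, 0.5f, 0, 0 };
    Source farSrc  = { 8, 5.0f, 0, 0 };
    printf("near: %d, far: %d\n", listener.isListeningTo(nearSrc), listener.isListeningTo(farSrc));
    return 0;
}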
@@ -56,21 +82,19 @@ int PositionalAudioRingBuffer::parseListenModeData(unsigned char* sourceBuffer,
    memcpy(&_listenMode, currentBuffer, sizeof(_listenMode));
    currentBuffer += sizeof(_listenMode);

    if (_listenMode == AudioRingBuffer::OMNI_DIRECTIONAL_POINT) {
        memcpy(&_listenRadius, currentBuffer, sizeof(_listenRadius));
        currentBuffer += sizeof(_listenRadius);
    } else if (_listenMode == AudioRingBuffer::SELECTED_SOURCES) {
        memcpy(&_listenSourceCount, currentBuffer, sizeof(_listenSourceCount));
        currentBuffer += sizeof(_listenSourceCount);

        if (_listenSources) {
            delete[] _listenSources;
        }
        _listenSources = new int[_listenSourceCount];
        memcpy(_listenSources, currentBuffer, sizeof(int) * _listenSourceCount);
        currentBuffer += sizeof(int) * _listenSourceCount;
    }

    return currentBuffer - sourceBuffer;
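parseListenModeData() consumes the listen-mode fields that the client packs at the head of its audio packets (see the matching Audio::performIO() changes further down). Reading the memcpy sequence, the layout after the source ID appears to be: the listen mode value, then either a float radius (point mode) or an int count followed by that many int source IDs (selected-sources mode). A stand-alone decoder over that assumed layout could look like the sketch below; the 4-byte width of the mode field and the struct/function names are assumptions made for illustration only.

// Sketch of the assumed on-the-wire listen-mode layout; sizes are illustrative.
#include <cstdint>
#include <cstring>
#include <vector>

enum ListenMode : int32_t { NORMAL, OMNI_DIRECTIONAL_POINT, SELECTED_SOURCES };

struct ListenModeHeader {
    ListenMode mode = NORMAL;
    float radius = 0.0f;               // valid when mode == OMNI_DIRECTIONAL_POINT
    std::vector<int32_t> sources;      // valid when mode == SELECTED_SOURCES
};

// Returns the number of bytes consumed, like parseListenModeData().
int decodeListenMode(const unsigned char* buffer, ListenModeHeader& out) {
    const unsigned char* cursor = buffer;

    memcpy(&out.mode, cursor, sizeof(out.mode));
    cursor += sizeof(out.mode);

    if (out.mode == OMNI_DIRECTIONAL_POINT) {
        memcpy(&out.radius, cursor, sizeof(out.radius));
        cursor += sizeof(out.radius);
    } else if (out.mode == SELECTED_SOURCES) {
        int32_t count = 0;
        memcpy(&count, cursor, sizeof(count));
        cursor += sizeof(count);

        out.sources.resize(count);
        memcpy(out.sources.data(), cursor, sizeof(int32_t) * count);
        cursor += sizeof(int32_t) * count;
    }

    return (int)(cursor - buffer);
}

Using std::vector here is just a convenience for the sketch; the real buffer keeps a raw int array plus a count so the bytes can be handed straight to memcpy.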
@@ -81,7 +105,7 @@ int PositionalAudioRingBuffer::parsePositionalData(unsigned char* sourceBuffer,
    memcpy(&_position, currentBuffer, sizeof(_position));
    currentBuffer += sizeof(_position);

    memcpy(&_orientation, currentBuffer, sizeof(_orientation));
    currentBuffer += sizeof(_orientation);
@@ -30,6 +30,10 @@ public:
    const glm::vec3& getPosition() const { return _position; }
    const glm::quat& getOrientation() const { return _orientation; }

    bool isListeningToSource(PositionalAudioRingBuffer* other);
    int getSourceID() const { return _sourceID; }
    int getListeningMode() const { return _listenMode; }

protected:
    // disallow copying of PositionalAudioRingBuffer objects
@@ -159,168 +159,174 @@ int main(int argc, const char* argv[]) {
    // zero out the client mix for this node
    memset(clientSamples, 0, sizeof(clientSamples));

    // loop through all other nodes that have sufficient audio to mix
    for (NodeList::iterator otherNode = nodeList->begin(); otherNode != nodeList->end(); otherNode++) {

        if (((PositionalAudioRingBuffer*) otherNode->getLinkedData())->willBeAddedToMix()
            && (otherNode != node || (otherNode == node && nodeRingBuffer->shouldLoopbackForNode()))) {

            PositionalAudioRingBuffer* otherNodeBuffer = (PositionalAudioRingBuffer*) otherNode->getLinkedData();

            // based on our listen mode we will do this mixing...
            if (nodeRingBuffer->isListeningToSource(otherNodeBuffer)) {
                float bearingRelativeAngleToSource = 0.0f;
                float attenuationCoefficient = 1.0f;
                int numSamplesDelay = 0;
                float weakChannelAmplitudeRatio = 1.0f;
                stk::TwoPole* otherNodeTwoPole = NULL;

                // only do axis/distance attenuation when in normal mode
                if (otherNode != node && nodeRingBuffer->getListeningMode() == AudioRingBuffer::NORMAL) {

                    glm::vec3 listenerPosition = nodeRingBuffer->getPosition();
                    glm::vec3 relativePosition = otherNodeBuffer->getPosition() - nodeRingBuffer->getPosition();
                    glm::quat inverseOrientation = glm::inverse(nodeRingBuffer->getOrientation());

                    float distanceSquareToSource = glm::dot(relativePosition, relativePosition);
                    float radius = 0.0f;

                    if (otherNode->getType() == NODE_TYPE_AUDIO_INJECTOR) {
                        InjectedAudioRingBuffer* injectedBuffer = (InjectedAudioRingBuffer*) otherNodeBuffer;
                        radius = injectedBuffer->getRadius();
                        attenuationCoefficient *= injectedBuffer->getAttenuationRatio();
                    }

                    if (radius == 0 || (distanceSquareToSource > radius * radius)) {
                        // this is either not a spherical source, or the listener is outside the sphere

                        if (radius > 0) {
                            // this is a spherical source - the distance used for the coefficient
                            // needs to be the closest point on the boundary to the source

                            // override the distance to the node with the distance to the point on the
                            // boundary of the sphere
                            distanceSquareToSource -= (radius * radius);

                        } else {
                            // calculate the angle of delivery for off-axis attenuation
                            glm::vec3 rotatedListenerPosition = glm::inverse(otherNodeBuffer->getOrientation())
                                * relativePosition;

                            float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
                                                               glm::normalize(rotatedListenerPosition));

                            const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
                            const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;

                            float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
                                (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / 90.0f));

                            // multiply the current attenuation coefficient by the calculated off axis coefficient
                            attenuationCoefficient *= offAxisCoefficient;
                        }

                        glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;

                        const float DISTANCE_SCALE = 2.5f;
                        const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
                        const float DISTANCE_LOG_BASE = 2.5f;
                        const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE);

                        // calculate the distance coefficient using the distance to this node
                        float distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR,
                                                         DISTANCE_SCALE_LOG +
                                                         (0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);
                        distanceCoefficient = std::min(1.0f, distanceCoefficient);

                        // multiply the current attenuation coefficient by the distance coefficient
                        attenuationCoefficient *= distanceCoefficient;

                        // project the rotated source position vector onto the XZ plane
                        rotatedSourcePosition.y = 0.0f;

                        // produce an oriented angle about the y-axis
                        bearingRelativeAngleToSource = glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f),
                                                                          glm::normalize(rotatedSourcePosition),
                                                                          glm::vec3(0.0f, 1.0f, 0.0f));

                        const float PHASE_AMPLITUDE_RATIO_AT_90 = 0.5;

                        // figure out the number of samples of delay and the ratio of the amplitude
                        // in the weak channel for audio spatialization
                        float sinRatio = fabsf(sinf(glm::radians(bearingRelativeAngleToSource)));
                        numSamplesDelay = PHASE_DELAY_AT_90 * sinRatio;
                        weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);

                        // grab the TwoPole object for this source, add it if it doesn't exist
                        TwoPoleNodeMap& nodeTwoPoles = nodeRingBuffer->getTwoPoles();
                        TwoPoleNodeMap::iterator twoPoleIterator = nodeTwoPoles.find(otherNode->getNodeID());

                        if (twoPoleIterator == nodeTwoPoles.end()) {
                            // setup the freeVerb effect for this source for this client
                            otherNodeTwoPole = nodeTwoPoles[otherNode->getNodeID()] = new stk::TwoPole;
                        } else {
                            otherNodeTwoPole = twoPoleIterator->second;
                        }

                        // calculate the resonance for this TwoPole based on angle to source
                        float TWO_POLE_CUT_OFF_FREQUENCY = 800.0f;
                        float TWO_POLE_MAX_FILTER_STRENGTH = 0.4f;

                        otherNodeTwoPole->setResonance(TWO_POLE_CUT_OFF_FREQUENCY,
                                                       TWO_POLE_MAX_FILTER_STRENGTH
                                                       * fabsf(bearingRelativeAngleToSource) / 180.0f,
                                                       true);
                    }
                }

                int16_t* sourceBuffer = otherNodeBuffer->getNextOutput();

                int16_t* goodChannel = (bearingRelativeAngleToSource > 0.0f)
                    ? clientSamples
                    : clientSamples + BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
                int16_t* delayedChannel = (bearingRelativeAngleToSource > 0.0f)
                    ? clientSamples + BUFFER_LENGTH_SAMPLES_PER_CHANNEL
                    : clientSamples;

                int16_t* delaySamplePointer = otherNodeBuffer->getNextOutput() == otherNodeBuffer->getBuffer()
                    ? otherNodeBuffer->getBuffer() + RING_BUFFER_LENGTH_SAMPLES - numSamplesDelay
                    : otherNodeBuffer->getNextOutput() - numSamplesDelay;

                for (int s = 0; s < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; s++) {
                    // load up the stkFrameBuffer with this source's samples
                    stkFrameBuffer[s] = (stk::StkFloat) sourceBuffer[s];
                }

                // perform the TwoPole effect on the stkFrameBuffer
                if (otherNodeTwoPole) {
                    otherNodeTwoPole->tick(stkFrameBuffer);
                }

                for (int s = 0; s < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; s++) {
                    if (s < numSamplesDelay) {
                        // pull the earlier sample for the delayed channel
                        int earlierSample = delaySamplePointer[s] * attenuationCoefficient * weakChannelAmplitudeRatio;

                        delayedChannel[s] = glm::clamp(delayedChannel[s] + earlierSample,
                                                       MIN_SAMPLE_VALUE,
                                                       MAX_SAMPLE_VALUE);
                    }

                    int16_t currentSample = stkFrameBuffer[s] * attenuationCoefficient;

                    goodChannel[s] = glm::clamp(goodChannel[s] + currentSample,
                                                MIN_SAMPLE_VALUE,
                                                MAX_SAMPLE_VALUE);

                    if (s + numSamplesDelay < BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
                        int sumSample = delayedChannel[s + numSamplesDelay]
                            + (currentSample * weakChannelAmplitudeRatio);
                        delayedChannel[s + numSamplesDelay] = glm::clamp(sumSample,
                                                                         MIN_SAMPLE_VALUE,
                                                                         MAX_SAMPLE_VALUE);
                    }

                    if (s >= BUFFER_LENGTH_SAMPLES_PER_CHANNEL - PHASE_DELAY_AT_90) {
                        // this could be a delayed sample on the next pass,
                        // so store the affected sample back in the ARB
                        otherNodeBuffer->getNextOutput()[s] = (int16_t) stkFrameBuffer[s];
                    }
                }
            }
        }
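For a feel of the attenuation math above, the sketch below evaluates the same distance and off-axis expressions (with the same constants) for a few distances and a fixed 90-degree angle of delivery. The loop, the fixed angle, and the printout are illustrative only.

// Stand-alone sketch of the mixer's attenuation math, reusing its constants.
#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
    const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
    const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;

    const float DISTANCE_SCALE = 2.5f;
    const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
    const float DISTANCE_LOG_BASE = 2.5f;
    const float DISTANCE_SCALE_LOG = std::log(DISTANCE_SCALE) / std::log(DISTANCE_LOG_BASE);

    for (float distance = 1.0f; distance <= 16.0f; distance *= 2.0f) {
        float distanceSquareToSource = distance * distance;

        // same expression as the mixer: gain falls off geometrically with log-distance
        float distanceCoefficient = std::pow(GEOMETRIC_AMPLITUDE_SCALAR,
                                             DISTANCE_SCALE_LOG +
                                             (0.5f * std::log(distanceSquareToSource) / std::log(DISTANCE_LOG_BASE)) - 1);
        distanceCoefficient = std::min(1.0f, distanceCoefficient);

        // off-axis factor for a listener sitting 90 degrees off the source's forward axis
        float angleOfDelivery = 90.0f;
        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
            (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / 90.0f));

        printf("distance %5.1f  distance gain %.3f  off-axis gain %.3f  combined %.3f\n",
               distance, distanceCoefficient, offAxisCoefficient,
               distanceCoefficient * offAxisCoefficient);
    }
    return 0;
}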
@@ -340,7 +346,6 @@ int main(int argc, const char* argv[]) {
        if (nodeBuffer->getNextOutput() >= nodeBuffer->getBuffer() + RING_BUFFER_LENGTH_SAMPLES) {
            nodeBuffer->setNextOutput(nodeBuffer->getBuffer());
        }

        nodeBuffer->setWillBeAddedToMix(false);
    }
}
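The per-sample loop in the mixer hunk above spatializes each source into a "good" channel and a delayed, quieter channel, with both the sample delay and the level difference scaling with the sine of the bearing angle. A compact sketch of just that mapping follows; PHASE_DELAY_AT_90 is given an illustrative value here since its actual definition sits outside this diff.

// Sketch of the interaural delay / level panning applied per source.
#include <cmath>
#include <cstdio>

int main() {
    const int   PHASE_DELAY_AT_90 = 20;            // illustrative value; the real constant lives in the mixer
    const float PHASE_AMPLITUDE_RATIO_AT_90 = 0.5f;

    for (float bearing = 0.0f; bearing <= 90.0f; bearing += 30.0f) {
        // same mapping as the mixer: both effects scale with |sin(bearing)|
        float sinRatio = std::fabs(std::sin(bearing * 3.14159265f / 180.0f));
        int   numSamplesDelay = (int)(PHASE_DELAY_AT_90 * sinRatio);
        float weakChannelAmplitudeRatio = 1.0f - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);

        printf("bearing %5.1f deg -> delay %2d samples, weak channel gain %.2f\n",
               bearing, numSamplesDelay, weakChannelAmplitudeRatio);
    }
    return 0;
}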
@@ -1835,6 +1835,11 @@ void Application::initMenu() {
    (_simulateLeapHand = debugMenu->addAction("Simulate Leap Hand"))->setCheckable(true);
    (_testRaveGlove = debugMenu->addAction("Test RaveGlove"))->setCheckable(true);

    QMenu* audioDebugMenu = debugMenu->addMenu("Audio Debugging Tools");
    audioDebugMenu->addAction("Listen Mode Normal", this, SLOT(setListenModeNormal()), Qt::CTRL | Qt::Key_1);
    audioDebugMenu->addAction("Listen Mode Point/Radius", this, SLOT(setListenModePoint()), Qt::CTRL | Qt::Key_2);
    audioDebugMenu->addAction("Listen Mode Single Source", this, SLOT(setListenModeSingleSource()), Qt::CTRL | Qt::Key_3);

    QMenu* settingsMenu = menuBar->addMenu("Settings");
    (_settingsAutosave = settingsMenu->addAction("Autosave"))->setCheckable(true);
    _settingsAutosave->setChecked(true);
@@ -1846,6 +1851,37 @@ void Application::initMenu() {
    _networkAccessManager = new QNetworkAccessManager(this);
}

void Application::setListenModeNormal() {
    _audio.setListenMode(AudioRingBuffer::NORMAL);
}

void Application::setListenModePoint() {
    _audio.setListenMode(AudioRingBuffer::OMNI_DIRECTIONAL_POINT);
    _audio.setListenRadius(1.0);
}

void Application::setListenModeSingleSource() {
    _audio.setListenMode(AudioRingBuffer::SELECTED_SOURCES);
    _audio.clearListenSources();

    NodeList* nodeList = NodeList::getInstance();
    for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
        if (node->getLinkedData() != NULL && node->getType() == NODE_TYPE_AGENT) {
            Avatar* avatar = (Avatar *) node->getLinkedData();
            glm::vec3 headPosition = avatar->getHead().getPosition();
            glm::vec3 mouseRayOrigin = _myAvatar.getMouseRayOrigin();
            glm::vec3 mouseRayDirection = _myAvatar.getMouseRayDirection();
            const float HEAD_SPHERE_RADIUS = 0.07;

            if (rayIntersectsSphere(mouseRayOrigin, mouseRayDirection, headPosition, HEAD_SPHERE_RADIUS)) {
                int sourceID = avatar->getOwningNode()->getNodeID();
                _audio.addListenSource(sourceID);
            }
        }
    }
}


void Application::updateFrustumRenderModeAction() {
    switch (_frustumDrawingMode) {
        default:
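setListenModeSingleSource() picks sources by testing the mouse ray against a small sphere around each avatar's head via rayIntersectsSphere(). That helper is not part of this diff; the sketch below is a generic ray-sphere test with the same call shape, not necessarily the project's implementation.

// Generic ray/sphere intersection test with the same call shape as above (sketch only).
#include <glm/glm.hpp>

bool rayIntersectsSphereSketch(const glm::vec3& rayOrigin, const glm::vec3& rayDirection,
                               const glm::vec3& sphereCenter, float sphereRadius) {
    // vector from the ray origin to the sphere center
    glm::vec3 toCenter = sphereCenter - rayOrigin;

    // distance along the (normalized) ray direction to the closest approach
    glm::vec3 unitDirection = glm::normalize(rayDirection);
    float alongRay = glm::dot(toCenter, unitDirection);
    if (alongRay < 0.0f) {
        return false; // sphere is behind the ray origin
    }

    // compare the closest-approach distance against the sphere radius
    glm::vec3 closestPoint = rayOrigin + unitDirection * alongRay;
    glm::vec3 offset = sphereCenter - closestPoint;
    return glm::dot(offset, offset) <= sphereRadius * sphereRadius;
}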
@@ -170,6 +170,10 @@ private slots:
    void copyVoxels();
    void pasteVoxels();
    void runTests();
    void setListenModeNormal();
    void setListenModePoint();
    void setListenModeSingleSource();

    void renderCoverageMap();
    void renderCoverageMapsRecursively(CoverageMap* map);
@@ -112,7 +112,7 @@ inline void Audio::performIO(int16_t* inputLeft, int16_t* outputLeft, int16_t* o
    // we need the amount of bytes in the buffer + 1 for type
    // + 12 for 3 floats for position + float for bearing + 1 attenuation byte
    unsigned char dataPacket[MAX_PACKET_SIZE];

    PACKET_TYPE packetType = (Application::getInstance()->shouldEchoAudio())
        ? PACKET_TYPE_MICROPHONE_AUDIO_WITH_ECHO
@@ -123,21 +123,25 @@ inline void Audio::performIO(int16_t* inputLeft, int16_t* outputLeft, int16_t* o
    // pack Source Data
    memcpy(currentPacketPtr, &_sourceID, sizeof(_sourceID));
    currentPacketPtr += (sizeof(_sourceID));
    leadingBytes += (sizeof(_sourceID));

    // pack Listen Mode Data
    memcpy(currentPacketPtr, &_listenMode, sizeof(_listenMode));
    currentPacketPtr += (sizeof(_listenMode));
    leadingBytes += (sizeof(_listenMode));

    if (_listenMode == AudioRingBuffer::OMNI_DIRECTIONAL_POINT) {
        memcpy(currentPacketPtr, &_listenRadius, sizeof(_listenRadius));
        currentPacketPtr += (sizeof(_listenRadius));
        leadingBytes += (sizeof(_listenRadius));
    } else if (_listenMode == AudioRingBuffer::SELECTED_SOURCES) {
        memcpy(currentPacketPtr, &_listenSourceCount, sizeof(_listenSourceCount));
        currentPacketPtr += (sizeof(_listenSourceCount));
        leadingBytes += (sizeof(_listenSourceCount));

        if (_listenSources) {
            memcpy(currentPacketPtr, _listenSources, sizeof(int) * _listenSourceCount);
            currentPacketPtr += (sizeof(int) * _listenSourceCount);
            leadingBytes += (sizeof(int) * _listenSourceCount);
        }
    }
@@ -356,6 +360,12 @@ void Audio::addListenSource(int sourceID) {
    _listenSourceCount++;
}

void Audio::clearListenSources() {
    delete[] _listenSources;
    _listenSources = NULL;
    _listenSourceCount = 0;
}

void Audio::removeListenSource(int sourceID) {
    // If we don't yet have a list of listen sources, make one
    if (_listenSources) {
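clearListenSources() and its siblings manage _listenSources as a manually allocated int array plus a separate count. Purely as an illustration of the trade-off (not something this commit does), the same add/remove/clear behavior over a std::vector would fold the count and the allocation together:

// Illustrative alternative only: listen-source bookkeeping over a std::vector.
#include <algorithm>
#include <vector>

class ListenSourceList {
public:
    void add(int sourceID) {
        _sources.push_back(sourceID);   // the original addListenSource body is not shown in this hunk
    }
    void remove(int sourceID) {
        _sources.erase(std::remove(_sources.begin(), _sources.end(), sourceID), _sources.end());
    }
    void clear() { _sources.clear(); }

    const std::vector<int>& sources() const { return _sources; }

private:
    std::vector<int> _sources;
};

The raw array does keep the packet-packing memcpy trivial, which is presumably why the code stores it that way; a vector's data() would serve the same purpose.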
@@ -60,6 +60,7 @@ public:
    void setListenRadius(float radius) { _listenRadius = radius; };
    void addListenSource(int sourceID);
    void removeListenSource(int sourceID);
    void clearListenSources();

private:
    PaStream* _stream;
@@ -164,6 +164,10 @@ public:
    glm::quat getOrientation () const;
    glm::quat getWorldAlignedOrientation() const;

    const glm::vec3& getMouseRayOrigin() const { return _mouseRayOrigin; }
    const glm::vec3& getMouseRayDirection() const { return _mouseRayDirection; }

    glm::vec3 getGravity () const { return _gravity; }

    glm::vec3 getUprightHeadPosition() const;