Fixes from merge
Commit 2820323567
12 changed files with 472 additions and 464 deletions
@@ -99,6 +99,11 @@ int main(int argc, const char* argv[]) {
int nextFrame = 0;
timeval startTime;

unsigned char clientPacket[BUFFER_LENGTH_BYTES + 1];
clientPacket[0] = PACKET_HEADER_MIXED_AUDIO;

int16_t clientSamples[BUFFER_LENGTH_SAMPLES_PER_CHANNEL * 2] = {};

gettimeofday(&startTime, NULL);

while (true) {

@@ -129,8 +134,9 @@ int main(int argc, const char* argv[]) {

for (AgentList::iterator agent = agentList->begin(); agent != agentList->end(); agent++) {
AudioRingBuffer* agentRingBuffer = (AudioRingBuffer*) agent->getLinkedData();

int16_t clientMix[BUFFER_LENGTH_SAMPLES_PER_CHANNEL * 2] = {};

// zero out the client mix for this agent
memset(clientSamples, 0, sizeof(clientSamples));

for (AgentList::iterator otherAgent = agentList->begin(); otherAgent != agentList->end(); otherAgent++) {
if (otherAgent != agent || (otherAgent == agent && agentRingBuffer->shouldLoopbackForAgent())) {

@@ -219,11 +225,11 @@ int main(int argc, const char* argv[]) {
}

int16_t* goodChannel = bearingRelativeAngleToSource > 0.0f
? clientMix + BUFFER_LENGTH_SAMPLES_PER_CHANNEL
: clientMix;
? clientSamples + BUFFER_LENGTH_SAMPLES_PER_CHANNEL
: clientSamples;
int16_t* delayedChannel = bearingRelativeAngleToSource > 0.0f
? clientMix
: clientMix + BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
? clientSamples
: clientSamples + BUFFER_LENGTH_SAMPLES_PER_CHANNEL;

int16_t* delaySamplePointer = otherAgentBuffer->getNextOutput() == otherAgentBuffer->getBuffer()
? otherAgentBuffer->getBuffer() + RING_BUFFER_SAMPLES - numSamplesDelay

@@ -249,7 +255,8 @@ int main(int argc, const char* argv[]) {
}
}

agentList->getAgentSocket().send(agent->getPublicSocket(), clientMix, BUFFER_LENGTH_BYTES);
memcpy(clientPacket + 1, clientSamples, sizeof(clientSamples));
agentList->getAgentSocket().send(agent->getPublicSocket(), clientPacket, BUFFER_LENGTH_BYTES + 1);
}

// push forward the next output pointers for any audio buffers we used

@@ -268,9 +275,12 @@ int main(int argc, const char* argv[]) {

// pull any new audio data from agents off of the network stack
while (agentList->getAgentSocket().receive(agentAddress, packetData, &receivedBytes)) {
if (packetData[0] == PACKET_HEADER_INJECT_AUDIO) {
if (packetData[0] == PACKET_HEADER_INJECT_AUDIO || packetData[0] == PACKET_HEADER_MICROPHONE_AUDIO) {
char agentType = (packetData[0] == PACKET_HEADER_MICROPHONE_AUDIO)
? AGENT_TYPE_AVATAR
: AGENT_TYPE_AUDIO_INJECTOR;

if (agentList->addOrUpdateAgent(agentAddress, agentAddress, packetData[0], agentList->getLastAgentID())) {
if (agentList->addOrUpdateAgent(agentAddress, agentAddress, agentType, agentList->getLastAgentID())) {
agentList->increaseAgentID();
}

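The hunks above change the mixer's per-agent output from sending the raw clientMix buffer to sending a framed packet: a single PACKET_HEADER_MIXED_AUDIO byte followed by the interleaved left/right samples. A minimal sketch of that framing, assuming the buffer-size constants named in the diff; the function name and constant values here are illustrative, not the project's actual API:

    #include <cstdint>
    #include <cstring>

    const int BUFFER_LENGTH_SAMPLES_PER_CHANNEL = 512;    // assumed value, not taken from this diff
    const int BUFFER_LENGTH_BYTES = BUFFER_LENGTH_SAMPLES_PER_CHANNEL * 2 * sizeof(int16_t);
    const unsigned char PACKET_HEADER_MIXED_AUDIO = 'A';  // placeholder header byte

    // Frame one mixed stereo buffer as [1 header byte][left samples][right samples];
    // the caller then sends BUFFER_LENGTH_BYTES + 1 bytes, matching the send() call above.
    void frameMixedAudio(const int16_t* clientSamples, unsigned char* packetOut) {
        packetOut[0] = PACKET_HEADER_MIXED_AUDIO;
        memcpy(packetOut + 1, clientSamples, BUFFER_LENGTH_BYTES);
    }
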
@@ -18,6 +18,7 @@
#include <ifaddrs.h>
#endif

#include <QActionGroup>
#include <QColorDialog>
#include <QDesktopWidget>
#include <QGLWidget>

@@ -134,18 +135,16 @@ Application::Application(int& argc, char** argv) :
_mouseX(0),
_mouseY(0),
_mousePressed(false),
_mouseMode(NO_EDIT_MODE),
_mouseVoxelScale(1.0f / 1024.0f),
_paintOn(false),
_dominantColor(0),
_perfStatsOn(false),
_destructiveAddVoxel(false),
_chatEntryOn(false),
_oculusTextureID(0),
_oculusProgram(0),
_oculusDistortionScale(1.25),
#ifndef _WIN32
_audio(&_audioScope, &_myAvatar),
_audio(&_audioScope),
#endif
_stopNetworkReceiveThread(false),
_packetCount(0),

@@ -193,10 +192,6 @@ Application::Application(int& argc, char** argv) :
// the callback for our instance of AgentList is attachNewHeadToAgent
AgentList::getInstance()->linkedDataCreateCallback = &attachNewHeadToAgent;

#ifndef _WIN32
AgentList::getInstance()->audioMixerSocketUpdate = &audioMixerUpdate;
#endif

#ifdef _WIN32
WSADATA WsaData;
int wsaresult = WSAStartup(MAKEWORD(2,2), &WsaData);

@@ -365,7 +360,8 @@ void Application::paintGL() {
glEnable(GL_COLOR_MATERIAL);
glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE);

GLfloat light_position0[] = { 1.0, 1.0, 0.0, 0.0 };
glm::vec3 relativeSunLoc = glm::normalize(_environment.getSunLocation() - whichCamera.getPosition());
GLfloat light_position0[] = { relativeSunLoc.x, relativeSunLoc.y, relativeSunLoc.z, 0.0 };
glLightfv(GL_LIGHT0, GL_POSITION, light_position0);
GLfloat ambient_color[] = { 0.7, 0.7, 0.8 };
glLightfv(GL_LIGHT0, GL_AMBIENT, ambient_color);

@@ -537,30 +533,6 @@ void Application::keyPressEvent(QKeyEvent* event) {
case Qt::Key_Percent:
sendVoxelServerAddScene();
break;

case Qt::Key_1:
_mouseMode = (_mouseMode == ADD_VOXEL_MODE) ? NO_EDIT_MODE : ADD_VOXEL_MODE;
break;

case Qt::Key_2:
_mouseMode = (_mouseMode == DELETE_VOXEL_MODE) ? NO_EDIT_MODE : DELETE_VOXEL_MODE;
break;

case Qt::Key_3:
_mouseMode = (_mouseMode == COLOR_VOXEL_MODE) ? NO_EDIT_MODE : COLOR_VOXEL_MODE;
break;

case Qt::Key_4:
addVoxelInFrontOfAvatar();
break;

case Qt::Key_5:
_mouseVoxelScale /= 2;
break;

case Qt::Key_6:
_mouseVoxelScale *= 2;
break;

case Qt::Key_L:
_displayLevels = !_displayLevels;

@@ -684,6 +656,12 @@ void Application::keyReleaseEvent(QKeyEvent* event) {
void Application::mouseMoveEvent(QMouseEvent* event) {
_mouseX = event->x();
_mouseY = event->y();

// detect drag
glm::vec3 mouseVoxelPos(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z);
if (_colorVoxelMode->isChecked() && event->buttons().testFlag(Qt::LeftButton) && mouseVoxelPos != _lastMouseVoxelPos) {
addVoxelUnderCursor();
}
}

void Application::mousePressEvent(QMouseEvent* event) {

@@ -692,13 +670,13 @@ void Application::mousePressEvent(QMouseEvent* event) {
_mouseY = event->y();
_mousePressed = true;

if (_mouseMode == ADD_VOXEL_MODE || _mouseMode == COLOR_VOXEL_MODE) {
if (_addVoxelMode->isChecked() || _colorVoxelMode->isChecked()) {
addVoxelUnderCursor();

} else if (_mouseMode == DELETE_VOXEL_MODE) {
} else if (_deleteVoxelMode->isChecked()) {
deleteVoxelUnderCursor();
}
} else if (event->button() == Qt::RightButton && _mouseMode != NO_EDIT_MODE) {
} else if (event->button() == Qt::RightButton && checkedVoxelModeAction() != 0) {
deleteVoxelUnderCursor();
}
}

@@ -712,14 +690,14 @@ void Application::mouseReleaseEvent(QMouseEvent* event) {
}

void Application::wheelEvent(QWheelEvent* event) {
if (_mouseMode == NO_EDIT_MODE) {
if (checkedVoxelModeAction() == 0) {
event->ignore();
return;
}
if (event->delta() > 0) {
_mouseVoxelScale *= 2;
increaseVoxelSize();
} else {
_mouseVoxelScale /= 2;
decreaseVoxelSize();
}
}

@@ -809,48 +787,57 @@ void Application::idle() {
// tell my avatar the posiion and direction of the ray projected ino the world based on the mouse position
_myAvatar.setMouseRay(mouseRayOrigin, mouseRayDirection);

float distance;
BoxFace face;
_mouseVoxel.s = 0.0f;
if (_mouseMode != NO_EDIT_MODE && _voxels.findRayIntersection(
mouseRayOrigin, mouseRayDirection, _mouseVoxel, distance, face)) {
// find the nearest voxel with the desired scale
if (_mouseVoxelScale > _mouseVoxel.s) {
// choose the larger voxel that encompasses the one selected
_mouseVoxel.x = _mouseVoxelScale * floorf(_mouseVoxel.x / _mouseVoxelScale);
_mouseVoxel.y = _mouseVoxelScale * floorf(_mouseVoxel.y / _mouseVoxelScale);
_mouseVoxel.z = _mouseVoxelScale * floorf(_mouseVoxel.z / _mouseVoxelScale);
_mouseVoxel.s = _mouseVoxelScale;

} else {
glm::vec3 faceVector = getFaceVector(face);
if (_mouseVoxelScale < _mouseVoxel.s) {
// find the closest contained voxel
glm::vec3 pt = (mouseRayOrigin + mouseRayDirection * distance) / (float)TREE_SCALE -
faceVector * (_mouseVoxelScale * 0.5f);
_mouseVoxel.x = _mouseVoxelScale * floorf(pt.x / _mouseVoxelScale);
_mouseVoxel.y = _mouseVoxelScale * floorf(pt.y / _mouseVoxelScale);
_mouseVoxel.z = _mouseVoxelScale * floorf(pt.z / _mouseVoxelScale);
if (checkedVoxelModeAction() != 0) {
float distance;
BoxFace face;
if (_voxels.findRayIntersection(mouseRayOrigin, mouseRayDirection, _mouseVoxel, distance, face)) {
// find the nearest voxel with the desired scale
if (_mouseVoxelScale > _mouseVoxel.s) {
// choose the larger voxel that encompasses the one selected
_mouseVoxel.x = _mouseVoxelScale * floorf(_mouseVoxel.x / _mouseVoxelScale);
_mouseVoxel.y = _mouseVoxelScale * floorf(_mouseVoxel.y / _mouseVoxelScale);
_mouseVoxel.z = _mouseVoxelScale * floorf(_mouseVoxel.z / _mouseVoxelScale);
_mouseVoxel.s = _mouseVoxelScale;
}
if (_mouseMode == ADD_VOXEL_MODE) {
// use the face to determine the side on which to create a neighbor
_mouseVoxel.x += faceVector.x * _mouseVoxel.s;
_mouseVoxel.y += faceVector.y * _mouseVoxel.s;
_mouseVoxel.z += faceVector.z * _mouseVoxel.s;
}
}

if (_mouseMode == COLOR_VOXEL_MODE) {
} else {
glm::vec3 faceVector = getFaceVector(face);
if (_mouseVoxelScale < _mouseVoxel.s) {
// find the closest contained voxel
glm::vec3 pt = (mouseRayOrigin + mouseRayDirection * distance) / (float)TREE_SCALE -
faceVector * (_mouseVoxelScale * 0.5f);
_mouseVoxel.x = _mouseVoxelScale * floorf(pt.x / _mouseVoxelScale);
_mouseVoxel.y = _mouseVoxelScale * floorf(pt.y / _mouseVoxelScale);
_mouseVoxel.z = _mouseVoxelScale * floorf(pt.z / _mouseVoxelScale);
_mouseVoxel.s = _mouseVoxelScale;
}
if (_addVoxelMode->isChecked()) {
// use the face to determine the side on which to create a neighbor
_mouseVoxel.x += faceVector.x * _mouseVoxel.s;
_mouseVoxel.y += faceVector.y * _mouseVoxel.s;
_mouseVoxel.z += faceVector.z * _mouseVoxel.s;
}
}
} else if (_addVoxelMode->isChecked()) {
// place the voxel a fixed distance away
float worldMouseVoxelScale = _mouseVoxelScale * TREE_SCALE;
glm::vec3 pt = mouseRayOrigin + mouseRayDirection * (2.0f + worldMouseVoxelScale * 0.5f);
_mouseVoxel.x = _mouseVoxelScale * floorf(pt.x / worldMouseVoxelScale);
_mouseVoxel.y = _mouseVoxelScale * floorf(pt.y / worldMouseVoxelScale);
_mouseVoxel.z = _mouseVoxelScale * floorf(pt.z / worldMouseVoxelScale);
_mouseVoxel.s = _mouseVoxelScale;
}

if (_deleteVoxelMode->isChecked()) {
// red indicates deletion
_mouseVoxel.red = 255;
_mouseVoxel.green = _mouseVoxel.blue = 0;

} else { // _addVoxelMode->isChecked() || _colorVoxelMode->isChecked()
QColor paintColor = _voxelPaintColor->data().value<QColor>();
_mouseVoxel.red = paintColor.red();
_mouseVoxel.green = paintColor.green();
_mouseVoxel.blue = paintColor.blue();

} else if (_mouseMode == DELETE_VOXEL_MODE) {
// red indicates deletion
_mouseVoxel.red = 255;
_mouseVoxel.green = _mouseVoxel.blue = 0;
}
}

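The idle() hunk above repeatedly snaps the picked point to the voxel grid with the pattern scale * floorf(coord / scale). A small stand-alone sketch of that snapping step, with hypothetical names (the diff's VoxelDetail fields are assumed to be plain floats):

    #include <cmath>

    struct GridVoxel { float x, y, z, s; };   // simplified stand-in for VoxelDetail

    // Snap a point (already divided by TREE_SCALE in the diff) to the voxel of
    // edge length `scale` that contains it.
    GridVoxel snapToVoxelGrid(float px, float py, float pz, float scale) {
        GridVoxel v;
        v.x = scale * floorf(px / scale);
        v.y = scale * floorf(py / scale);
        v.z = scale * floorf(pz / scale);
        v.s = scale;
        return v;
    }

For example, with scale = 1.0f / 1024.0f (the default _mouseVoxelScale in the constructor hunk), a coordinate of 0.5003 snaps to 0.5, the lower corner of the containing 1/1024-sized voxel.
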
@@ -905,11 +892,7 @@ void Application::terminate() {
// Close serial port
// close(serial_fd);

_myAvatar.writeAvatarDataToFile();

#ifndef _WIN32
_audio.terminate();
#endif
_myAvatar.writeAvatarDataToFile();

if (_enableNetworkThread) {
_stopNetworkReceiveThread = true;

@@ -985,10 +968,6 @@ void Application::cycleFrustumRenderMode() {
updateFrustumRenderModeAction();
}

void Application::setDestructivePaint(bool destructive) {
_destructiveAddVoxel = destructive;
}

void Application::setRenderWarnings(bool renderWarnings) {
_voxels.setRenderPipelineWarnings(renderWarnings);
}

@@ -1041,6 +1020,56 @@ void Application::setWantsDelta(bool wantsDelta) {
_myAvatar.setWantDelta(wantsDelta);
}

void Application::updateVoxelModeActions() {
// only the sender can be checked
foreach (QAction* action, _voxelModeActions->actions()) {
if (action->isChecked() && action != sender()) {
action->setChecked(false);
}
}
}

static void sendVoxelEditMessage(PACKET_HEADER header, VoxelDetail& detail) {
unsigned char* bufferOut;
int sizeOut;

if (createVoxelEditMessage(header, 0, 1, &detail, bufferOut, sizeOut)){
AgentList::getInstance()->broadcastToAgents(bufferOut, sizeOut, &AGENT_TYPE_VOXEL, 1);
delete bufferOut;
}
}

void Application::addVoxelInFrontOfAvatar() {
VoxelDetail detail;

glm::vec3 position = (_myAvatar.getPosition() + _myAvatar.getCameraDirection()) * (1.0f / TREE_SCALE);
detail.s = _mouseVoxelScale;

detail.x = detail.s * floor(position.x / detail.s);
detail.y = detail.s * floor(position.y / detail.s);
detail.z = detail.s * floor(position.z / detail.s);
QColor paintColor = _voxelPaintColor->data().value<QColor>();
detail.red = paintColor.red();
detail.green = paintColor.green();
detail.blue = paintColor.blue();

PACKET_HEADER message = (_destructiveAddVoxel->isChecked() ?
PACKET_HEADER_SET_VOXEL_DESTRUCTIVE : PACKET_HEADER_SET_VOXEL);
sendVoxelEditMessage(message, detail);

// create the voxel locally so it appears immediately
_voxels.createVoxel(detail.x, detail.y, detail.z, detail.s,
detail.red, detail.green, detail.blue, _destructiveAddVoxel->isChecked());
}

void Application::decreaseVoxelSize() {
_mouseVoxelScale /= 2;
}

void Application::increaseVoxelSize() {
_mouseVoxelScale *= 2;
}

static QIcon createSwatchIcon(const QColor& color) {
QPixmap map(16, 16);
map.fill(color);

@@ -1096,11 +1125,29 @@ void Application::initMenu() {
_renderStatsOn->setShortcut(Qt::Key_Slash);
(_logOn = toolsMenu->addAction("Log"))->setCheckable(true);
_logOn->setChecked(true);
_voxelPaintColor = toolsMenu->addAction("Voxel Paint Color", this, SLOT(chooseVoxelPaintColor()), Qt::Key_7);

QMenu* voxelMenu = menuBar->addMenu("Voxels");
_voxelModeActions = new QActionGroup(this);
_voxelModeActions->setExclusive(false); // exclusivity implies one is always checked
(_addVoxelMode = voxelMenu->addAction(
"Add Voxel Mode", this, SLOT(updateVoxelModeActions()), Qt::Key_1))->setCheckable(true);
_voxelModeActions->addAction(_addVoxelMode);
(_deleteVoxelMode = voxelMenu->addAction(
"Delete Voxel Mode", this, SLOT(updateVoxelModeActions()), Qt::Key_2))->setCheckable(true);
_voxelModeActions->addAction(_deleteVoxelMode);
(_colorVoxelMode = voxelMenu->addAction(
"Color Voxel Mode", this, SLOT(updateVoxelModeActions()), Qt::Key_3))->setCheckable(true);
_voxelModeActions->addAction(_colorVoxelMode);

voxelMenu->addAction("Place Voxel", this, SLOT(addVoxelInFrontOfAvatar()), Qt::Key_4);
voxelMenu->addAction("Decrease Voxel Size", this, SLOT(decreaseVoxelSize()), Qt::Key_5);
voxelMenu->addAction("Increase Voxel Size", this, SLOT(increaseVoxelSize()), Qt::Key_6);

_voxelPaintColor = voxelMenu->addAction("Voxel Paint Color", this, SLOT(chooseVoxelPaintColor()), Qt::Key_7);
QColor paintColor(128, 128, 128);
_voxelPaintColor->setData(paintColor);
_voxelPaintColor->setIcon(createSwatchIcon(paintColor));
toolsMenu->addAction("Create Voxel is Destructive", this, SLOT(setDestructivePaint(bool)))->setCheckable(true);
(_destructiveAddVoxel = voxelMenu->addAction("Create Voxel is Destructive"))->setCheckable(true);

QMenu* frustumMenu = menuBar->addMenu("Frustum");
(_frustumOn = frustumMenu->addAction("Display Frustum"))->setCheckable(true);

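The initMenu() hunk above builds the voxel edit modes as a non-exclusive QActionGroup, and the diff notes later that a non-exclusive group never reports a checked action, so the application scans the group itself. A minimal Qt sketch of that pattern, with illustrative names rather than the application's actual menu code:

    #include <QAction>
    #include <QActionGroup>
    #include <QMenu>

    // Non-exclusive group: the user can toggle the active mode back off
    // (an exclusive group always keeps one action checked), at the cost of
    // QActionGroup::checkedAction() no longer being useful.
    QActionGroup* makeModeGroup(QMenu* menu, QObject* parent) {
        QActionGroup* group = new QActionGroup(parent);
        group->setExclusive(false);
        for (const char* name : {"Add Mode", "Delete Mode", "Color Mode"}) {
            QAction* action = menu->addAction(name);
            action->setCheckable(true);
            group->addAction(action);
        }
        return group;
    }

    // Find the active mode by scanning the group manually.
    QAction* checkedModeAction(QActionGroup* group) {
        foreach (QAction* action, group->actions()) {
            if (action->isChecked()) {
                return action;
            }
        }
        return 0;   // nothing checked means "no edit mode"
    }
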
@@ -1189,16 +1236,6 @@ void Application::init() {
gettimeofday(&_lastTimeIdle, NULL);
}

static void sendVoxelEditMessage(PACKET_HEADER header, VoxelDetail& detail) {
unsigned char* bufferOut;
int sizeOut;

if (createVoxelEditMessage(header, 0, 1, &detail, bufferOut, sizeOut)){
AgentList::getInstance()->broadcastToAgents(bufferOut, sizeOut, &AGENT_TYPE_VOXEL, 1);
delete bufferOut;
}
}

void Application::updateAvatar(float deltaTime) {
// Update my avatar's head position from gyros
_myAvatar.updateHeadFromGyros(deltaTime, &_serialPort, &_gravity);

@@ -1249,7 +1286,7 @@ void Application::updateAvatar(float deltaTime) {

// Get audio loudness data from audio input device
#ifndef _WIN32
_myAvatar.setLoudness(_audio.getInputLoudness());
_myAvatar.setLoudness(_audio.getLastInputLoudness());
#endif

// Update Avatar with latest camera and view frustum data...

@@ -1297,7 +1334,8 @@ void Application::updateAvatar(float deltaTime) {
_paintingVoxel.y >= 0.0 && _paintingVoxel.y <= 1.0 &&
_paintingVoxel.z >= 0.0 && _paintingVoxel.z <= 1.0) {

PACKET_HEADER message = (_destructiveAddVoxel ? PACKET_HEADER_SET_VOXEL_DESTRUCTIVE : PACKET_HEADER_SET_VOXEL);
PACKET_HEADER message = (_destructiveAddVoxel->isChecked() ?
PACKET_HEADER_SET_VOXEL_DESTRUCTIVE : PACKET_HEADER_SET_VOXEL);
sendVoxelEditMessage(message, _paintingVoxel);
}
}

@@ -1547,8 +1585,9 @@ void Application::displaySide(Camera& whichCamera) {

// indicate what we'll be adding/removing in mouse mode, if anything
if (_mouseVoxel.s != 0) {
glDisable(GL_LIGHTING);
glPushMatrix();
if (_mouseMode == ADD_VOXEL_MODE) {
if (_addVoxelMode->isChecked()) {
// use a contrasting color so that we can see what we're doing
glColor3ub(_mouseVoxel.red + 128, _mouseVoxel.green + 128, _mouseVoxel.blue + 128);
} else {

@@ -1562,6 +1601,7 @@ void Application::displaySide(Camera& whichCamera) {
glutWireCube(_mouseVoxel.s);
glLineWidth(1.0f);
glPopMatrix();
glEnable(GL_LIGHTING);
}

if (_renderAvatarsOn->isChecked()) {

@@ -1877,35 +1917,18 @@ void Application::shiftPaintingColor() {
_paintingVoxel.blue = (_dominantColor == 2) ? randIntInRange(200, 255) : randIntInRange(40, 100);
}

void Application::addVoxelInFrontOfAvatar() {
VoxelDetail detail;

glm::vec3 position = (_myAvatar.getPosition() + _myAvatar.getCameraDirection()) * (1.0f / TREE_SCALE);
detail.s = _mouseVoxelScale;

detail.x = detail.s * floor(position.x / detail.s);
detail.y = detail.s * floor(position.y / detail.s);
detail.z = detail.s * floor(position.z / detail.s);
QColor paintColor = _voxelPaintColor->data().value<QColor>();
detail.red = paintColor.red();
detail.green = paintColor.green();
detail.blue = paintColor.blue();

PACKET_HEADER message = (_destructiveAddVoxel ? PACKET_HEADER_SET_VOXEL_DESTRUCTIVE : PACKET_HEADER_SET_VOXEL);
sendVoxelEditMessage(message, detail);

// create the voxel locally so it appears immediately
_voxels.createVoxel(detail.x, detail.y, detail.z, detail.s, detail.red, detail.green, detail.blue, _destructiveAddVoxel);
}

void Application::addVoxelUnderCursor() {
if (_mouseVoxel.s != 0) {
PACKET_HEADER message = (_destructiveAddVoxel ? PACKET_HEADER_SET_VOXEL_DESTRUCTIVE : PACKET_HEADER_SET_VOXEL);
PACKET_HEADER message = (_destructiveAddVoxel->isChecked() ?
PACKET_HEADER_SET_VOXEL_DESTRUCTIVE : PACKET_HEADER_SET_VOXEL);
sendVoxelEditMessage(message, _mouseVoxel);

// create the voxel locally so it appears immediately
_voxels.createVoxel(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z, _mouseVoxel.s,
_mouseVoxel.red, _mouseVoxel.green, _mouseVoxel.blue, _destructiveAddVoxel);
_mouseVoxel.red, _mouseVoxel.green, _mouseVoxel.blue, _destructiveAddVoxel->isChecked());

// remember the position for drag detection
_lastMouseVoxelPos = glm::vec3(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z);
}
}

@@ -1915,6 +1938,9 @@ void Application::deleteVoxelUnderCursor() {

// delete the voxel locally so it disappears immediately
_voxels.deleteVoxelAt(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z, _mouseVoxel.s);

// remember the position for drag detection
_lastMouseVoxelPos = glm::vec3(_mouseVoxel.x, _mouseVoxel.y, _mouseVoxel.z);
}
}

@@ -1949,18 +1975,23 @@ void Application::setMenuShortcutsEnabled(bool enabled) {
setShortcutsEnabled(_window->menuBar(), enabled);
}

// when QActionGroup is set to non-exclusive, it doesn't return anything as checked;
// hence, we must check ourselves
QAction* Application::checkedVoxelModeAction() const {
foreach (QAction* action, _voxelModeActions->actions()) {
if (action->isChecked()) {
return action;
}
}
return 0;
}

void Application::attachNewHeadToAgent(Agent *newAgent) {
if (newAgent->getLinkedData() == NULL) {
newAgent->setLinkedData(new Avatar(false));
}
}

#ifndef _WIN32
void Application::audioMixerUpdate(in_addr_t newMixerAddress, in_port_t newMixerPort) {
static_cast<Application*>(QCoreApplication::instance())->_audio.updateMixerParams(newMixerAddress, newMixerPort);
}
#endif

// Receive packets from other agents/servers and decide what to do with them!
void* Application::networkReceive(void* args) {
sockaddr senderAddress;

@@ -1992,6 +2023,9 @@ void* Application::networkReceive(void* args) {

printf("The rotation: %f, %f, %f\n", rotationRates[0], rotationRates[1], rotationRates[2]);
break;
case PACKET_HEADER_MIXED_AUDIO:
app->_audio.addReceivedAudioToBuffer(app->_incomingPacket, bytesReceived);
break;
case PACKET_HEADER_VOXEL_DATA:
case PACKET_HEADER_VOXEL_DATA_MONOCHROME:
case PACKET_HEADER_Z_COMMAND:

@@ -30,6 +30,7 @@
#include "ui/ChatEntry.h"

class QAction;
class QActionGroup;
class QGLWidget;
class QKeyEvent;
class QMainWindow;

@@ -57,6 +58,8 @@ public:
void mouseReleaseEvent(QMouseEvent* event);

void wheelEvent(QWheelEvent* event);

const Avatar& getAvatar() const { return _myAvatar; }

private slots:

@@ -76,7 +79,6 @@ private slots:
void setFrustumOffset(bool frustumOffset);
void cycleFrustumRenderMode();

void setDestructivePaint(bool destructive);
void setRenderWarnings(bool renderWarnings);
void doKillLocalVoxels();
void doRandomizeVoxelColors();

@@ -89,6 +91,10 @@ private slots:
void setWantsMonochrome(bool wantsMonochrome);
void setWantsResIn(bool wantsResIn);
void setWantsDelta(bool wantsDelta);
void updateVoxelModeActions();
void addVoxelInFrontOfAvatar();
void decreaseVoxelSize();
void increaseVoxelSize();
void chooseVoxelPaintColor();

private:

@@ -110,7 +116,6 @@ private:

void setupPaintingVoxel();
void shiftPaintingColor();
void addVoxelInFrontOfAvatar();
void addVoxelUnderCursor();
void deleteVoxelUnderCursor();

@@ -118,10 +123,9 @@ private:

void setMenuShortcutsEnabled(bool enabled);

QAction* checkedVoxelModeAction() const;

static void attachNewHeadToAgent(Agent *newAgent);
#ifndef _WIN32
static void audioMixerUpdate(in_addr_t newMixerAddress, in_port_t newMixerPort);
#endif
static void* networkReceive(void* args);

QMainWindow* _window;

@@ -137,7 +141,12 @@ private:
QAction* _renderStatsOn; // Whether to show onscreen text overlay with stats
QAction* _renderFrameTimerOn; // Whether to show onscreen text overlay with stats
QAction* _logOn; // Whether to show on-screen log
QActionGroup* _voxelModeActions; // The group of voxel edit mode actions
QAction* _addVoxelMode; // Whether add voxel mode is enabled
QAction* _deleteVoxelMode; // Whether delete voxel mode is enabled
QAction* _colorVoxelMode; // Whether color voxel mode is enabled
QAction* _voxelPaintColor; // The color with which to paint voxels
QAction* _destructiveAddVoxel; // when doing voxel editing do we want them to be destructive
QAction* _frustumOn; // Whether or not to display the debug view frustum
QAction* _viewFrustumFromOffset; // Whether or not to offset the view of the frustum
QAction* _cameraFrustum; // which frustum to look at

@@ -190,18 +199,15 @@ private:
int _mouseY;
bool _mousePressed; // true if mouse has been pressed (clear when finished)

// The current mode for mouse interaction
enum MouseMode { NO_EDIT_MODE, ADD_VOXEL_MODE, DELETE_VOXEL_MODE, COLOR_VOXEL_MODE };
MouseMode _mouseMode;
VoxelDetail _mouseVoxel; // details of the voxel under the mouse cursor
float _mouseVoxelScale; // the scale for adding/removing voxels
VoxelDetail _mouseVoxel; // details of the voxel under the mouse cursor
float _mouseVoxelScale; // the scale for adding/removing voxels
glm::vec3 _lastMouseVoxelPos; // the position of the last mouse voxel edit

bool _paintOn; // Whether to paint voxels as you fly around
unsigned char _dominantColor; // The dominant color of the voxel we're painting
VoxelDetail _paintingVoxel; // The voxel we're painting if we're painting

bool _perfStatsOn; // Do we want to display perfStats?
bool _destructiveAddVoxel; // when doing voxel editing do we want them to be destructive

ChatEntry _chatEntry; // chat entry field
bool _chatEntryOn; // Whether to show the chat entry

@@ -10,19 +10,21 @@
#include <iostream>
#include <fstream>
#include <pthread.h>

#include <sys/stat.h>
#include <cstring>

#include <StdDev.h>
#include <UDPSocket.h>
#include <SharedUtil.h>
#include <PacketHeaders.h>
#include <AgentList.h>
#include <AgentTypes.h>

#include "Application.h"
#include "Audio.h"
#include "Util.h"
#include "Log.h"

Oscilloscope * scope;

const int NUM_AUDIO_CHANNELS = 2;

const int PACKET_LENGTH_BYTES = 1024;

@@ -55,15 +57,8 @@ const float AUDIO_CALLBACK_MSECS = (float)BUFFER_LENGTH_SAMPLES / (float)SAMPLE_

const int AGENT_LOOPBACK_MODIFIER = 307;

const char LOCALHOST_MIXER[] = "0.0.0.0";
const char WORKCLUB_MIXER[] = "192.168.1.19";
const char EC2_WEST_MIXER[] = "54.241.92.53";

const int AUDIO_UDP_LISTEN_PORT = 55444;

int starve_counter = 0;
int numStarves = 0;
StDev stdev;
bool stopAudioReceiveThread = false;

int samplesLeftForFlange = 0;
int lastYawMeasuredMaximum = 0;

@@ -71,52 +66,42 @@ float flangeIntensity = 0;
float flangeRate = 0;
float flangeWeight = 0;

timeval firstPlaybackTimer;
int packetsReceivedThisPlayback = 0;
float usecsAtStartup = 0;

/**
* Audio callback used by portaudio.
* Communicates with Audio via a shared pointer to Audio::data.
* Writes input audio channels (if they exist) into Audio::data->buffer,
multiplied by Audio::data->inputGain.
* Then writes Audio::data->buffer into output audio channels, and clears
the portion of Audio::data->buffer that has been read from for reuse.
*
* @param[in] inputBuffer A pointer to an internal portaudio data buffer containing data read by portaudio.
* @param[out] outputBuffer A pointer to an internal portaudio data buffer to be read by the configured output device.
* @param[in] frames Number of frames that portaudio requests to be read/written.
(Valid size of input/output buffers = frames * number of channels (2) * sizeof data type (float)).
* @param[in] timeInfo Portaudio time info. Currently unused.
* @param[in] statusFlags Portaudio status flags. Currently unused.
* @param[in] userData Pointer to supplied user data (in this case, a pointer to Audio::data).
Used to communicate with external code (since portaudio calls this function from another thread).
* @return Should be of type PaStreamCallbackResult. Return paComplete to end the stream, or paContinue to continue (default).
Can be used to end the stream from within the callback.
*/

int audioCallback (const void *inputBuffer,
void *outputBuffer,
// inputBuffer A pointer to an internal portaudio data buffer containing data read by portaudio.
// outputBuffer A pointer to an internal portaudio data buffer to be read by the configured output device.
// frames Number of frames that portaudio requests to be read/written.
// timeInfo Portaudio time info. Currently unused.
// statusFlags Portaudio status flags. Currently unused.
// userData Pointer to supplied user data (in this case, a pointer to the parent Audio object
int audioCallback (const void* inputBuffer,
void* outputBuffer,
unsigned long frames,
const PaStreamCallbackTimeInfo *timeInfo,
PaStreamCallbackFlags statusFlags,
void *userData)
{
AudioData *data = (AudioData *) userData;
void* userData) {

Audio* parentAudio = (Audio*) userData;
AgentList* agentList = AgentList::getInstance();

Application* interface = (Application*) QCoreApplication::instance();
Avatar interfaceAvatar = interface->getAvatar();

bool addPing = (randFloat() < 0.005f);

int16_t *inputLeft = ((int16_t **) inputBuffer)[0];
int16_t *outputLeft = ((int16_t **) outputBuffer)[0];
int16_t *outputRight = ((int16_t **) outputBuffer)[1];

// Compare the input and output streams to look for correlation
data->analyzeEcho(inputLeft, outputLeft, BUFFER_LENGTH_SAMPLES);
parentAudio->analyzeEcho(inputLeft, outputLeft, BUFFER_LENGTH_SAMPLES);

// Add Procedural effects to input samples
data->addProceduralSounds(inputLeft, BUFFER_LENGTH_SAMPLES);
parentAudio->addProceduralSounds(inputLeft, BUFFER_LENGTH_SAMPLES);

// add data to the scope
scope->addSamples(1, outputLeft, PACKET_LENGTH_SAMPLES_PER_CHANNEL);
scope->addSamples(2, outputRight, PACKET_LENGTH_SAMPLES_PER_CHANNEL);
parentAudio->_scope->addSamples(1, outputLeft, PACKET_LENGTH_SAMPLES_PER_CHANNEL);
parentAudio->_scope->addSamples(2, outputRight, PACKET_LENGTH_SAMPLES_PER_CHANNEL);

if (inputLeft != NULL) {

@@ -127,35 +112,32 @@ int audioCallback (const void *inputBuffer,
}

loudness /= BUFFER_LENGTH_SAMPLES;
data->lastInputLoudness = loudness;
parentAudio->_lastInputLoudness = loudness;

// add data to the scope
scope->addSamples(0, inputLeft, BUFFER_LENGTH_SAMPLES);
parentAudio->_scope->addSamples(0, inputLeft, BUFFER_LENGTH_SAMPLES);

if (data->mixerAddress != 0) {
sockaddr_in audioMixerSocket;
audioMixerSocket.sin_family = AF_INET;
audioMixerSocket.sin_addr.s_addr = data->mixerAddress;
audioMixerSocket.sin_port = data->mixerPort;

Agent* audioMixer = agentList->soloAgentOfType(AGENT_TYPE_AUDIO_MIXER);

if (audioMixer) {
int leadingBytes = 2 + (sizeof(float) * 4);

// we need the amount of bytes in the buffer + 1 for type
// + 12 for 3 floats for position + float for bearing + 1 attenuation byte
unsigned char dataPacket[BUFFER_LENGTH_BYTES + leadingBytes];

dataPacket[0] = PACKET_HEADER_INJECT_AUDIO;
dataPacket[0] = PACKET_HEADER_MICROPHONE_AUDIO;
unsigned char *currentPacketPtr = dataPacket + 1;

// memcpy the three float positions
memcpy(currentPacketPtr, &data->linkedAvatar->getHeadPosition(), sizeof(float) * 3);
memcpy(currentPacketPtr, &interfaceAvatar.getHeadPosition(), sizeof(float) * 3);
currentPacketPtr += (sizeof(float) * 3);

// tell the mixer not to add additional attenuation to our source
*(currentPacketPtr++) = 255;

// memcpy the corrected render yaw
float correctedYaw = fmodf(-1 * data->linkedAvatar->getAbsoluteHeadYaw(), 360);
float correctedYaw = fmodf(-1 * interfaceAvatar.getAbsoluteHeadYaw(), 360);

if (correctedYaw > 180) {
correctedYaw -= 360;

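The hunk above assembles the microphone packet sent to the audio mixer: per its own comment, 1 type byte, 3 position floats, 1 attenuation byte, and 1 bearing float (2 + 4 * sizeof(float) = 18 leading bytes) followed by the raw samples. A short sketch of just that header layout, with illustrative names (not the project's actual API):

    #include <cstdint>
    #include <cstring>

    // Writes the 18-byte leading header implied by the diff and returns its size;
    // the raw audio samples are appended immediately after it by the caller.
    int writeMicrophonePacketHeader(unsigned char* packet, unsigned char headerByte,
                                    const float position[3], float bearing) {
        unsigned char* cursor = packet;
        *cursor++ = headerByte;                       // e.g. PACKET_HEADER_MICROPHONE_AUDIO
        memcpy(cursor, position, sizeof(float) * 3);  // head position
        cursor += sizeof(float) * 3;
        *cursor++ = 255;                              // attenuation byte: 255 = "don't attenuate me"
        memcpy(cursor, &bearing, sizeof(float));      // corrected yaw, wrapped into [-180, 180]
        cursor += sizeof(float);
        return (int)(cursor - packet);                // 18 bytes before the audio payload
    }
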
@@ -163,26 +145,27 @@ int audioCallback (const void *inputBuffer,
correctedYaw += 360;
}

if (data->mixerLoopbackFlag) {
if (parentAudio->_mixerLoopbackFlag) {
correctedYaw = correctedYaw > 0
? correctedYaw + AGENT_LOOPBACK_MODIFIER
: correctedYaw - AGENT_LOOPBACK_MODIFIER;
? correctedYaw + AGENT_LOOPBACK_MODIFIER
: correctedYaw - AGENT_LOOPBACK_MODIFIER;
}

memcpy(currentPacketPtr, &correctedYaw, sizeof(float));
currentPacketPtr += sizeof(float);
currentPacketPtr += sizeof(float);

// copy the audio data to the last BUFFER_LENGTH_BYTES bytes of the data packet
memcpy(currentPacketPtr, inputLeft, BUFFER_LENGTH_BYTES);

data->audioSocket->send((sockaddr *)&audioMixerSocket, dataPacket, BUFFER_LENGTH_BYTES + leadingBytes);
agentList->getAgentSocket().send(audioMixer->getActiveSocket(), dataPacket, BUFFER_LENGTH_BYTES + leadingBytes);
}
}

}

memset(outputLeft, 0, PACKET_LENGTH_BYTES_PER_CHANNEL);
memset(outputRight, 0, PACKET_LENGTH_BYTES_PER_CHANNEL);

AudioRingBuffer *ringBuffer = data->ringBuffer;

AudioRingBuffer* ringBuffer = &parentAudio->_ringBuffer;

// if we've been reset, and there isn't any new packets yet
// just play some silence

@@ -190,15 +173,16 @@ int audioCallback (const void *inputBuffer,
if (ringBuffer->getEndOfLastWrite() != NULL) {

if (!ringBuffer->isStarted() && ringBuffer->diffLastWriteNextOutput() < PACKET_LENGTH_SAMPLES + JITTER_BUFFER_SAMPLES) {
//printLog("Held back, buffer has %d of %d samples required.\n", ringBuffer->diffLastWriteNextOutput(), PACKET_LENGTH_SAMPLES + JITTER_BUFFER_SAMPLES);
//printLog("Held back, buffer has %d of %d samples required.\n",
// ringBuffer->diffLastWriteNextOutput(), PACKET_LENGTH_SAMPLES + JITTER_BUFFER_SAMPLES);
} else if (ringBuffer->diffLastWriteNextOutput() < PACKET_LENGTH_SAMPLES) {
ringBuffer->setStarted(false);

starve_counter++;
packetsReceivedThisPlayback = 0;

::numStarves++;
parentAudio->_packetsReceivedThisPlayback = 0;

// printLog("Starved #%d\n", starve_counter);
data->wasStarved = 10; // Frames to render the indication that the system was starved.
parentAudio->_wasStarved = 10; // Frames to render the indication that the system was starved.
} else {
if (!ringBuffer->isStarted()) {
ringBuffer->setStarted(true);

@@ -212,21 +196,22 @@ int audioCallback (const void *inputBuffer,
// if we haven't fired off the flange effect, check if we should
// TODO: lastMeasuredHeadYaw is now relative to body - check if this still works.

int lastYawMeasured = fabsf(data->linkedAvatar->getLastMeasuredHeadYaw());
int lastYawMeasured = fabsf(interfaceAvatar.getLastMeasuredHeadYaw());

if (!samplesLeftForFlange && lastYawMeasured > MIN_FLANGE_EFFECT_THRESHOLD) {
if (!::samplesLeftForFlange && lastYawMeasured > MIN_FLANGE_EFFECT_THRESHOLD) {
// we should flange for one second
if ((lastYawMeasuredMaximum = std::max(lastYawMeasuredMaximum, lastYawMeasured)) != lastYawMeasured) {
lastYawMeasuredMaximum = std::min(lastYawMeasuredMaximum, MIN_FLANGE_EFFECT_THRESHOLD);
if ((::lastYawMeasuredMaximum = std::max(::lastYawMeasuredMaximum, lastYawMeasured)) != lastYawMeasured) {
::lastYawMeasuredMaximum = std::min(::lastYawMeasuredMaximum, MIN_FLANGE_EFFECT_THRESHOLD);

samplesLeftForFlange = SAMPLE_RATE;
::samplesLeftForFlange = SAMPLE_RATE;

flangeIntensity = MIN_FLANGE_INTENSITY +
((lastYawMeasuredMaximum - MIN_FLANGE_EFFECT_THRESHOLD) / (float)(MAX_FLANGE_EFFECT_THRESHOLD - MIN_FLANGE_EFFECT_THRESHOLD)) *
::flangeIntensity = MIN_FLANGE_INTENSITY +
((::lastYawMeasuredMaximum - MIN_FLANGE_EFFECT_THRESHOLD) /
(float)(MAX_FLANGE_EFFECT_THRESHOLD - MIN_FLANGE_EFFECT_THRESHOLD)) *
(1 - MIN_FLANGE_INTENSITY);

flangeRate = FLANGE_BASE_RATE * flangeIntensity;
flangeWeight = MAX_FLANGE_SAMPLE_WEIGHT * flangeIntensity;
::flangeRate = FLANGE_BASE_RATE * ::flangeIntensity;
::flangeWeight = MAX_FLANGE_SAMPLE_WEIGHT * ::flangeIntensity;
}
}

@@ -235,13 +220,14 @@ int audioCallback (const void *inputBuffer,
int leftSample = ringBuffer->getNextOutput()[s];
int rightSample = ringBuffer->getNextOutput()[s + PACKET_LENGTH_SAMPLES_PER_CHANNEL];

if (samplesLeftForFlange > 0) {
float exponent = (SAMPLE_RATE - samplesLeftForFlange - (SAMPLE_RATE / flangeRate)) / (SAMPLE_RATE / flangeRate);
int sampleFlangeDelay = (SAMPLE_RATE / (1000 * flangeIntensity)) * powf(2, exponent);
if (::samplesLeftForFlange > 0) {
float exponent = (SAMPLE_RATE - ::samplesLeftForFlange - (SAMPLE_RATE / ::flangeRate)) /
(SAMPLE_RATE / ::flangeRate);
int sampleFlangeDelay = (SAMPLE_RATE / (1000 * ::flangeIntensity)) * powf(2, exponent);

if (samplesLeftForFlange != SAMPLE_RATE || s >= (SAMPLE_RATE / 2000)) {
if (::samplesLeftForFlange != SAMPLE_RATE || s >= (SAMPLE_RATE / 2000)) {
// we have a delayed sample to add to this sample

int16_t *flangeFrame = ringBuffer->getNextOutput();
int flangeIndex = s - sampleFlangeDelay;

@@ -257,21 +243,24 @@ int audioCallback (const void *inputBuffer,
int16_t leftFlangeSample = flangeFrame[flangeIndex];
int16_t rightFlangeSample = flangeFrame[flangeIndex + PACKET_LENGTH_SAMPLES_PER_CHANNEL];

leftSample = (1 - flangeWeight) * leftSample + (flangeWeight * leftFlangeSample);
rightSample = (1 - flangeWeight) * rightSample + (flangeWeight * rightFlangeSample);
leftSample = (1 - ::flangeWeight) * leftSample + (::flangeWeight * leftFlangeSample);
rightSample = (1 - ::flangeWeight) * rightSample + (::flangeWeight * rightFlangeSample);

samplesLeftForFlange--;
::samplesLeftForFlange--;

if (samplesLeftForFlange == 0) {
lastYawMeasuredMaximum = 0;
if (::samplesLeftForFlange == 0) {
::lastYawMeasuredMaximum = 0;
}
}
}

outputLeft[s] = leftSample;
outputRight[s] = rightSample;
if (!addPing) {
outputLeft[s] = leftSample;
outputRight[s] = rightSample;
} else {
outputLeft[s] = outputRight[s] = (int16_t)(sinf((float) s / 15.f) * 8000.f);
}
}

ringBuffer->setNextOutput(ringBuffer->getNextOutput() + PACKET_LENGTH_SAMPLES);

if (ringBuffer->getNextOutput() == ringBuffer->getBuffer() + RING_BUFFER_SAMPLES) {

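The flange branch above mixes each output sample with a delayed copy of itself using a single weight. A stripped-down sketch of just that wet/dry blend (the delay length computed with powf(2, exponent) is left out; a flangeWeight between 0 and 1 is assumed):

    #include <cstdint>

    // Blend a dry sample with its delayed copy: weight 0 = dry only, 1 = delayed only.
    int16_t flangeMix(int16_t drySample, int16_t delayedSample, float flangeWeight) {
        float mixed = (1.0f - flangeWeight) * drySample + flangeWeight * delayedSample;
        return (int16_t) mixed;
    }
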
@@ -280,147 +269,141 @@ int audioCallback (const void *inputBuffer,
}
}

if (randFloat() < 0.01) {
printLog("Ping!\n");
for (int i = 0; i < BUFFER_LENGTH_SAMPLES; i++) {
outputLeft[i] = (int16_t) (cosf((float)i / 8.f * 2000.f));
}
}

gettimeofday(&data->lastCallback, NULL);
gettimeofday(&parentAudio->_lastCallbackTime, NULL);
return paContinue;
}

void Audio::updateMixerParams(in_addr_t newMixerAddress, in_port_t newMixerPort) {
audioData->mixerAddress = newMixerAddress;
audioData->mixerPort = newMixerPort;
}

struct AudioRecThreadStruct {
AudioData *sharedAudioData;
};

void *receiveAudioViaUDP(void *args) {
AudioRecThreadStruct *threadArgs = (AudioRecThreadStruct *) args;
AudioData *sharedAudioData = threadArgs->sharedAudioData;

int16_t *receivedData = new int16_t[PACKET_LENGTH_SAMPLES];
ssize_t receivedBytes;

// Init Jitter timer values
timeval previousReceiveTime, currentReceiveTime = {};
gettimeofday(&previousReceiveTime, NULL);
gettimeofday(&currentReceiveTime, NULL);

int totalPacketsReceived = 0;

stdev.reset();

while (!stopAudioReceiveThread) {

if (sharedAudioData->audioSocket->receive((void *)receivedData, &receivedBytes)) {

gettimeofday(&currentReceiveTime, NULL);
totalPacketsReceived++;

double tDiff = diffclock(&previousReceiveTime, &currentReceiveTime);
//printLog("tDiff %4.1f\n", tDiff);
// Discard first few received packets for computing jitter (often they pile up on start)
if (totalPacketsReceived > 3) stdev.addValue(tDiff);
if (stdev.getSamples() > 500) {
sharedAudioData->measuredJitter = stdev.getStDev();
//printLog("Avg: %4.2f, Stdev: %4.2f\n", stdev.getAverage(), sharedAudioData->measuredJitter);
stdev.reset();
}

AudioRingBuffer *ringBuffer = sharedAudioData->ringBuffer;

if (!ringBuffer->isStarted()) {
packetsReceivedThisPlayback++;
}
else {
//printLog("Audio packet received at %6.0f\n", usecTimestampNow()/1000);
}
if (packetsReceivedThisPlayback == 1) gettimeofday(&firstPlaybackTimer, NULL);

ringBuffer->parseData((unsigned char *)receivedData, PACKET_LENGTH_BYTES);

previousReceiveTime = currentReceiveTime;
}
void outputPortAudioError(PaError error) {
if (error != paNoError) {
printLog("-- portaudio termination error --\n");
printLog("PortAudio error (%d): %s\n", error, Pa_GetErrorText(error));
}

pthread_exit(0);
}

void Audio::setMixerLoopbackFlag(bool newMixerLoopbackFlag) {
audioData->mixerLoopbackFlag = newMixerLoopbackFlag;
}

bool Audio::getMixerLoopbackFlag() {
return audioData->mixerLoopbackFlag;
}

/**
* Initialize portaudio and start an audio stream.
* Should be called at the beginning of program exection.
* @seealso Audio::terminate
* @return Returns true if successful or false if an error occurred.
Use Audio::getError() to retrieve the error code.
*/
Audio::Audio(Oscilloscope* s, Avatar* linkedAvatar) {
paError = Pa_Initialize();
if (paError != paNoError) goto error;

scope = s;

audioData = new AudioData();

audioData->linkedAvatar = linkedAvatar;

// setup a UDPSocket
audioData->audioSocket = new UDPSocket(AUDIO_UDP_LISTEN_PORT);
audioData->ringBuffer = new AudioRingBuffer(RING_BUFFER_SAMPLES, PACKET_LENGTH_SAMPLES);

AudioRecThreadStruct threadArgs;
threadArgs.sharedAudioData = audioData;

pthread_create(&audioReceiveThread, NULL, receiveAudioViaUDP, (void *) &threadArgs);

paError = Pa_OpenDefaultStream(&stream,
2, // input channels
2, // output channels
(paInt16 | paNonInterleaved), // sample format
SAMPLE_RATE, // sample rate (hz)
BUFFER_LENGTH_SAMPLES, // frames per buffer
audioCallback, // callback function
(void *) audioData); // user data to be passed to callback
if (paError != paNoError) goto error;

initialized = true;
Audio::Audio(Oscilloscope* scope) :
_stream(NULL),
_ringBuffer(RING_BUFFER_SAMPLES, PACKET_LENGTH_SAMPLES),
_scope(scope),
_averagedLatency(0.0),
_measuredJitter(0),
_wasStarved(0),
_lastInputLoudness(0),
_mixerLoopbackFlag(false),
_lastVelocity(0),
_lastAcceleration(0),
_totalPacketsReceived(0),
_firstPlaybackTime(),
_packetsReceivedThisPlayback(0)
{
outputPortAudioError(Pa_Initialize());
outputPortAudioError(Pa_OpenDefaultStream(&_stream,
2,
2,
(paInt16 | paNonInterleaved),
SAMPLE_RATE,
BUFFER_LENGTH_SAMPLES,
audioCallback,
(void*) this));

// start the stream now that sources are good to go
Pa_StartStream(stream);
if (paError != paNoError) goto error;
outputPortAudioError(Pa_StartStream(_stream));

gettimeofday(&_lastReceiveTime, NULL);
}

Audio::~Audio() {
if (_stream) {
outputPortAudioError(Pa_CloseStream(_stream));
outputPortAudioError(Pa_Terminate());
}
}

// Take a pointer to the acquired microphone input samples and add procedural sounds
void Audio::addProceduralSounds(int16_t* inputBuffer, int numSamples) {
const float MAX_AUDIBLE_VELOCITY = 6.0;
const float MIN_AUDIBLE_VELOCITY = 0.1;
const int VOLUME_BASELINE = 400;
const float SOUND_PITCH = 8.f;

return;
float speed = glm::length(_lastVelocity);
float volume = VOLUME_BASELINE * (1.f - speed / MAX_AUDIBLE_VELOCITY);

error:
printLog("-- Failed to initialize portaudio --\n");
printLog("PortAudio error (%d): %s\n", paError, Pa_GetErrorText(paError));
initialized = false;
delete[] audioData;
// Add a noise-modulated sinewave with volume that tapers off with speed increasing
if ((speed > MIN_AUDIBLE_VELOCITY) && (speed < MAX_AUDIBLE_VELOCITY)) {
for (int i = 0; i < numSamples; i++) {
inputBuffer[i] += (int16_t)((cosf((float) i / SOUND_PITCH * speed) * randFloat()) * volume * speed);
}
}
}
void Audio::analyzeEcho(int16_t* inputBuffer, int16_t* outputBuffer, int numSamples) {
// Compare output and input streams, looking for evidence of correlation needing echo cancellation
//
// OFFSET_RANGE tells us how many samples to vary the analysis window when looking for correlation,
// and should be equal to the largest physical distance between speaker and microphone, where
// OFFSET_RANGE = 1 / (speedOfSound (meters / sec) / SamplingRate (samples / sec)) * distance
//
const int OFFSET_RANGE = 10;
const int SIGNAL_FLOOR = 1000;
float correlation[2 * OFFSET_RANGE + 1];
int numChecked = 0;
bool foundSignal = false;

memset(correlation, 0, sizeof(float) * (2 * OFFSET_RANGE + 1));

for (int offset = -OFFSET_RANGE; offset <= OFFSET_RANGE; offset++) {
for (int i = 0; i < numSamples; i++) {
if ((i + offset >= 0) && (i + offset < numSamples)) {
correlation[offset + OFFSET_RANGE] +=
(float) abs(inputBuffer[i] - outputBuffer[i + offset]);
numChecked++;
foundSignal |= (inputBuffer[i] > SIGNAL_FLOOR);
}
}
correlation[offset + OFFSET_RANGE] /= numChecked;
numChecked = 0;
if (foundSignal) {
printLog("%4.2f, ", correlation[offset + OFFSET_RANGE]);
}
}
if (foundSignal) printLog("\n");
}

float Audio::getInputLoudness() const {
return audioData->lastInputLoudness;
void Audio::addReceivedAudioToBuffer(unsigned char* receivedData, int receivedBytes) {
const int NUM_INITIAL_PACKETS_DISCARD = 3;

timeval currentReceiveTime;
gettimeofday(&currentReceiveTime, NULL);
_totalPacketsReceived++;

double timeDiff = diffclock(&_lastReceiveTime, &currentReceiveTime);

// Discard first few received packets for computing jitter (often they pile up on start)
if (_totalPacketsReceived > NUM_INITIAL_PACKETS_DISCARD) {
::stdev.addValue(timeDiff);
}

if (::stdev.getSamples() > 500) {
_measuredJitter = ::stdev.getStDev();
//printLog("Avg: %4.2f, Stdev: %4.2f\n", stdev.getAverage(), sharedAudioData->measuredJitter);
::stdev.reset();
}

if (!_ringBuffer.isStarted()) {
_packetsReceivedThisPlayback++;
}

if (_packetsReceivedThisPlayback == 1) {
gettimeofday(&_firstPlaybackTime, NULL);
}

_ringBuffer.parseData((unsigned char *)receivedData, PACKET_LENGTH_BYTES);

_lastReceiveTime = currentReceiveTime;
}

void Audio::render(int screenWidth, int screenHeight)
{
if (initialized) {
void Audio::render(int screenWidth, int screenHeight) {
if (_stream) {
glLineWidth(2.0);
glBegin(GL_LINES);
glColor3f(1,1,1);

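The analyzeEcho() comment above relates OFFSET_RANGE to the speaker-to-microphone distance: offsets in samples = distance / (speedOfSound / sampleRate). A small worked sketch of that relation; the 22050 Hz rate and 343 m/s speed of sound are assumed values for illustration, not taken from this diff:

    #include <cmath>
    #include <cstdio>

    // Samples of offset to search for a given speaker-to-mic distance.
    int offsetRangeForDistance(float distanceMeters, float sampleRate, float speedOfSound) {
        float metersPerSample = speedOfSound / sampleRate;          // distance sound travels per sample
        return (int) std::ceil(distanceMeters / metersPerSample);   // round up to cover the full path
    }

    int main() {
        // About 0.15 m at 22050 Hz and 343 m/s comes out to roughly 10 samples,
        // which lines up with the OFFSET_RANGE = 10 constant in the hunk above.
        printf("%d\n", offsetRangeForDistance(0.15f, 22050.0f, 343.0f));
        return 0;
    }
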
@@ -447,25 +430,23 @@ void Audio::render(int screenWidth, int screenHeight)
}
glEnd();

// Show a bar with the amount of audio remaining in ring buffer beyond current playback
float remainingBuffer = 0;
timeval currentTime;
gettimeofday(&currentTime, NULL);
float timeLeftInCurrentBuffer = 0;
if (audioData->lastCallback.tv_usec > 0) {
timeLeftInCurrentBuffer = AUDIO_CALLBACK_MSECS - diffclock(&audioData->lastCallback, &currentTime);
if (_lastCallbackTime.tv_usec > 0) {
timeLeftInCurrentBuffer = AUDIO_CALLBACK_MSECS - diffclock(&_lastCallbackTime, &currentTime);
}

// /(1000.0*(float)BUFFER_LENGTH_SAMPLES/(float)SAMPLE_RATE) * frameWidth

if (audioData->ringBuffer->getEndOfLastWrite() != NULL)
remainingBuffer = audioData->ringBuffer->diffLastWriteNextOutput() / PACKET_LENGTH_SAMPLES * AUDIO_CALLBACK_MSECS;
if (_ringBuffer.getEndOfLastWrite() != NULL)
remainingBuffer = _ringBuffer.diffLastWriteNextOutput() / PACKET_LENGTH_SAMPLES * AUDIO_CALLBACK_MSECS;

if (audioData->wasStarved == 0) glColor3f(0, 1, 0);
else {
glColor3f(0.5 + (float)audioData->wasStarved/20.0, 0, 0);
audioData->wasStarved--;
if (_wasStarved == 0) {
glColor3f(0, 1, 0);
} else {
glColor3f(0.5 + (_wasStarved / 20.0f), 0, 0);
_wasStarved--;
}

glBegin(GL_QUADS);

@@ -475,26 +456,29 @@ void Audio::render(int screenWidth, int screenHeight)
glVertex2f(startX, bottomY - 2);
glEnd();

if (audioData->averagedLatency == 0.0) audioData->averagedLatency = remainingBuffer + timeLeftInCurrentBuffer;
else audioData->averagedLatency = 0.99*audioData->averagedLatency + 0.01*((float)remainingBuffer + (float)timeLeftInCurrentBuffer);
if (_averagedLatency == 0.0) {
_averagedLatency = remainingBuffer + timeLeftInCurrentBuffer;
} else {
_averagedLatency = 0.99f * _averagedLatency + 0.01f * (remainingBuffer + timeLeftInCurrentBuffer);
}

// Show a yellow bar with the averaged msecs latency you are hearing (from time of packet receipt)
glColor3f(1,1,0);
glBegin(GL_QUADS);
glVertex2f(startX + audioData->averagedLatency/AUDIO_CALLBACK_MSECS*frameWidth - 2, topY - 2);
glVertex2f(startX + audioData->averagedLatency/AUDIO_CALLBACK_MSECS*frameWidth + 2, topY - 2);
glVertex2f(startX + audioData->averagedLatency/AUDIO_CALLBACK_MSECS*frameWidth + 2, bottomY + 2);
glVertex2f(startX + audioData->averagedLatency/AUDIO_CALLBACK_MSECS*frameWidth - 2, bottomY + 2);
glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth - 2, topY - 2);
glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth + 2, topY - 2);
glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth + 2, bottomY + 2);
glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth - 2, bottomY + 2);
glEnd();

char out[40];
sprintf(out, "%3.0f\n", audioData->averagedLatency);
drawtext(startX + audioData->averagedLatency/AUDIO_CALLBACK_MSECS*frameWidth - 10, topY-10, 0.10, 0, 1, 0, out, 1,1,0);
sprintf(out, "%3.0f\n", _averagedLatency);
drawtext(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth - 10, topY - 10, 0.10, 0, 1, 0, out, 1,1,0);
//drawtext(startX + 0, topY-10, 0.08, 0, 1, 0, out, 1,1,0);

// Show a Cyan bar with the most recently measured jitter stdev

int jitterPels = (float) audioData->measuredJitter/ ((1000.0*(float)PACKET_LENGTH_SAMPLES/(float)SAMPLE_RATE)) * (float)frameWidth;
int jitterPels = _measuredJitter / ((1000.0f * PACKET_LENGTH_SAMPLES / SAMPLE_RATE)) * frameWidth;

glColor3f(0,1,1);
glBegin(GL_QUADS);

@@ -504,7 +488,7 @@ void Audio::render(int screenWidth, int screenHeight)
glVertex2f(startX + jitterPels - 2, bottomY + 2);
glEnd();

sprintf(out,"%3.1f\n", audioData->measuredJitter);
sprintf(out,"%3.1f\n", _measuredJitter);
drawtext(startX + jitterPels - 5, topY-10, 0.10, 0, 1, 0, out, 0,1,1);

sprintf(out, "%3.1fms\n", JITTER_BUFFER_LENGTH_MSECS);

@@ -512,34 +496,4 @@ void Audio::render(int screenWidth, int screenHeight)
}
}

/**
* Close the running audio stream, and deinitialize portaudio.
* Should be called at the end of program execution.
* @return Returns true if the initialization was successful, or false if an error occured.
The error code may be retrieved by Audio::getError().
*/
bool Audio::terminate() {
stopAudioReceiveThread = true;
pthread_join(audioReceiveThread, NULL);

if (initialized) {
initialized = false;

paError = Pa_CloseStream(stream);
if (paError != paNoError) goto error;

paError = Pa_Terminate();
if (paError != paNoError) goto error;
}

delete audioData;

return true;

error:
printLog("-- portaudio termination error --\n");
printLog("PortAudio error (%d): %s\n", paError, Pa_GetErrorText(paError));
return false;
}

#endif

@@ -10,44 +10,48 @@
#define __interface__Audio__

#include <portaudio.h>
#include "AudioData.h"

#include <AudioRingBuffer.h>

#include "Oscilloscope.h"
#include "Avatar.h"

class Audio {
public:
    // initializes audio I/O
    Audio(Oscilloscope *s, Avatar *linkedAvatar);

    void render();
    Audio(Oscilloscope* scope);
    ~Audio();

    void render(int screenWidth, int screenHeight);

    bool getMixerLoopbackFlag();
    void setMixerLoopbackFlag(bool newMixerLoopbackFlag);
    void setMixerLoopbackFlag(bool mixerLoopbackFlag) { _mixerLoopbackFlag = mixerLoopbackFlag; }

    float getInputLoudness() const;
    void updateMixerParams(in_addr_t mixerAddress, in_port_t mixerPort);
    float getLastInputLoudness() const { return _lastInputLoudness; };

    void setLastAcceleration(glm::vec3 a) { audioData->setLastAcceleration(a); };
    void setLastVelocity(glm::vec3 v) { audioData->setLastVelocity(v); };
    void setLastAcceleration(glm::vec3 lastAcceleration) { _lastAcceleration = lastAcceleration; };
    void setLastVelocity(glm::vec3 lastVelocity) { _lastVelocity = lastVelocity; };

    // terminates audio I/O
    bool terminate();
    void addProceduralSounds(int16_t* inputBuffer, int numSamples);
    void analyzeEcho(int16_t* inputBuffer, int16_t* outputBuffer, int numSamples);

    void addReceivedAudioToBuffer(unsigned char* receivedData, int receivedBytes);
private:
    bool initialized;
    AudioData *audioData;

    // protects constructor so that public init method is used
    Audio();

    // hold potential error returned from PortAudio functions
    PaError paError;

    // audio stream handle
    PaStream *stream;

    // audio receive thread
    pthread_t audioReceiveThread;
    PaStream* _stream;
    AudioRingBuffer _ringBuffer;
    Oscilloscope* _scope;
    timeval _lastCallbackTime;
    timeval _lastReceiveTime;
    float _averagedLatency;
    float _measuredJitter;
    int _wasStarved;
    float _lastInputLoudness;
    bool _mixerLoopbackFlag;
    glm::vec3 _lastVelocity;
    glm::vec3 _lastAcceleration;
    int _totalPacketsReceived;
    timeval _firstPlaybackTime;
    int _packetsReceivedThisPlayback;

    // give access to AudioData class from audioCallback
    friend int audioCallback (const void*, void*, unsigned long, const PaStreamCallbackTimeInfo*, PaStreamCallbackFlags, void*);
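With AudioData gone, the interface talks to a single Audio object whose small accessors are now inline in the header above. A hypothetical caller-side sketch using only the methods declared there — scope, myAvatar, screenWidth, and screenHeight are assumed to exist elsewhere in the interface, and Avatar's getVelocity()/getAcceleration() are assumed accessors:

// Hypothetical usage of the refactored Audio interface; every identifier not declared
// in the header above is an assumed name.
Audio audio(scope);                         // scope: an Oscilloscope* owned by the application
audio.setMixerLoopbackFlag(false);          // don't ask the mixer to echo this client's own stream

// per frame: feed avatar motion in so procedural sounds can track it, then draw the stats bars
audio.setLastVelocity(myAvatar.getVelocity());
audio.setLastAcceleration(myAvatar.getAcceleration());
audio.render(screenWidth, screenHeight);

// mic loudness read back elsewhere, e.g. to drive the avatar's mouth
float loudness = audio.getLastInputLoudness();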
@@ -123,7 +123,7 @@ void Oscilloscope::render(int x, int y) {
    glDrawArrays(GL_LINES, MAX_SAMPLES * 0, usedWidth);
    glColor3f(0.0f, 1.0f ,1.0f);
    glDrawArrays(GL_LINES, MAX_SAMPLES * 1, usedWidth);
    glColor3f(1.0f, 1.0f ,0.0f);
    glColor3f(0.0f, 1.0f ,1.0f);
    glDrawArrays(GL_LINES, MAX_SAMPLES * 2, usedWidth);
    glDisableClientState(GL_VERTEX_ARRAY);
    glPopMatrix();
@@ -243,14 +243,10 @@ bool AgentList::addOrUpdateAgent(sockaddr *publicSocket, sockaddr *localSocket,
            // set the agent active right away
            newAgent->activatePublicSocket();
        }

        if (newAgent->getType() == AGENT_TYPE_AUDIO_MIXER && audioMixerSocketUpdate != NULL) {
            // this is an audio mixer
            // for now that means we need to tell the audio class
            // to use the local socket information the domain server gave us
            sockaddr_in *publicSocketIn = (sockaddr_in *)publicSocket;
            audioMixerSocketUpdate(publicSocketIn->sin_addr.s_addr, publicSocketIn->sin_port);
        } else if (newAgent->getType() == AGENT_TYPE_VOXEL || newAgent->getType() == AGENT_TYPE_AVATAR_MIXER) {

        if (newAgent->getType() == AGENT_TYPE_VOXEL ||
            newAgent->getType() == AGENT_TYPE_AVATAR_MIXER ||
            newAgent->getType() == AGENT_TYPE_AUDIO_MIXER) {
            // this is currently the cheat we use to talk directly to our test servers on EC2
            // to be removed when we have a proper identification strategy
            newAgent->activatePublicSocket();
@@ -46,7 +46,6 @@ public:
    AgentListIterator end() const;

    void(*linkedDataCreateCallback)(Agent *);
    void(*audioMixerSocketUpdate)(in_addr_t, in_port_t);

    int size() { return _numAgents; }
@@ -20,8 +20,9 @@
// Agent Type Codes
const char AGENT_TYPE_DOMAIN = 'D';
const char AGENT_TYPE_VOXEL = 'V';
const char AGENT_TYPE_AVATAR = 'I'; // could also be injector???
const char AGENT_TYPE_AVATAR = 'I';
const char AGENT_TYPE_AUDIO_MIXER = 'M';
const char AGENT_TYPE_AVATAR_MIXER = 'W';
const char AGENT_TYPE_AUDIO_INJECTOR = 'A';

#endif
@@ -22,12 +22,9 @@ const float BUFFER_SEND_INTERVAL_USECS = (BUFFER_LENGTH_SAMPLES / SAMPLE_RATE) *

AudioInjector::AudioInjector(const char* filename) :
    _numTotalBytesAudio(0),
    _position(),
    _bearing(0),
    _attenuationModifier(255)
{
    _position[0] = 0.0f;
    _position[1] = 0.0f;
    _position[2] = 0.0f;
    _attenuationModifier(255) {

    std::fstream sourceFile;
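The constructor change above swaps three per-element assignments for value-initialization of _position in the initializer list. Assuming _position is a plain float[3] (a glm::vec3 behaves the same way for this purpose), the two forms are equivalent:

// Illustration only: value-initializing an aggregate member in the initializer list
// zeroes every element, making explicit assignments in the body redundant.
struct Example {
    float _position[3];
    Example() : _position() {}    // _position[0..2] are all 0.0f after construction
};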
@@ -7,6 +7,9 @@
//

#include <cstring>

#include "PacketHeaders.h"

#include "AudioRingBuffer.h"

AudioRingBuffer::AudioRingBuffer(int ringSamples, int bufferSamples) :

@@ -46,18 +49,22 @@ AudioRingBuffer* AudioRingBuffer::clone() const {
const int AGENT_LOOPBACK_MODIFIER = 307;

int AudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes) {
    if (numBytes > (_bufferLengthSamples * sizeof(int16_t))) {

    unsigned char* dataBuffer = sourceBuffer + 1;

    if (sourceBuffer[0] == PACKET_HEADER_INJECT_AUDIO ||
        sourceBuffer[0] == PACKET_HEADER_MICROPHONE_AUDIO) {
        // if this came from an injector or interface client
        // there's data required for spatialization to pull out

        unsigned char *dataPtr = sourceBuffer + 1;
        memcpy(&_position, dataBuffer, sizeof(_position));
        dataBuffer += (sizeof(_position));

        memcpy(&_position, dataPtr, sizeof(_position));
        dataPtr += (sizeof(_position));

        unsigned int attenuationByte = *(dataPtr++);
        unsigned int attenuationByte = *(dataBuffer++);
        _attenuationRatio = attenuationByte / 255.0f;

        memcpy(&_bearing, dataPtr, sizeof(float));
        dataPtr += sizeof(_bearing);
        memcpy(&_bearing, dataBuffer, sizeof(float));
        dataBuffer += sizeof(_bearing);

        if (_bearing > 180 || _bearing < -180) {
            // we were passed an invalid bearing because this agent wants loopback (pressed the H key)

@@ -70,8 +77,6 @@ int AudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes) {
        } else {
            _shouldLoopbackForAgent = false;
        }

        sourceBuffer = dataPtr;
    }

    if (!_endOfLastWrite) {

@@ -82,7 +87,7 @@ int AudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes) {
        _started = false;
    }

    memcpy(_endOfLastWrite, sourceBuffer, _bufferLengthSamples * sizeof(int16_t));
    memcpy(_endOfLastWrite, dataBuffer, _bufferLengthSamples * sizeof(int16_t));

    _endOfLastWrite += _bufferLengthSamples;
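parseData() above implies a fixed layout for spatialized audio packets: a one-byte header ('I' from an injector, 'M' for microphone audio), the source position, a one-byte attenuation modifier, a bearing in degrees, and then the raw samples. A sender-side sketch that mirrors that layout — the function name and parameters are hypothetical, and the position is assumed to be three 32-bit floats:

// Hypothetical packing that mirrors what parseData() reads; assumes a three-float
// position and a single channel of int16_t samples.
#include <cstring>
#include <cstdint>

int packSpatializedAudio(unsigned char* packet, char header, const float* position,
                         unsigned char attenuationByte, float bearing,
                         const int16_t* samples, int numSamples) {
    unsigned char* cursor = packet;

    *(cursor++) = header;                                    // 'I' or 'M'

    memcpy(cursor, position, 3 * sizeof(float));             // x, y, z
    cursor += 3 * sizeof(float);

    *(cursor++) = attenuationByte;                           // 0-255, parsed back as ratio / 255.0f

    memcpy(cursor, &bearing, sizeof(bearing));               // degrees; out-of-range requests loopback
    cursor += sizeof(bearing);

    memcpy(cursor, samples, numSamples * sizeof(int16_t));   // the audio itself
    cursor += numSamples * sizeof(int16_t);

    return (int) (cursor - packet);                          // total bytes written
}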
@@ -20,6 +20,8 @@ const PACKET_HEADER PACKET_HEADER_PING_REPLY = 'R';
const PACKET_HEADER PACKET_HEADER_HEAD_DATA = 'H';
const PACKET_HEADER PACKET_HEADER_Z_COMMAND = 'Z';
const PACKET_HEADER PACKET_HEADER_INJECT_AUDIO = 'I';
const PACKET_HEADER PACKET_HEADER_MIXED_AUDIO = 'A';
const PACKET_HEADER PACKET_HEADER_MICROPHONE_AUDIO = 'M';
const PACKET_HEADER PACKET_HEADER_SET_VOXEL = 'S';
const PACKET_HEADER PACKET_HEADER_SET_VOXEL_DESTRUCTIVE = 'O';
const PACKET_HEADER PACKET_HEADER_ERASE_VOXEL = 'E';
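The two new headers let the mixer tag the stereo frames it sends back (PACKET_HEADER_MIXED_AUDIO) and let clients label their own microphone stream (PACKET_HEADER_MICROPHONE_AUDIO) so it is spatialized like injected audio. A hypothetical interface-side receive loop consuming the mixer's tagged reply — the socket, address, buffer, and audio variables are assumed names:

// Hypothetical client-side handling of the new audio packet headers; agentSocket,
// senderAddress, packetData, receivedBytes, and audio are assumed names.
while (agentSocket.receive(senderAddress, packetData, &receivedBytes)) {
    if (packetData[0] == PACKET_HEADER_MIXED_AUDIO) {
        // one mixed stereo frame addressed to this client: queue it for playback
        audio.addReceivedAudioToBuffer(packetData, receivedBytes);
    }
}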