Mirror of https://github.com/overte-org/overte.git (synced 2025-04-22 19:13:38 +02:00)
Merge branch 'master' of https://github.com/worklist/hifi

Commit e9e6f97084: 5 changed files with 34 additions and 38 deletions
@@ -58,9 +58,8 @@ void attachAvatarDataToNode(Node* newNode) {
// determine which avatars are included in the packet stream
// 4) we should optimize the avatar data format to be more compact (100 bytes is pretty wasteful).
void broadcastAvatarData() {
    static unsigned char broadcastPacketBuffer[MAX_PACKET_SIZE];
    static unsigned char broadcastPacket[MAX_PACKET_SIZE];
    static unsigned char avatarDataBuffer[MAX_PACKET_SIZE];
    unsigned char* broadcastPacket = (unsigned char*)&broadcastPacketBuffer[0];
    int numHeaderBytes = populateTypeAndVersion(broadcastPacket, PACKET_TYPE_BULK_AVATAR_DATA);
    unsigned char* currentBufferPosition = broadcastPacket + numHeaderBytes;
    int packetLength = currentBufferPosition - broadcastPacket;
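A minimal sketch of the pattern this hunk sets up: write a short type/version header into a fixed-size static buffer, then continue payload writes from the returned offset. The helper, the two-byte header layout, and MAX_PACKET_SIZE here are assumptions for illustration, not the project's populateTypeAndVersion().

#include <cstdint>

const int MAX_PACKET_SIZE = 1500;   // assumption: an MTU-sized packet buffer

// hypothetical stand-in for populateTypeAndVersion(): writes a 2-byte header, returns its length
static int writeTypeAndVersion(unsigned char* packet, unsigned char type, unsigned char version) {
    packet[0] = type;
    packet[1] = version;
    return 2;
}

void buildPacketSketch() {
    static unsigned char broadcastPacket[MAX_PACKET_SIZE];   // one reusable static buffer, as in the hunk
    int numHeaderBytes = writeTypeAndVersion(broadcastPacket, 'A', 1);
    unsigned char* currentBufferPosition = broadcastPacket + numHeaderBytes;        // payload goes after the header
    int packetLength = (int)(currentBufferPosition - broadcastPacket);              // bytes used so far
    (void)packetLength;
}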
@@ -79,7 +78,9 @@ void broadcastAvatarData() {
    // send back a packet with other active node data to this node
    for (NodeList::iterator otherNode = nodeList->begin(); otherNode != nodeList->end(); otherNode++) {
        if (otherNode->getLinkedData() && otherNode->getUUID() != node->getUUID()) {
            unsigned char* avatarDataEndpoint = addNodeToBroadcastPacket((unsigned char*)&avatarDataBuffer[0], &*node);
            unsigned char* avatarDataEndpoint = addNodeToBroadcastPacket((unsigned char*)&avatarDataBuffer[0],
                                                                         &*otherNode);
            int avatarDataLength = avatarDataEndpoint - (unsigned char*)&avatarDataBuffer;

            if (avatarDataLength + packetLength <= MAX_PACKET_SIZE) {
@@ -89,7 +90,8 @@ void broadcastAvatarData() {
            } else {
                packetsSent++;
                //printf("packetsSent=%d packetLength=%d\n", packetsSent, packetLength);
                nodeList->getNodeSocket().writeDatagram((char*) broadcastPacket, currentBufferPosition - broadcastPacket,
                nodeList->getNodeSocket().writeDatagram((char*) broadcastPacket,
                                                        currentBufferPosition - broadcastPacket,
                                                        node->getActiveSocket()->getAddress(),
                                                        node->getActiveSocket()->getPort());
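The two hunks above reflow calls inside the packing loop of broadcastAvatarData(). Below is a minimal sketch of the underlying append-or-flush pattern: serialize each avatar record into a scratch buffer, and if appending it would overflow MAX_PACKET_SIZE, send the packet and start refilling after the header. The container type and the sendPacket() stub are assumptions standing in for the node list and writeDatagram().

#include <cstdio>
#include <cstring>
#include <vector>

static const int MAX_PACKET_SIZE = 1500;   // assumption

// stub standing in for nodeList->getNodeSocket().writeDatagram(...)
static void sendPacket(const unsigned char* data, int length) {
    printf("sending %d bytes\n", length);
    (void)data;
}

static void broadcastSketch(const std::vector< std::vector<unsigned char> >& avatarRecords,
                            unsigned char* packet, int numHeaderBytes) {
    unsigned char* currentBufferPosition = packet + numHeaderBytes;
    for (size_t n = 0; n < avatarRecords.size(); n++) {
        const std::vector<unsigned char>& record = avatarRecords[n];
        int packetLength = (int)(currentBufferPosition - packet);
        if (packetLength + (int)record.size() > MAX_PACKET_SIZE) {
            sendPacket(packet, packetLength);                  // flush the packet that is about to overflow
            currentBufferPosition = packet + numHeaderBytes;   // keep the header, restart the payload
        }
        memcpy(currentBufferPosition, record.data(), record.size());
        currentBufferPosition += record.size();
    }
    sendPacket(packet, (int)(currentBufferPosition - packet)); // send the final partial packet
}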
@@ -10,15 +10,6 @@
#include <stdlib.h>
#include <cmath>

#ifdef _WIN32
#include "Syssocket.h"
#include "Systime.h"
#else
#include <sys/time.h>
#include <arpa/inet.h>
#include <ifaddrs.h>
#endif

#include <glm/gtx/component_wise.hpp>
#include <glm/gtx/quaternion.hpp>
#include <glm/gtx/vector_angle.hpp>
@@ -2006,7 +1997,7 @@ void Application::updateAvatars(float deltaTime, glm::vec3 mouseRayOrigin, glm::

    for(NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
        node->lock();
        if (node->getLinkedData() != NULL) {
        if (node->getLinkedData()) {
            Avatar *avatar = (Avatar *)node->getLinkedData();
            if (!avatar->isInitialized()) {
                avatar->init();
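A minimal sketch of the per-node pattern shown in this hunk, with std::mutex standing in for Node::lock()/unlock() and a simplified Avatar type; only the truthy pointer check and the lazy init() call are taken from the diff.

#include <mutex>

struct AvatarSketch {
    bool initialized = false;
    void init() { initialized = true; }
};

struct NodeSketch {
    std::mutex mutex;                      // stands in for Node::lock() / Node::unlock()
    AvatarSketch* linkedData = nullptr;    // may be null until data is attached to the node
};

void updateAvatarsSketch(NodeSketch* nodes, int numNodes) {
    for (int i = 0; i < numNodes; i++) {
        std::lock_guard<std::mutex> guard(nodes[i].mutex);
        if (nodes[i].linkedData) {                     // truthy pointer check, as in the new line
            if (!nodes[i].linkedData->initialized) {
                nodes[i].linkedData->init();           // initialize the avatar on first use
            }
        }
    }
}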
@@ -2426,10 +2417,8 @@ void Application::updateAudio(float deltaTime) {
    PerformanceWarning warn(showWarnings, "Application::updateAudio()");

    // Update audio stats for procedural sounds
#ifndef _WIN32
    _audio.setLastAcceleration(_myAvatar.getThrust());
    _audio.setLastVelocity(_myAvatar.getVelocity());
#endif
}

void Application::updateCursor(float deltaTime) {
@@ -2569,10 +2558,8 @@ void Application::updateAvatar(float deltaTime) {
    }

    // Get audio loudness data from audio input device
#ifndef _WIN32
    _myAvatar.getHead().setAudioLoudness(_audio.getLastInputLoudness());
#endif
    _myAvatar.getHead().setAudioLoudness(_audio.getLastInputLoudness());

    NodeList* nodeList = NodeList::getInstance();

    // send head/hand data to the avatar mixer and voxel server
@@ -3203,14 +3190,12 @@ void Application::displayOverlay() {
        }
    }

#ifndef _WIN32
    if (Menu::getInstance()->isOptionChecked(MenuOption::Stats)) {
        _audio.render(_glWidget->width(), _glWidget->height());
        if (Menu::getInstance()->isOptionChecked(MenuOption::Oscilloscope)) {
            _audioScope.render(45, _glWidget->height() - 200);
        }
    }
#endif

    //noiseTest(_glWidget->width(), _glWidget->height());
@@ -186,15 +186,23 @@ void Audio::handleAudioInput() {
    QByteArray inputByteArray = _inputDevice->read(CALLBACK_IO_BUFFER_SIZE);

    if (_isBufferSendCallback) {
        // this is the second half of a full buffer of data

        // zero out the monoAudioSamples array
        memset(monoAudioSamples, 0, BUFFER_LENGTH_BYTES_PER_CHANNEL);

        // copy samples from the inputByteArray to the stereoInputBuffer
        memcpy((char*) (stereoInputBuffer + bufferSizeSamples), inputByteArray.data(), inputByteArray.size());

        // Measure the loudness of the signal from the microphone and store in audio object
        float loudness = 0;
        for (int i = 0; i < BUFFER_LENGTH_SAMPLES_PER_CHANNEL * SAMPLE_RATE_RATIO; i += 2) {
            loudness += abs(stereoInputBuffer[i]);
        }

        loudness /= BUFFER_LENGTH_SAMPLES_PER_CHANNEL * SAMPLE_RATE_RATIO;
        _lastInputLoudness = loudness;

    } else {
        // this is the first half of a full buffer of data
        // zero out the monoAudioSamples array
        memset(monoAudioSamples, 0, BUFFER_LENGTH_BYTES_PER_CHANNEL);

        // take samples we have in this callback and store them in the first half of the static buffer
        // to send off in the next callback
        memcpy((char*) stereoInputBuffer, inputByteArray.data(), inputByteArray.size());
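A minimal sketch of the loudness measurement this hunk adds to handleAudioInput(): average the absolute value of one channel of an interleaved stereo int16 buffer by stepping two samples at a time. The real code normalizes with its own buffer constants; this version simply divides by the number of samples visited.

#include <cstdint>
#include <cstdlib>

float measureInputLoudness(const int16_t* stereoSamples, int samplesPerChannel) {
    float loudness = 0.0f;
    // interleaved L/R samples: i += 2 visits one channel only
    for (int i = 0; i < samplesPerChannel * 2; i += 2) {
        loudness += std::abs(stereoSamples[i]);
    }
    return loudness / samplesPerChannel;   // mean absolute amplitude of the visited channel
}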
@@ -507,7 +515,7 @@ void Audio::render(int screenWidth, int screenHeight) {
}

// Take a pointer to the acquired microphone input samples and add procedural sounds
void Audio::addProceduralSounds(int16_t* inputBuffer, int16_t* stereoOutput, int numSamples) {
void Audio::addProceduralSounds(int16_t* monoInput, int16_t* stereoUpsampledOutput, int numSamples) {
    const float MAX_AUDIBLE_VELOCITY = 6.0;
    const float MIN_AUDIBLE_VELOCITY = 0.1;
    const int VOLUME_BASELINE = 400;
@@ -522,8 +530,8 @@ void Audio::addProceduralSounds(int16_t* inputBuffer, int16_t* stereoOutput, int numSamples) {
    // Add a noise-modulated sinewave with volume that tapers off with speed increasing
    if ((speed > MIN_AUDIBLE_VELOCITY) && (speed < MAX_AUDIBLE_VELOCITY)) {
        for (int i = 0; i < numSamples; i++) {
            inputBuffer[i] += (int16_t)(sinf((float) (_proceduralEffectSample + i) / SOUND_PITCH )
                              * volume * (1.f + randFloat() * 0.25f) * speed);
            monoInput[i] += (int16_t)(sinf((float) (_proceduralEffectSample + i) / SOUND_PITCH )
                              * volume * (1.f + randFloat() * 0.25f) * speed);
        }
    }
    const float COLLISION_SOUND_CUTOFF_LEVEL = 0.01f;
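A minimal sketch of the speed-gated, noise-modulated sine that this hunk renames variables in: skip the effect outside an audible speed window, otherwise add a randomly modulated sine scaled by speed to the mono buffer. SOUND_PITCH and the flat volume value are assumptions; only the gating thresholds and the 25 percent modulation factor come from the diff.

#include <cstdint>
#include <cstdlib>
#include <cmath>

void addMotionToneSketch(int16_t* monoSamples, int numSamples, float speed, int sampleOffset) {
    const float MIN_AUDIBLE_VELOCITY = 0.1f;
    const float MAX_AUDIBLE_VELOCITY = 6.0f;   // thresholds taken from the diff
    const float SOUND_PITCH = 8.0f;            // assumption: divisor controlling the tone's pitch
    const float volume = 400.0f;               // assumption: flat amplitude standing in for the real volume term

    if (speed <= MIN_AUDIBLE_VELOCITY || speed >= MAX_AUDIBLE_VELOCITY) {
        return;   // outside the audible range: leave the buffer untouched
    }
    for (int i = 0; i < numSamples; i++) {
        float noise = 1.0f + 0.25f * (rand() / (float) RAND_MAX);   // mild random modulation, as in the diff
        monoSamples[i] += (int16_t)(sinf((float)(sampleOffset + i) / SOUND_PITCH) * volume * noise * speed);
    }
}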
@@ -536,17 +544,17 @@ void Audio::addProceduralSounds(int16_t* inputBuffer, int16_t* stereoOutput, int numSamples) {
    for (int i = 0; i < numSamples; i++) {
        t = (float) _proceduralEffectSample + (float) i;

        sample = sinf(t * _collisionSoundFrequency) +
                 sinf(t * _collisionSoundFrequency / DOWN_TWO_OCTAVES) +
                 sinf(t * _collisionSoundFrequency / DOWN_FOUR_OCTAVES * UP_MAJOR_FIFTH);
        sample = sinf(t * _collisionSoundFrequency)
                 + sinf(t * _collisionSoundFrequency / DOWN_TWO_OCTAVES)
                 + sinf(t * _collisionSoundFrequency / DOWN_FOUR_OCTAVES * UP_MAJOR_FIFTH);
        sample *= _collisionSoundMagnitude * COLLISION_SOUND_MAX_VOLUME;

        int16_t collisionSample = (int16_t) sample;

        inputBuffer[i] += collisionSample;
        monoInput[i] += collisionSample;

        for (int j = (i * 4); j < (i * 4) + 4; j++) {
            stereoOutput[j] += collisionSample;
            stereoUpsampledOutput[j] += collisionSample;
        }

        _collisionSoundMagnitude *= _collisionSoundDuration;
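The inner loop in this hunk writes each generated mono sample into four slots of the stereo output; the new parameter name stereoUpsampledOutput suggests two channels at twice the sample rate, which would account for the factor of four. A minimal sketch of that fan-out, under that assumption:

#include <cstdint>

// stereoUpsampled is assumed to hold 4 * numMonoSamples elements
void fanOutMonoSketch(const int16_t* monoSamples, int numMonoSamples, int16_t* stereoUpsampled) {
    for (int i = 0; i < numMonoSamples; i++) {
        // each mono sample lands in output slots i*4 .. i*4 + 3
        // (assumed: 2 channels x 2x upsampling = 4 output samples per input sample)
        for (int j = i * 4; j < (i * 4) + 4; j++) {
            stereoUpsampled[j] += monoSamples[i];
        }
    }
}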
@@ -569,10 +577,10 @@ void Audio::addProceduralSounds(int16_t* inputBuffer, int16_t* stereoOutput, int numSamples) {

        int16_t collisionSample = (int16_t) sample;

        inputBuffer[i] += collisionSample;
        monoInput[i] += collisionSample;

        for (int j = (i * 4); j < (i * 4) + 4; j++) {
            stereoOutput[j] += collisionSample;
            stereoUpsampledOutput[j] += collisionSample;
        }

        _drumSoundVolume *= (1.f - _drumSoundDecay);
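Both procedural effects above fade out by multiplying their magnitude by a factor slightly below one on every pass. A minimal sketch of that exponential decay; the below-cutoff reset is an assumption rather than something shown in the diff.

void decayEffectSketch(float& magnitude, float decayFactor, float cutoffLevel) {
    magnitude *= (1.0f - decayFactor);   // e.g. _drumSoundVolume *= (1.f - _drumSoundDecay)
    if (magnitude < cutoffLevel) {
        magnitude = 0.0f;                // assumed: stop rendering the effect once it is inaudible
    }
}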
@@ -113,7 +113,7 @@ private:
    inline void performIO(int16_t* inputLeft, int16_t* outputLeft, int16_t* outputRight);

    // Add sounds that we want the user to not hear themselves, by adding on top of mic input signal
    void addProceduralSounds(int16_t* inputBuffer, int16_t* stereoOutput, int numSamples);
    void addProceduralSounds(int16_t* monoInput, int16_t* stereoUpsampledOutput, int numSamples);

    void renderToolIcon(int screenHeight);
};
@@ -175,6 +175,7 @@ void NodeList::processBulkNodeData(const HifiSockAddr& senderAddress, unsigned c
    Node* bulkSendNode = nodeWithAddress(senderAddress);

    if (bulkSendNode) {

        bulkSendNode->setLastHeardMicrostamp(usecTimestampNow());
        bulkSendNode->recordBytesReceived(numTotalBytes);
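A minimal sketch of the bookkeeping this hunk touches in processBulkNodeData(): when a bulk packet arrives from a known sender, refresh its last-heard timestamp and add to its received-byte counter before parsing. The struct and the microsecond clock helper are illustrative stand-ins for Node and usecTimestampNow().

#include <cstdint>
#include <chrono>

struct NodeStatsSketch {
    uint64_t lastHeardMicrostamp = 0;
    uint64_t bytesReceived = 0;
};

// stand-in for usecTimestampNow()
static uint64_t usecNowSketch() {
    using namespace std::chrono;
    return (uint64_t) duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count();
}

void recordBulkPacketSketch(NodeStatsSketch* bulkSendNode, int numTotalBytes) {
    if (bulkSendNode) {   // the sender may not be in the node list yet
        bulkSendNode->lastHeardMicrostamp = usecNowSketch();
        bulkSendNode->bytesReceived += (uint64_t) numTotalBytes;
    }
}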