Mirror of https://github.com/overte-org/overte.git (synced 2025-04-18 00:26:33 +02:00)
Merge branch 'master' of https://github.com/worklist/hifi
Commit 2885c14480
4 changed files with 7 additions and 12 deletions

@@ -149,9 +149,7 @@ int main(int argc, const char* argv[]) {
             int numSamplesDelay = 0;
             float weakChannelAmplitudeRatio = 1.f;
             
-            if (otherAgent != agent) {
-                printf("DEBUG: The bearing for this agent is %f\n", agentRingBuffer->getBearing());
-                
+            if (otherAgent != agent) {
                 Position agentPosition = agentRingBuffer->getPosition();
                 Position otherAgentPosition = otherAgentBuffer->getPosition();
                 
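The removed line was a per-agent debug print of the source bearing; the surrounding numSamplesDelay and weakChannelAmplitudeRatio suggest the mixer spatializes each remote source by delaying and attenuating the channel facing away from it. A minimal sketch of that idea, where the constants and the formula are illustrative assumptions rather than the mixer's actual math:

// Illustrative sketch: derive an inter-channel delay (in samples) and a
// weak-channel amplitude ratio from the bearing to a source.
// All constants below are assumptions, not values from the mixer.
#include <cmath>

const float SAMPLE_RATE = 22050.f;
const float PI_OVER_180 = 0.017453292f;
const float MAX_INTERAURAL_DELAY_SECS = 0.0007f;   // roughly a head-width of travel time
const float MIN_WEAK_CHANNEL_RATIO = 0.5f;         // never fully silence the far channel

void spatializeFromBearing(float bearingDegrees, int& numSamplesDelay, float& weakChannelAmplitudeRatio) {
    // 0 when the source is straight ahead or behind, 1 when it is fully to one side
    float sideness = fabsf(sinf(bearingDegrees * PI_OVER_180));

    // delay and attenuate the channel on the far side of the head
    numSamplesDelay = (int) (sideness * MAX_INTERAURAL_DELAY_SECS * SAMPLE_RATE);
    weakChannelAmplitudeRatio = 1.f - (1.f - MIN_WEAK_CHANNEL_RATIO) * sideness;
}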

@@ -37,9 +37,6 @@ const float AUDIO_INJECT_PROXIMITY = 0.4f;
 
 bool stopReceiveAgentDataThread;
 
-int TEMP_AUDIO_LISTEN_PORT = 55439;
-UDPSocket audioSocket(TEMP_AUDIO_LISTEN_PORT);
-
 void *receiveAgentData(void *args) {
     sockaddr senderAddress;
     ssize_t bytesReceived;
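With eve's private TEMP_AUDIO_LISTEN_PORT socket removed, receiveAgentData() presumably reads from a shared socket instead. A rough sketch of that kind of receive thread against plain POSIX sockets; the descriptor and MAX_PACKET_SIZE are assumptions for illustration:

// Sketch of a receive thread shaped like receiveAgentData(); the socket
// descriptor and buffer size are illustrative, not eve's real values.
#include <sys/socket.h>
#include <cstdio>

const int MAX_PACKET_SIZE = 1500;

bool stopReceiveAgentDataThread = false;
int agentSocketDescriptor = -1;   // assumed to be the shared agent UDP socket

void* receiveAgentData(void* args) {
    sockaddr senderAddress;
    socklen_t senderLength = sizeof(senderAddress);
    unsigned char incomingPacket[MAX_PACKET_SIZE];

    while (!stopReceiveAgentDataThread) {
        ssize_t bytesReceived = recvfrom(agentSocketDescriptor, incomingPacket, MAX_PACKET_SIZE, 0,
                                         &senderAddress, &senderLength);
        if (bytesReceived > 0) {
            // hand the packet to whatever parses agent data here
            printf("received %ld bytes of agent data\n", (long) bytesReceived);
        }
    }

    return NULL;
}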

@@ -152,7 +149,7 @@ int main(int argc, const char* argv[]) {
             
             // use the UDPSocket instance attached to our agent list to send avatar data to mixer
             agentList->getAgentSocket()->send(avatarMixer->getActiveSocket(), broadcastPacket, packetPosition - broadcastPacket);
         }
     }
     
     if (!eveAudioInjector.isInjectingAudio()) {
         // enumerate the other agents to decide if one is close enough that eve should talk
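The comment says eve only starts her injected audio when some other agent is close enough, and AUDIO_INJECT_PROXIMITY = 0.4f appears in the earlier hunk of this file. A minimal sketch of that proximity test, with the agent position type and the distance math assumed:

// Sketch: returns true if any other agent is within AUDIO_INJECT_PROXIMITY of eve.
// AgentPosition is a placeholder, not the project's real position type.
#include <cmath>
#include <vector>

const float AUDIO_INJECT_PROXIMITY = 0.4f;

struct AgentPosition { float x, y, z; };

bool someoneIsCloseEnough(const AgentPosition& evePosition, const std::vector<AgentPosition>& otherAgents) {
    for (const AgentPosition& other : otherAgents) {
        float dx = other.x - evePosition.x;
        float dy = other.y - evePosition.y;
        float dz = other.z - evePosition.z;
        if (sqrtf(dx * dx + dy * dy + dz * dz) < AUDIO_INJECT_PROXIMITY) {
            return true;
        }
    }
    return false;
}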

@@ -59,7 +59,7 @@ public:
     
     void wheelEvent(QWheelEvent* event);
     
-    const Avatar& getAvatar() const { return _myAvatar; }
+    Avatar* getAvatar() { return &_myAvatar; }
     
 private slots:
     
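The accessor changes from returning a const reference to returning a mutable pointer, which is what lets the audio code below switch from dot access on a copy to arrow access on the live avatar. A stripped-down illustration of the two styles; these class bodies are placeholders, not the real headers:

// Sketch of the old and new accessor shapes on a trimmed-down Application.
class Avatar {
public:
    float getAbsoluteHeadYaw() const { return _headYaw; }
    void setHeadYaw(float yaw) { _headYaw = yaw; }
private:
    float _headYaw = 0.f;
};

class Application {
public:
    // old style: const reference, read-only, and callers that want a local
    // variable end up copying the whole Avatar
    const Avatar& getAvatarRef() const { return _myAvatar; }

    // new style: pointer to the one live instance, usable for non-const calls
    Avatar* getAvatar() { return &_myAvatar; }
private:
    Avatar _myAvatar;
};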

@@ -85,7 +85,7 @@ int audioCallback (const void* inputBuffer,
     AgentList* agentList = AgentList::getInstance();
     
     Application* interface = (Application*) QCoreApplication::instance();
-    Avatar interfaceAvatar = interface->getAvatar();
+    Avatar* interfaceAvatar = interface->getAvatar();
     
     int16_t *inputLeft = ((int16_t **) inputBuffer)[0];
     
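Keeping a pointer matters here because this runs inside the audio callback, which fires for every buffer; the old form copied the whole Avatar each time. A small sketch of the difference, with Avatar and Application reduced to stand-ins:

// Sketch: read the live avatar through a pointer instead of copying it
// per audio buffer. The structs are placeholders for the real classes.
#include <cstdio>

struct Avatar {
    float headYaw = 0.f;
    float getAbsoluteHeadYaw() const { return headYaw; }
};

struct Application {
    Avatar myAvatar;
    Avatar* getAvatar() { return &myAvatar; }
};

int audioCallbackSketch(Application* interface) {
    // old: Avatar interfaceAvatar = *interface->getAvatar();
    //      copies the avatar on every buffer, hundreds of times a second
    // new: keep a pointer and read the live object
    Avatar* interfaceAvatar = interface->getAvatar();
    printf("yaw this buffer: %f\n", interfaceAvatar->getAbsoluteHeadYaw());
    return 0;
}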

@@ -119,14 +119,14 @@ int audioCallback (const void* inputBuffer,
     unsigned char *currentPacketPtr = dataPacket + 1;
     
     // memcpy the three float positions
-    memcpy(currentPacketPtr, &interfaceAvatar.getHeadPosition(), sizeof(float) * 3);
+    memcpy(currentPacketPtr, &interfaceAvatar->getHeadPosition(), sizeof(float) * 3);
     currentPacketPtr += (sizeof(float) * 3);
     
     // tell the mixer not to add additional attenuation to our source
     *(currentPacketPtr++) = 255;
     
     // memcpy the corrected render yaw
-    float correctedYaw = fmodf(-1 * interfaceAvatar.getAbsoluteHeadYaw(), 360);
+    float correctedYaw = fmodf(-1 * interfaceAvatar->getAbsoluteHeadYaw(), 360);
     
     if (correctedYaw > 180) {
         correctedYaw -= 360;
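This hunk hand-packs the audio packet header: three position floats, a single attenuation byte (255 meaning "do not attenuate this source further"), and the negated head yaw folded toward (-180, 180]. A sketch of that layout as one helper; the packet-type byte at offset 0, the lower-bound wrap, and the trailing yaw memcpy are assumptions completing the visible fragment:

// Sketch of the header layout suggested by the diff:
// [type byte][3 float position][1 byte attenuation][1 float corrected yaw]
#include <cmath>
#include <cstring>
#include <cstddef>

size_t writeAudioHeader(unsigned char* dataPacket, const float position[3], float absoluteHeadYaw) {
    unsigned char* currentPacketPtr = dataPacket + 1;   // byte 0 assumed to hold the packet type

    // the three float positions
    memcpy(currentPacketPtr, position, sizeof(float) * 3);
    currentPacketPtr += sizeof(float) * 3;

    // tell the mixer not to add additional attenuation to our source
    *(currentPacketPtr++) = 255;

    // negate the yaw, then fold it into (-180, 180]
    float correctedYaw = fmodf(-1 * absoluteHeadYaw, 360);
    if (correctedYaw > 180) {
        correctedYaw -= 360;
    } else if (correctedYaw < -180) {
        correctedYaw += 360;   // assumed symmetric wrap; not shown in the diff
    }
    memcpy(currentPacketPtr, &correctedYaw, sizeof(float));
    currentPacketPtr += sizeof(float);

    return currentPacketPtr - dataPacket;   // bytes written, including the type byte
}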

@@ -188,7 +188,7 @@ int audioCallback (const void* inputBuffer,
     // if we haven't fired off the flange effect, check if we should
     // TODO: lastMeasuredHeadYaw is now relative to body - check if this still works.
     
-    int lastYawMeasured = fabsf(interfaceAvatar.getLastMeasuredHeadYaw());
+    int lastYawMeasured = fabsf(interfaceAvatar->getLastMeasuredHeadYaw());
     
     if (!::samplesLeftForFlange && lastYawMeasured > MIN_FLANGE_EFFECT_THRESHOLD) {
         // we should flange for one second
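The trigger arms a one-second flange when the head has just turned past MIN_FLANGE_EFFECT_THRESHOLD and no flange is already running. A generic sketch of the effect such a trigger could drive, mixing each sample with a slowly sweeping delayed copy; the delay range, sweep rate, and mix are illustrative, not the interface's real parameters:

// Generic flange sketch: mix the dry signal with a delayed copy whose
// delay slowly sweeps. All numeric parameters are assumptions.
#include <cmath>
#include <cstdint>
#include <vector>

const float SAMPLE_RATE = 22050.f;
const int MAX_FLANGE_DELAY_SAMPLES = 200;
const float FLANGE_SWEEP_HZ = 0.25f;
const float TWO_PI = 6.2831853f;

void applyFlange(int16_t* samples, int numSamples, int& samplesLeftForFlange, float& sweepPhase) {
    static std::vector<int16_t> history(MAX_FLANGE_DELAY_SAMPLES, 0);
    static int writeIndex = 0;

    for (int i = 0; i < numSamples && samplesLeftForFlange > 0; i++, samplesLeftForFlange--) {
        history[writeIndex] = samples[i];

        // sweep the delay between 1 and MAX_FLANGE_DELAY_SAMPLES - 1 samples
        sweepPhase += TWO_PI * FLANGE_SWEEP_HZ / SAMPLE_RATE;
        int delay = 1 + (int) ((0.5f + 0.5f * sinf(sweepPhase)) * (MAX_FLANGE_DELAY_SAMPLES - 2));
        int readIndex = (writeIndex - delay + MAX_FLANGE_DELAY_SAMPLES) % MAX_FLANGE_DELAY_SAMPLES;

        // equal-weight mix of the dry sample and its delayed copy
        samples[i] = (int16_t) ((samples[i] + history[readIndex]) / 2);

        writeIndex = (writeIndex + 1) % MAX_FLANGE_DELAY_SAMPLES;
    }
}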