Merge pull request #1414 from birarda/injected-audio
C++ API for Audio Injection of samples from URL
commit a6c2334f49
15 changed files with 289 additions and 18 deletions
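For orientation, here is a rough usage sketch of the API this commit adds, based on the AudioInjector and AbstractAudioInterface headers shown further down in the diff. The helper function name, the URL, and the position/volume values are illustrative assumptions, not code from the commit.

// Hedged usage sketch (not part of the commit): drive the new injector from application code.
#include <QtCore/QUrl>
#include <glm/glm.hpp>

#include <AudioInjector.h>

void playSampleFromURL(AbstractAudioInterface* localAudio) {   // hypothetical helper
    // construction moves the injector onto its own QThread and starts the download
    AudioInjector* injector = new AudioInjector(QUrl("http://example.com/sample.raw")); // URL is illustrative

    injector->setPosition(glm::vec3(5.0f, 0.0f, 5.0f));  // where the sound is placed in the domain
    injector->setVolume(1.0f);                           // scaled into the packet's attenuation byte
    injector->setShouldLoopback(false);                  // don't ask the mixer to echo it back to the sender

    // queues injectAudio() on the injector's thread; a non-NULL local interface also
    // receives the raw bytes via handleAudioByteArray() for local playback
    injector->injectViaThread(localAudio);
}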
@@ -22,6 +22,8 @@
 const char ASSIGNMENT_CLIENT_TARGET_NAME[] = "assignment-client";
 const long long ASSIGNMENT_REQUEST_INTERVAL_MSECS = 1 * 1000;
 
+int hifiSockAddrMeta = qRegisterMetaType<HifiSockAddr>("HifiSockAddr");
+
 AssignmentClient::AssignmentClient(int &argc, char **argv,
                                    Assignment::Type requestAssignmentType,
                                    const HifiSockAddr& customAssignmentServerSocket,
@@ -31,7 +33,6 @@ AssignmentClient::AssignmentClient(int &argc, char **argv,
     _currentAssignment(NULL)
 {
     // register meta type is required for queued invoke method on Assignment subclasses
-    qRegisterMetaType<HifiSockAddr>("HifiSockAddr");
 
     // set the logging target to the the CHILD_TARGET_NAME
     Logging::setTargetName(ASSIGNMENT_CLIENT_TARGET_NAME);
@@ -53,8 +53,6 @@
 const short JITTER_BUFFER_MSECS = 12;
 const short JITTER_BUFFER_SAMPLES = JITTER_BUFFER_MSECS * (SAMPLE_RATE / 1000.0);
 
-const unsigned int BUFFER_SEND_INTERVAL_USECS = floorf((NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float) SAMPLE_RATE) * 1000 * 1000);
-
 const char AUDIO_MIXER_LOGGING_TARGET_NAME[] = "audio-mixer";
 
 void attachNewBufferToNode(Node *newNode) {
@@ -202,8 +200,7 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {
             PositionalAudioRingBuffer* otherNodeBuffer = otherNodeClientData->getRingBuffers()[i];
 
             if ((*otherNode != *node
-                 || otherNodeBuffer->getType() != PositionalAudioRingBuffer::Microphone
-                 || nodeRingBuffer->shouldLoopbackForNode())
+                 || otherNodeBuffer->shouldLoopbackForNode())
                 && otherNodeBuffer->willBeAddedToMix()) {
                 addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer);
             }
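The condition above is compact; as a reading aid, the same logic can be restated with a small helper (hypothetical, not in the diff): a buffer is mixed for a listener when it is not the listener's own stream, or when it is explicitly flagged for loopback, and in either case only if it is marked to be added to the mix.

// Restated mix condition (illustrative helper, not part of the commit).
static bool shouldMixBufferForListener(bool isListenersOwnStream, bool bufferWantsLoopback, bool willBeAddedToMix) {
    return (!isListenersOwnStream || bufferWantsLoopback) && willBeAddedToMix;
}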
@@ -11,8 +11,7 @@
 #include "AvatarAudioRingBuffer.h"
 
 AvatarAudioRingBuffer::AvatarAudioRingBuffer() :
-    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone),
-    _shouldLoopbackForNode(false) {
+    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Microphone) {
 
 }
@@ -18,14 +18,10 @@ public:
     AvatarAudioRingBuffer();
 
     int parseData(unsigned char* sourceBuffer, int numBytes);
-
-    bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
 private:
     // disallow copying of AvatarAudioRingBuffer objects
     AvatarAudioRingBuffer(const AvatarAudioRingBuffer&);
     AvatarAudioRingBuffer& operator= (const AvatarAudioRingBuffer&);
-
-    bool _shouldLoopbackForNode;
 };
 
 #endif /* defined(__hifi__AvatarAudioRingBuffer__) */
@@ -38,6 +38,7 @@
 #include <QFileDialog>
 #include <QDesktopServices>
 
+#include <AudioInjector.h>
 #include <NodeTypes.h>
 #include <Logging.h>
 #include <OctalCode.h>
@@ -42,7 +42,7 @@ static const int ICON_LEFT = 20;
 static const int BOTTOM_PADDING = 110;
 
 Audio::Audio(Oscilloscope* scope, int16_t initialJitterBufferSamples, QObject* parent) :
-    QObject(parent),
+    AbstractAudioInterface(parent),
     _audioInput(NULL),
     _desiredInputFormat(),
     _inputFormat(),
@@ -458,9 +458,37 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
         int16_t ringBufferSamples[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO];
         _ringBuffer.readSamples(ringBufferSamples, NETWORK_BUFFER_LENGTH_SAMPLES_STEREO);
 
+        // add the next NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL from each QByteArray
+        // in our _localInjectionByteArrays QVector to the _localInjectedSamples
+
         // add to the output samples whatever is in the _localAudioOutput byte array
        // that lets this user hear sound effects and loopback (if enabled)
 
+        for (int b = 0; b < _localInjectionByteArrays.size(); b++) {
+            QByteArray audioByteArray = _localInjectionByteArrays.at(b);
+
+            int16_t* byteArraySamples = (int16_t*) audioByteArray.data();
+
+            int samplesToRead = MIN(audioByteArray.size() / sizeof(int16_t),
+                                    NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
+
+            for (int i = 0; i < samplesToRead; i++) {
+                _localInjectedSamples[i] = glm::clamp(_localInjectedSamples[i] + byteArraySamples[i],
+                                                      MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
+            }
+
+            if (samplesToRead < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
+                // there isn't anything left to inject from this byte array, remove it from the vector
+                _localInjectionByteArrays.remove(b);
+            } else {
+                // pull out the bytes we just read for outputs
+                audioByteArray.remove(0, samplesToRead * sizeof(int16_t));
+
+                // still data left to read - replace the byte array in the QVector with the smaller one
+                _localInjectionByteArrays.replace(b, audioByteArray);
+            }
+        }
+
         for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
             ringBufferSamples[i * 2] = glm::clamp(ringBufferSamples[i * 2] + _localInjectedSamples[i],
                                                   MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
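The glm::clamp calls in this hunk exist because summing two 16-bit samples can overflow int16_t. A toy illustration of the same idea in plain standard C++, using literal limits rather than the project's MIN_SAMPLE_VALUE / MAX_SAMPLE_VALUE constants:

#include <algorithm>
#include <cstdint>

// Widen, add, then clamp back into the signed 16-bit range, as the mixing loops above do.
int16_t mixTwoSamples(int16_t a, int16_t b) {
    int mixed = int(a) + int(b);
    mixed = std::max(-32768, std::min(32767, mixed));
    return int16_t(mixed);
}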
@@ -696,6 +724,11 @@ void Audio::startDrumSound(float volume, float frequency, float duration, float
     _drumSoundSample = 0;
 }
 
+void Audio::handleAudioByteArray(const QByteArray& audioByteArray) {
+    // add this byte array to our QVector
+    _localInjectionByteArrays.append(audioByteArray);
+}
+
 void Audio::renderToolIcon(int screenHeight) {
 
     _iconBounds = QRect(ICON_LEFT, screenHeight - BOTTOM_PADDING, ICON_SIZE, ICON_SIZE);
@@ -15,6 +15,7 @@
 #include "InterfaceConfig.h"
 
 #include <QtCore/QObject>
+#include <QtCore/QVector>
 #include <QtMultimedia/QAudioFormat>
 
 #include <AbstractAudioInterface.h>
@@ -31,7 +32,7 @@ class QAudioInput;
 class QAudioOutput;
 class QIODevice;
 
-class Audio : public QObject, public AbstractAudioInterface {
+class Audio : public AbstractAudioInterface {
     Q_OBJECT
 public:
     // setup for audio I/O
@@ -51,7 +52,7 @@ public:
 
     virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen);
     virtual void startDrumSound(float volume, float frequency, float duration, float decay);
 
     float getCollisionSoundMagnitude() { return _collisionSoundMagnitude; }
 
     bool getCollisionFlashesScreen() { return _collisionFlashesScreen; }
@@ -65,14 +66,17 @@ public slots:
     void handleAudioInput();
     void reset();
 
+    virtual void handleAudioByteArray(const QByteArray& audioByteArray);
+
 private:
     QByteArray firstInputFrame;
     QAudioInput* _audioInput;
     QAudioFormat _desiredInputFormat;
     QAudioFormat _inputFormat;
     QIODevice* _inputDevice;
-    int16_t _localInjectedSamples[NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL];
     int _numInputCallbackBytes;
+    int16_t _localInjectedSamples[NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL];
+    QVector<QByteArray> _localInjectionByteArrays;
     QAudioOutput* _audioOutput;
     QAudioFormat _desiredOutputFormat;
     QAudioFormat _outputFormat;
@@ -10,10 +10,19 @@
 #ifndef __hifi__AbstractAudioInterface__
 #define __hifi__AbstractAudioInterface__
 
-class AbstractAudioInterface {
+#include <QtCore/QObject>
+
+class AbstractAudioInterface : public QObject {
+    Q_OBJECT
 public:
+    AbstractAudioInterface(QObject* parent = 0) : QObject(parent) {};
+
     virtual void startCollisionSound(float magnitude, float frequency, float noise, float duration, bool flashScreen) = 0;
     virtual void startDrumSound(float volume, float frequency, float duration, float decay) = 0;
+public slots:
+    virtual void handleAudioByteArray(const QByteArray& audioByteArray) = 0;
 };
 
+Q_DECLARE_METATYPE(AbstractAudioInterface*)
+
 #endif /* defined(__hifi__AbstractAudioInterface__) */
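Two things happen in this hunk: the interface becomes a QObject so that handleAudioByteArray can be a slot invoked across threads, and Audio (earlier in the diff) stops inheriting QObject directly, since Qt allows only one QObject base class. A minimal, hypothetical implementation of the revised interface might look like the sketch below; Audio in this commit does essentially the same thing, appending the bytes to a QVector that is drained during output mixing.

#include <QtCore/QByteArray>
#include <QtCore/QVector>

#include <AbstractAudioInterface.h>

// Illustrative only: a do-nothing implementation that just queues injected bytes.
class NullAudioInterface : public AbstractAudioInterface {
    Q_OBJECT
public:
    NullAudioInterface(QObject* parent = 0) : AbstractAudioInterface(parent) { }

    virtual void startCollisionSound(float, float, float, float, bool) { }
    virtual void startDrumSound(float, float, float, float) { }

public slots:
    // called via QMetaObject::invokeMethod from AudioInjector, possibly from another thread
    virtual void handleAudioByteArray(const QByteArray& audioByteArray) {
        _pendingInjections.append(audioByteArray);
    }

private:
    QVector<QByteArray> _pendingInjections;
};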
libraries/audio/src/AudioInjector.cpp (new file, 162 lines)
@@ -0,0 +1,162 @@
+//
+//  AudioInjector.cpp
+//  hifi
+//
+//  Created by Stephen Birarda on 12/19/2013.
+//  Copyright (c) 2013 HighFidelity, Inc. All rights reserved.
+//
+
+#include <sys/time.h>
+
+#include <QtNetwork/QNetworkAccessManager>
+#include <QtNetwork/QNetworkReply>
+#include <QtNetwork/QNetworkRequest>
+
+#include <NodeList.h>
+#include <PacketHeaders.h>
+#include <SharedUtil.h>
+#include <UUID.h>
+
+#include "AbstractAudioInterface.h"
+#include "AudioRingBuffer.h"
+
+#include "AudioInjector.h"
+
+int abstractAudioPointerMeta = qRegisterMetaType<AbstractAudioInterface*>("AbstractAudioInterface*");
+
+AudioInjector::AudioInjector(const QUrl& sampleURL) :
+    _currentSendPosition(0),
+    _sourceURL(sampleURL),
+    _position(0,0,0),
+    _orientation(),
+    _volume(1.0f),
+    _shouldLoopback(true)
+{
+    // we want to live on our own thread
+    moveToThread(&_thread);
+    connect(&_thread, SIGNAL(started()), this, SLOT(startDownload()));
+    _thread.start();
+}
+
+void AudioInjector::startDownload() {
+    // assume we have a QApplication or QCoreApplication instance and use the
+    // QNetworkAccess manager to grab the raw audio file at the given URL
+
+    QNetworkAccessManager *manager = new QNetworkAccessManager(this);
+    connect(manager, SIGNAL(finished(QNetworkReply*)),
+            this, SLOT(replyFinished(QNetworkReply*)));
+
+    manager->get(QNetworkRequest(_sourceURL));
+}
+
+void AudioInjector::replyFinished(QNetworkReply* reply) {
+    // replace our samples array with the downloaded data
+    _sampleByteArray = reply->readAll();
+}
+
+void AudioInjector::injectViaThread(AbstractAudioInterface* localAudioInterface) {
+    // use Qt::AutoConnection so that this is called on our thread, if appropriate
+    QMetaObject::invokeMethod(this, "injectAudio", Qt::AutoConnection, Q_ARG(AbstractAudioInterface*, localAudioInterface));
+}
+
+void AudioInjector::injectAudio(AbstractAudioInterface* localAudioInterface) {
+
+    // make sure we actually have samples downloaded to inject
+    if (_sampleByteArray.size()) {
+        // give our sample byte array to the local audio interface, if we have it, so it can be handled locally
+        if (localAudioInterface) {
+            // assume that localAudioInterface could be on a separate thread, use Qt::AutoConnection to handle properly
+            QMetaObject::invokeMethod(localAudioInterface, "handleAudioByteArray",
+                                      Qt::AutoConnection,
+                                      Q_ARG(QByteArray, _sampleByteArray));
+
+        }
+
+        NodeList* nodeList = NodeList::getInstance();
+
+        // reset the current send position to the beginning
+        _currentSendPosition = 0;
+
+        // setup the packet for injected audio
+        unsigned char injectedAudioPacket[MAX_PACKET_SIZE];
+        unsigned char* currentPacketPosition = injectedAudioPacket;
+
+        int numBytesPacketHeader = populateTypeAndVersion(injectedAudioPacket, PACKET_TYPE_INJECT_AUDIO);
+        currentPacketPosition += numBytesPacketHeader;
+
+        // pack the session UUID for this Node
+        QByteArray rfcSessionUUID = NodeList::getInstance()->getOwnerUUID().toRfc4122();
+        memcpy(currentPacketPosition, rfcSessionUUID.constData(), rfcSessionUUID.size());
+        currentPacketPosition += rfcSessionUUID.size();
+
+        // pick a random UUID to use for this stream
+        QUuid randomStreamUUID;
+        QByteArray rfcStreamUUID = randomStreamUUID.toRfc4122();
+        memcpy(currentPacketPosition, rfcStreamUUID, rfcStreamUUID.size());
+        currentPacketPosition += rfcStreamUUID.size();
+
+        // pack the flag for loopback
+        memcpy(currentPacketPosition, &_shouldLoopback, sizeof(_shouldLoopback));
+        currentPacketPosition += sizeof(_shouldLoopback);
+
+        // pack the position for injected audio
+        memcpy(currentPacketPosition, &_position, sizeof(_position));
+        currentPacketPosition += sizeof(_position);
+
+        // pack our orientation for injected audio
+        memcpy(currentPacketPosition, &_orientation, sizeof(_orientation));
+        currentPacketPosition += sizeof(_orientation);
+
+        // pack zero for radius
+        float radius = 0;
+        memcpy(currentPacketPosition, &radius, sizeof(radius));
+        currentPacketPosition += sizeof(radius);
+
+        // pack 255 for attenuation byte
+        uchar volume = MAX_INJECTOR_VOLUME * _volume;
+        memcpy(currentPacketPosition, &volume, sizeof(volume));
+        currentPacketPosition += sizeof(volume);
+
+        timeval startTime = {};
+        gettimeofday(&startTime, NULL);
+        int nextFrame = 0;
+
+        // loop to send off our audio in NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL byte chunks
+        while (_currentSendPosition < _sampleByteArray.size()) {
+
+            int bytesToCopy = std::min(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL,
+                                       _sampleByteArray.size() - _currentSendPosition);
+
+            // copy the next NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL bytes to the packet
+            memcpy(currentPacketPosition, _sampleByteArray.data() + _currentSendPosition,
+                   bytesToCopy);
+
+            // grab our audio mixer from the NodeList, if it exists
+            Node* audioMixer = nodeList->soloNodeOfType(NODE_TYPE_AUDIO_MIXER);
+
+            if (audioMixer && nodeList->getNodeActiveSocketOrPing(audioMixer)) {
+                // send off this audio packet
+                nodeList->getNodeSocket().writeDatagram((char*) injectedAudioPacket,
+                                                        (currentPacketPosition - injectedAudioPacket) + bytesToCopy,
+                                                        audioMixer->getActiveSocket()->getAddress(),
+                                                        audioMixer->getActiveSocket()->getPort());
+            }
+
+            _currentSendPosition += bytesToCopy;
+
+            // send two packets before the first sleep so the mixer can start playback right away
+            if (_currentSendPosition != bytesToCopy && _currentSendPosition < _sampleByteArray.size()) {
+                // not the first packet and not done
+                // sleep for the appropriate time
+                int usecToSleep = usecTimestamp(&startTime) + (++nextFrame * BUFFER_SEND_INTERVAL_USECS) - usecTimestampNow();
+
+                if (usecToSleep > 0) {
+                    usleep(usecToSleep);
+                }
+            }
+        }
+    }
+}
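For reference, the memcpy sequence in injectAudio() lays out the PACKET_TYPE_INJECT_AUDIO payload as follows. The field sizes follow directly from the code above; the header length comes from populateTypeAndVersion and is not a fixed constant.

// PACKET_TYPE_INJECT_AUDIO layout as packed by AudioInjector::injectAudio():
//   type + version header       numBytesPacketHeader bytes
//   session UUID (RFC 4122)     16 bytes
//   stream UUID (RFC 4122)      16 bytes
//   loopback flag               1 byte (uchar)
//   position                    sizeof(glm::vec3) = 12 bytes
//   orientation                 sizeof(glm::quat) = 16 bytes
//   radius                      4 bytes (float, always 0 here)
//   volume / attenuation byte   1 byte (uchar, 0-255)
//   audio samples               up to NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL (512) bytes of int16_t data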
libraries/audio/src/AudioInjector.h (new file, 54 lines)
@@ -0,0 +1,54 @@
+//
+//  AudioInjector.h
+//  hifi
+//
+//  Created by Stephen Birarda on 12/19/2013.
+//  Copyright (c) 2013 HighFidelity, Inc. All rights reserved.
+//
+
+#ifndef __hifi__AudioInjector__
+#define __hifi__AudioInjector__
+
+#include <QtCore/QObject>
+#include <QtCore/QThread>
+#include <QtCore/QUrl>
+
+#include <glm/glm.hpp>
+#include <glm/gtx/quaternion.hpp>
+
+class AbstractAudioInterface;
+class QNetworkReply;
+
+const uchar MAX_INJECTOR_VOLUME = 0xFF;
+
+class AudioInjector : public QObject {
+    Q_OBJECT
+public:
+    AudioInjector(const QUrl& sampleURL);
+
+    int size() const { return _sampleByteArray.size(); }
+
+    void setPosition(const glm::vec3& position) { _position = position; }
+    void setOrientation(const glm::quat& orientation) { _orientation = orientation; }
+    void setVolume(float volume) { _volume = std::max(fabsf(volume), 1.0f); }
+    void setShouldLoopback(bool shouldLoopback) { _shouldLoopback = shouldLoopback; }
+public slots:
+    void injectViaThread(AbstractAudioInterface* localAudioInterface = NULL);
+
+private:
+    QByteArray _sampleByteArray;
+    int _currentSendPosition;
+    QThread _thread;
+    QUrl _sourceURL;
+    glm::vec3 _position;
+    glm::quat _orientation;
+    float _volume;
+    uchar _shouldLoopback;
+
+private slots:
+    void startDownload();
+    void replyFinished(QNetworkReply* reply);
+    void injectAudio(AbstractAudioInterface* localAudioInterface);
+};
+
+#endif /* defined(__hifi__AudioInjector__) */
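A note on the threading design: the injector moves itself to its own QThread in its constructor, so injectViaThread() relays to the private injectAudio() slot with QMetaObject::invokeMethod. For the AbstractAudioInterface* argument to survive a queued, cross-thread call, Qt has to know the type, which is what the Q_DECLARE_METATYPE in AbstractAudioInterface.h and the qRegisterMetaType call at the top of AudioInjector.cpp provide. The sketch below restates what injectViaThread() does internally; the helper is illustrative, not part of the commit.

#include <QtCore/QMetaObject>

#include <AudioInjector.h>
#include <AbstractAudioInterface.h>

// Queue injectAudio() onto the injector's own thread, passing the interface pointer
// as a registered metatype (without the registration, Q_ARG could not copy it).
void queueInjection(AudioInjector* injector, AbstractAudioInterface* localAudioInterface) {  // illustrative helper
    QMetaObject::invokeMethod(injector, "injectAudio", Qt::AutoConnection,
                              Q_ARG(AbstractAudioInterface*, localAudioInterface));
}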
@@ -25,6 +25,9 @@ const int NETWORK_BUFFER_LENGTH_SAMPLES_STEREO = NETWORK_BUFFER_LENGTH_BYTES_STE
 const int NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL = 512;
 const int NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL / sizeof(int16_t);
 
+const unsigned int BUFFER_SEND_INTERVAL_USECS = floorf((NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL
+                                                        / (float) SAMPLE_RATE) * 1000 * 1000);
+
 const short RING_BUFFER_LENGTH_FRAMES = 10;
 
 const int MAX_SAMPLE_VALUE = std::numeric_limits<int16_t>::max();
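As a quick sanity check of the constant that just moved here from AudioMixer.cpp: 512 bytes per channel at 2 bytes per sample gives 256 samples per network frame. Assuming SAMPLE_RATE is 24000 Hz (the value is defined elsewhere in the library and not visible in this diff, so treat it as an assumption), the send interval works out to about 10.7 ms.

// Back-of-the-envelope check of BUFFER_SEND_INTERVAL_USECS, assuming SAMPLE_RATE == 24000.
#include <cmath>
#include <cstdio>

int main() {
    const int bytesPerChannel = 512;                    // NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL
    const int samplesPerChannel = bytesPerChannel / 2;  // 16-bit samples -> 256
    const float sampleRate = 24000.0f;                  // assumed value of SAMPLE_RATE
    unsigned int intervalUsecs = floorf((samplesPerChannel / sampleRate) * 1000 * 1000);
    printf("one network frame every %u usec\n", intervalUsecs); // 10666 usec, roughly 10.7 ms
    return 0;
}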
@@ -32,6 +32,12 @@ int InjectedAudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes
     // push past the UUID for this node and the stream identifier
     currentBuffer += (NUM_BYTES_RFC4122_UUID * 2);
 
+    // pull the loopback flag and set our boolean
+    uchar shouldLoopback;
+    memcpy(&shouldLoopback, currentBuffer, sizeof(shouldLoopback));
+    currentBuffer += sizeof(shouldLoopback);
+    _shouldLoopbackForNode = (shouldLoopback == 1);
+
     // use parsePositionalData in parent PostionalAudioRingBuffer class to pull common positional data
     currentBuffer += parsePositionalData(currentBuffer, numBytes - (currentBuffer - sourceBuffer));
 
@@ -42,6 +48,8 @@ int InjectedAudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes
     unsigned int attenuationByte = *(currentBuffer++);
     _attenuationRatio = attenuationByte / (float) MAX_INJECTOR_VOLUME;
 
+    qDebug() << "Copying" << numBytes - (currentBuffer - sourceBuffer) << "for injected ring buffer\n";
+
     currentBuffer += writeData((char*) currentBuffer, numBytes - (currentBuffer - sourceBuffer));
 
     return currentBuffer - sourceBuffer;
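The attenuation byte read here is the inverse of the packing in AudioInjector::injectAudio(): the injector scales its float volume into a 0-255 byte, and the ring buffer divides it back out. A worked round trip for a half-volume injector (plain arithmetic, not taken from the diff):

// Round trip of the volume/attenuation byte for an injector _volume of 0.5f
// (MAX_INJECTOR_VOLUME is 0xFF per AudioInjector.h).
unsigned char maxInjectorVolume = 0xFF;
float injectorVolume = 0.5f;
unsigned char attenuationByte = maxInjectorVolume * injectorVolume;       // 255 * 0.5 -> 127 (truncated)
float attenuationRatio = attenuationByte / (float) maxInjectorVolume;     // 127 / 255, about 0.498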
@@ -19,7 +19,8 @@ PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::
     _type(type),
     _position(0.0f, 0.0f, 0.0f),
     _orientation(0.0f, 0.0f, 0.0f, 0.0f),
-    _willBeAddedToMix(false)
+    _willBeAddedToMix(false),
+    _shouldLoopbackForNode(false)
 {
 
 }
@@ -33,6 +33,8 @@ public:
     bool willBeAddedToMix() const { return _willBeAddedToMix; }
     void setWillBeAddedToMix(bool willBeAddedToMix) { _willBeAddedToMix = willBeAddedToMix; }
 
+    bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
+
     PositionalAudioRingBuffer::Type getType() const { return _type; }
     const glm::vec3& getPosition() const { return _position; }
     const glm::quat& getOrientation() const { return _orientation; }
@@ -46,6 +48,7 @@ protected:
     glm::vec3 _position;
     glm::quat _orientation;
     bool _willBeAddedToMix;
+    bool _shouldLoopbackForNode;
 };
 
 #endif /* defined(__hifi__PositionalAudioRingBuffer__) */
@@ -18,7 +18,7 @@ PACKET_VERSION versionForPacketType(PACKET_TYPE type) {
         case PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO:
         case PACKET_TYPE_MICROPHONE_AUDIO_WITH_ECHO:
             return 2;
 
         case PACKET_TYPE_HEAD_DATA:
             return 12;
 