move audio env data over to new packet

This commit is contained in:
Atlante45 2014-10-24 14:27:55 -07:00
parent a7663db8e7
commit 847ef64b66
6 changed files with 68 additions and 62 deletions

View file

@@ -638,8 +638,9 @@ void AudioMixer::run() {
int nextFrame = 0; int nextFrame = 0;
QElapsedTimer timer; QElapsedTimer timer;
timer.start(); timer.start();
char clientMixBuffer[MAX_PACKET_SIZE]; char clientMixBuffer[MAX_PACKET_SIZE];
char clientEnvBuffer[MAX_PACKET_SIZE];
int usecToSleep = BUFFER_SEND_INTERVAL_USECS; int usecToSleep = BUFFER_SEND_INTERVAL_USECS;
@@ -718,23 +719,25 @@ void AudioMixer::run() {
&& nodeData->getAvatarAudioStream()) { && nodeData->getAvatarAudioStream()) {
int streamsMixed = prepareMixForListeningNode(node.data()); int streamsMixed = prepareMixForListeningNode(node.data());
char* dataAt; char* mixDataAt;
if (streamsMixed > 0) { if (streamsMixed > 0) {
// pack header // pack headers
int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio); int numBytesMixPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
dataAt = clientMixBuffer + numBytesPacketHeader; mixDataAt = clientMixBuffer + numBytesMixPacketHeader;
// pack sequence number // pack sequence number
quint16 sequence = nodeData->getOutgoingSequenceNumber(); quint16 sequence = nodeData->getOutgoingSequenceNumber();
memcpy(dataAt, &sequence, sizeof(quint16)); memcpy(mixDataAt, &sequence, sizeof(quint16));
dataAt += sizeof(quint16); mixDataAt += sizeof(quint16);
// Pack stream properties
bool hasReverb = false;
float reverbTime;
float wetLevel;
// pack mixed audio samples
memcpy(mixDataAt, _mixSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
mixDataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
// Send stream properties
bool hasReverb = false;
float reverbTime, wetLevel;
// find reverb properties // find reverb properties
for (int i = 0; i < _zoneReverbSettings.size(); ++i) { for (int i = 0; i < _zoneReverbSettings.size(); ++i) {
AudioMixerClientData* data = static_cast<AudioMixerClientData*>(node->getLinkedData()); AudioMixerClientData* data = static_cast<AudioMixerClientData*>(node->getLinkedData());
@@ -762,48 +765,45 @@ void AudioMixer::run() {
float CHANCE_OF_SEND = 0.01f; float CHANCE_OF_SEND = 0.01f;
bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND); bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);
unsigned char bitset = 0;
if (sendData) { if (sendData) {
int numBytesEnvPacketHeader = populatePacketHeader(clientEnvBuffer, PacketTypeAudioEnvironment);
char* envDataAt = clientEnvBuffer + numBytesEnvPacketHeader;
unsigned char bitset = 0;
setAtBit(bitset, HAS_DATA_BIT); setAtBit(bitset, HAS_DATA_BIT);
if (hasReverb) { if (hasReverb) {
setAtBit(bitset, HAS_REVERB_BIT); setAtBit(bitset, HAS_REVERB_BIT);
} }
memcpy(dataAt, &bitset, sizeof(unsigned char)); memcpy(envDataAt, &bitset, sizeof(unsigned char));
dataAt += sizeof(unsigned char); envDataAt += sizeof(unsigned char);
if (hasReverb) { if (hasReverb) {
memcpy(dataAt, &reverbTime, sizeof(float)); memcpy(envDataAt, &reverbTime, sizeof(float));
dataAt += sizeof(float); envDataAt += sizeof(float);
memcpy(dataAt, &wetLevel, sizeof(float)); memcpy(envDataAt, &wetLevel, sizeof(float));
dataAt += sizeof(float); envDataAt += sizeof(float);
} }
} else { nodeList->writeDatagram(clientEnvBuffer, envDataAt - clientEnvBuffer, node);
memcpy(dataAt, &bitset, sizeof(unsigned char));
dataAt += sizeof(unsigned char);
} }
// pack mixed audio samples
memcpy(dataAt, _mixSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
} else { } else {
// pack header // pack header
int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeSilentAudioFrame); int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeSilentAudioFrame);
dataAt = clientMixBuffer + numBytesPacketHeader; mixDataAt = clientMixBuffer + numBytesPacketHeader;
// pack sequence number // pack sequence number
quint16 sequence = nodeData->getOutgoingSequenceNumber(); quint16 sequence = nodeData->getOutgoingSequenceNumber();
memcpy(dataAt, &sequence, sizeof(quint16)); memcpy(mixDataAt, &sequence, sizeof(quint16));
dataAt += sizeof(quint16); mixDataAt += sizeof(quint16);
// pack number of silent audio samples // pack number of silent audio samples
quint16 numSilentSamples = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; quint16 numSilentSamples = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;
memcpy(dataAt, &numSilentSamples, sizeof(quint16)); memcpy(mixDataAt, &numSilentSamples, sizeof(quint16));
dataAt += sizeof(quint16); mixDataAt += sizeof(quint16);
} }
// send mixed audio packet // send mixed audio packet
nodeList->writeDatagram(clientMixBuffer, dataAt - clientMixBuffer, node); nodeList->writeDatagram(clientMixBuffer, mixDataAt - clientMixBuffer, node);
nodeData->incrementOutgoingMixedAudioSequenceNumber(); nodeData->incrementOutgoingMixedAudioSequenceNumber();
// send an audio stream stats packet if it's time // send an audio stream stats packet if it's time

View file

@@ -1018,6 +1018,27 @@ void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
} }
} }
void Audio::parseAudioEnvironmentData(const QByteArray &packet) {
    // Unpack reverb settings from a PacketTypeAudioEnvironment packet and
    // apply them to (or clear them from) the received audio stream.
    int numBytesPacketHeader = numBytesForPacketHeader(packet);
    const char* dataAt = packet.constData() + numBytesPacketHeader;

    // First byte after the header is a bitset of stream-property flags.
    char bitset;
    memcpy(&bitset, dataAt, sizeof(char));
    dataAt += sizeof(char);

    // Fixed stray duplicate semicolon that followed this statement.
    bool hasReverb = oneAtBit(bitset, HAS_REVERB_BIT);
    if (hasReverb) {
        // Reverb payload: two packed floats — decay time, then wet level.
        float reverbTime, wetLevel;
        memcpy(&reverbTime, dataAt, sizeof(float));
        dataAt += sizeof(float);
        memcpy(&wetLevel, dataAt, sizeof(float));
        dataAt += sizeof(float);
        _receivedAudioStream.setReverb(reverbTime, wetLevel);
    } else {
        _receivedAudioStream.clearReverb();
    }
}
void Audio::sendDownstreamAudioStatsPacket() { void Audio::sendDownstreamAudioStatsPacket() {
// since this function is called every second, we'll sample for some of our stats here // since this function is called every second, we'll sample for some of our stats here

View file

@@ -125,6 +125,7 @@ public slots:
void stop(); void stop();
void addReceivedAudioToStream(const QByteArray& audioByteArray); void addReceivedAudioToStream(const QByteArray& audioByteArray);
void parseAudioStreamStatsPacket(const QByteArray& packet); void parseAudioStreamStatsPacket(const QByteArray& packet);
void parseAudioEnvironmentData(const QByteArray& packet);
void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples); void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
void handleAudioInput(); void handleAudioInput();
void reset(); void reset();

View file

@@ -49,14 +49,18 @@ void DatagramProcessor::processDatagrams() {
PacketType incomingType = packetTypeForPacket(incomingPacket); PacketType incomingType = packetTypeForPacket(incomingPacket);
// only process this packet if we have a match on the packet version // only process this packet if we have a match on the packet version
switch (incomingType) { switch (incomingType) {
case PacketTypeAudioEnvironment:
case PacketTypeAudioStreamStats:
case PacketTypeMixedAudio: case PacketTypeMixedAudio:
case PacketTypeSilentAudioFrame: case PacketTypeSilentAudioFrame: {
case PacketTypeAudioStreamStats: { if (incomingType == PacketTypeAudioStreamStats) {
if (incomingType != PacketTypeAudioStreamStats) { QMetaObject::invokeMethod(&application->_audio, "parseAudioStreamStatsPacket", Qt::QueuedConnection,
QMetaObject::invokeMethod(&application->_audio, "addReceivedAudioToStream", Qt::QueuedConnection, Q_ARG(QByteArray, incomingPacket));
} else if (incomingType == PacketTypeAudioEnvironment) {
QMetaObject::invokeMethod(&application->_audio, "parseAudioEnvironmentData", Qt::QueuedConnection,
Q_ARG(QByteArray, incomingPacket)); Q_ARG(QByteArray, incomingPacket));
} else { } else {
QMetaObject::invokeMethod(&application->_audio, "parseAudioStreamStatsPacket", Qt::QueuedConnection, QMetaObject::invokeMethod(&application->_audio, "addReceivedAudioToStream", Qt::QueuedConnection,
Q_ARG(QByteArray, incomingPacket)); Q_ARG(QByteArray, incomingPacket));
} }

View file

@@ -170,27 +170,9 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
} }
int InboundAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) { int InboundAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
int read = 0;
if (type == PacketTypeMixedAudio) {
char bitset;
memcpy(&bitset, packetAfterSeqNum.data() + read, sizeof(char));
read += sizeof(char);
bool hasData = oneAtBit(bitset, HAS_DATA_BIT);
if (hasData) {
_hasReverb = oneAtBit(bitset, HAS_REVERB_BIT);
if (_hasReverb) {
memcpy(&_reverbTime, packetAfterSeqNum.data() + read, sizeof(float));
read += sizeof(float);
memcpy(&_wetLevel, packetAfterSeqNum.data() + read, sizeof(float));
read += sizeof(float);
}
}
}
// mixed audio packets do not have any info between the seq num and the audio data. // mixed audio packets do not have any info between the seq num and the audio data.
numAudioSamples = (packetAfterSeqNum.size() - read) / sizeof(int16_t); numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
return read; return 0;
} }
int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) { int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {

View file

@@ -53,7 +53,7 @@ PacketVersion versionForPacketType(PacketType type) {
case PacketTypeSilentAudioFrame: case PacketTypeSilentAudioFrame:
return 4; return 4;
case PacketTypeMixedAudio: case PacketTypeMixedAudio:
return 3; return 1;
case PacketTypeAvatarData: case PacketTypeAvatarData:
return 3; return 3;
case PacketTypeAvatarIdentity: case PacketTypeAvatarIdentity:
@@ -71,11 +71,9 @@ PacketVersion versionForPacketType(PacketType type) {
return 1; return 1;
case PacketTypeOctreeStats: case PacketTypeOctreeStats:
return 1; return 1;
case PacketTypeEntityAddOrEdit: case PacketTypeEntityAddOrEdit:
case PacketTypeEntityData: case PacketTypeEntityData:
return VERSION_ENTITIES_SUPPORT_DIMENSIONS; return VERSION_ENTITIES_SUPPORT_DIMENSIONS;
case PacketTypeEntityErase: case PacketTypeEntityErase:
return 2; return 2;
case PacketTypeAudioStreamStats: case PacketTypeAudioStreamStats: