pack position and orientation in silent frames
commit 9cbc53abc3
parent 47b061983f

5 changed files with 45 additions and 27 deletions
@@ -19,28 +19,38 @@ AvatarAudioStream::AvatarAudioStream(bool isStereo, const InboundAudioStream::Se
 }
 
 int AvatarAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
-    _shouldLoopbackForNode = (type == PacketTypeMicrophoneAudioWithEcho);
-
     int readBytes = 0;
 
-    // read the channel flag
-    quint8 channelFlag = packetAfterSeqNum.at(readBytes);
-    bool isStereo = channelFlag == 1;
-    readBytes += sizeof(quint8);
-
-    // if isStereo value has changed, restart the ring buffer with new frame size
-    if (isStereo != _isStereo) {
-        _ringBuffer.resizeForFrameSize(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
-        _isStereo = isStereo;
+    if (type == PacketTypeSilentAudioFrame) {
+        const char* dataAt = packetAfterSeqNum.constData();
+        quint16 numSilentSamples = *(reinterpret_cast<const quint16*>(dataAt));
+        readBytes += sizeof(quint16);
+        numAudioSamples = (int)numSilentSamples;
+
+        // read the positional data
+        readBytes += parsePositionalData(packetAfterSeqNum.mid(readBytes));
+
+    } else {
+        _shouldLoopbackForNode = (type == PacketTypeMicrophoneAudioWithEcho);
+
+        // read the channel flag
+        quint8 channelFlag = packetAfterSeqNum.at(readBytes);
+        bool isStereo = channelFlag == 1;
+        readBytes += sizeof(quint8);
+
+        // if isStereo value has changed, restart the ring buffer with new frame size
+        if (isStereo != _isStereo) {
+            _ringBuffer.resizeForFrameSize(isStereo ? NETWORK_BUFFER_LENGTH_SAMPLES_STEREO : NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
+            _isStereo = isStereo;
+        }
+
+        // read the positional data
+        readBytes += parsePositionalData(packetAfterSeqNum.mid(readBytes));
+
+        // calculate how many samples are in this packet
+        int numAudioBytes = packetAfterSeqNum.size() - readBytes;
+        numAudioSamples = numAudioBytes / sizeof(int16_t);
     }
-
-    // read the positional data
-    readBytes += parsePositionalData(packetAfterSeqNum.mid(readBytes));
-
-    // calculate how many samples are in this packet
-    int numAudioBytes = packetAfterSeqNum.size() - readBytes;
-    numAudioSamples = numAudioBytes / sizeof(int16_t);
-
+
     return readBytes;
 }
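After this hunk, a PacketTypeSilentAudioFrame payload (everything after the sequence number) is a two-byte silent-sample count followed by the same positional block that audible frames carry, and parseStreamProperties() consumes both before reporting numAudioSamples. The diff itself does not spell out the byte layout of the positional block, so the stand-alone sketch below assumes the layout implied by the sender hunks: three 32-bit floats for the head position and four 32-bit floats for the head orientation. The struct and function names are illustrative, not from the codebase.

    // Hypothetical stand-alone parser mirroring the read order that
    // AvatarAudioStream::parseStreamProperties() uses for silent frames.
    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    struct SilentFrameHeader {          // illustrative only
        uint16_t numSilentSamples;      // how many samples of silence this frame represents
        float    position[3];           // head position (assumed: three 32-bit floats)
        float    orientation[4];        // head orientation (assumed: four 32-bit floats)
    };

    // Returns the number of bytes consumed, mimicking the readBytes bookkeeping in the diff.
    int parseSilentFrame(const uint8_t* data, size_t size, SilentFrameHeader& out) {
        const size_t needed = sizeof(uint16_t) + 3 * sizeof(float) + 4 * sizeof(float);
        if (size < needed) {
            return 0;                   // too short; the real code relies on the sender packing all fields
        }
        int readBytes = 0;
        std::memcpy(&out.numSilentSamples, data + readBytes, sizeof(uint16_t));
        readBytes += sizeof(uint16_t);
        std::memcpy(out.position, data + readBytes, sizeof(out.position));
        readBytes += sizeof(out.position);
        std::memcpy(out.orientation, data + readBytes, sizeof(out.orientation));
        readBytes += sizeof(out.orientation);
        return readBytes;
    }

    int main() {
        // Build a fake payload and round-trip it through the parser.
        uint8_t payload[sizeof(uint16_t) + 7 * sizeof(float)] = {};
        uint16_t numSilentSamples = 480;
        float pose[7] = { 1.0f, 2.0f, 3.0f,  0.0f, 0.0f, 0.0f, 1.0f };
        std::memcpy(payload, &numSilentSamples, sizeof(numSilentSamples));
        std::memcpy(payload + sizeof(numSilentSamples), pose, sizeof(pose));

        SilentFrameHeader header;
        int consumed = parseSilentFrame(payload, sizeof(payload), header);
        std::printf("consumed %d bytes, %d silent samples\n", consumed, (int)header.numSilentSamples);
        return 0;
    }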
@@ -753,6 +753,15 @@ void Audio::handleAudioInput() {
                 quint16 numSilentSamples = numNetworkSamples;
                 memcpy(currentPacketPtr, &numSilentSamples, sizeof(quint16));
                 currentPacketPtr += sizeof(quint16);
+
+                // memcpy the three float positions
+                memcpy(currentPacketPtr, &headPosition, sizeof(headPosition));
+                currentPacketPtr += (sizeof(headPosition));
+
+                // memcpy our orientation
+                memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
+                currentPacketPtr += sizeof(headOrientation);
+
             } else {
                 // set the mono/stereo byte
                 *currentPacketPtr++ = isStereo;
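On the sending side, a silent frame now carries a fixed-size pose header instead of only the two-byte sample count. A rough sketch of the packing order follows, using plain float arrays in place of glm::vec3 and glm::quat, so the 30-byte total is an assumption that holds if both are tightly packed 32-bit floats.

    // Sketch of the packing order used by Audio::handleAudioInput() for silent frames.
    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    size_t packSilentFrame(uint8_t* out, uint16_t numSilentSamples,
                           const float headPosition[3], const float headOrientation[4]) {
        uint8_t* currentPacketPtr = out;

        std::memcpy(currentPacketPtr, &numSilentSamples, sizeof(numSilentSamples));
        currentPacketPtr += sizeof(numSilentSamples);

        // memcpy the three float positions
        std::memcpy(currentPacketPtr, headPosition, 3 * sizeof(float));
        currentPacketPtr += 3 * sizeof(float);

        // memcpy our orientation
        std::memcpy(currentPacketPtr, headOrientation, 4 * sizeof(float));
        currentPacketPtr += 4 * sizeof(float);

        return static_cast<size_t>(currentPacketPtr - out);   // 2 + 12 + 16 = 30 bytes
    }

    int main() {
        uint8_t buffer[64];
        const float pos[3] = { 0.0f, 1.7f, 0.0f };
        const float orient[4] = { 0.0f, 0.0f, 0.0f, 1.0f };
        std::printf("silent frame payload: %zu bytes\n", packSilentFrame(buffer, 480, pos, orient));
        return 0;
    }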
@@ -109,14 +109,8 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
 
     int networkSamples;
 
-    if (packetType == PacketTypeSilentAudioFrame) {
-        quint16 numSilentSamples = *(reinterpret_cast<const quint16*>(dataAt));
-        readBytes += sizeof(quint16);
-        networkSamples = (int)numSilentSamples;
-    } else {
-        // parse the info after the seq number and before the audio data (the stream properties)
-        readBytes += parseStreamProperties(packetType, packet.mid(readBytes), networkSamples);
-    }
+    // parse the info after the seq number and before the audio data (the stream properties)
+    readBytes += parseStreamProperties(packetType, packet.mid(readBytes), networkSamples);
 
     // handle this packet based on its arrival status.
     switch (arrivalInfo._status) {
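With this hunk the generic InboundAudioStream no longer special-cases silent frames; the parseStreamProperties() hook owns everything between the sequence number and the audio samples, which keeps the readBytes accounting correct now that silent frames also carry positional data. Below is a minimal sketch of that delegation shape, with a stub standing in for the avatar stream's parser; the numbers in the stub are invented for illustration.

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the subclass hook; the real AvatarAudioStream consumes the silent
    // sample count plus the positional block here (see the first hunk above).
    static int parseStreamPropertiesStub(const uint8_t* /*data*/, int /*size*/, int& numAudioSamples) {
        numAudioSamples = 480;   // pretend the header said 480 silent samples
        return 30;               // pretend we consumed 30 header bytes
    }

    // Shape of the simplified parseData(): no silent-frame special case; the
    // stream-properties hook decides how many header bytes to consume.
    static int parseDataSketch(const uint8_t* packet, int size) {
        int readBytes = 0;
        int networkSamples = 0;
        readBytes += parseStreamPropertiesStub(packet + readBytes, size - readBytes, networkSamples);
        std::printf("consumed %d header bytes for %d samples\n", readBytes, networkSamples);
        return readBytes;
    }

    int main() {
        uint8_t packet[64] = {};
        parseDataSketch(packet, sizeof(packet));
        return 0;
    }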
@@ -51,7 +51,7 @@ PacketVersion versionForPacketType(PacketType type) {
         case PacketTypeMicrophoneAudioWithEcho:
             return 2;
         case PacketTypeSilentAudioFrame:
-            return 3;
+            return 4;
         case PacketTypeMixedAudio:
             return 1;
         case PacketTypeAvatarData:
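The version for PacketTypeSilentAudioFrame is bumped from 3 to 4 because the payload grew by the positional block, so peers that compare packet versions can avoid misreading old-format silent frames. A tiny sketch of that kind of guard follows; the typedef, constant, and helper names here are hypothetical and not taken from PacketHeaders.

    #include <cstdio>

    typedef unsigned char PacketVersion;   // stand-in for the codebase's PacketVersion type

    // Mirrors the bump in versionForPacketType(): silent audio frames move from 3 to 4.
    const PacketVersion VERSION_SILENT_AUDIO_FRAME_WITH_POSE = 4;   // hypothetical constant name

    // Hypothetical receiver-side gate: only read position/orientation out of a
    // silent frame if the sender speaks the new packet version.
    bool silentFrameCarriesPose(PacketVersion senderVersion) {
        return senderVersion >= VERSION_SILENT_AUDIO_FRAME_WITH_POSE;
    }

    int main() {
        std::printf("v3 carries pose: %d, v4 carries pose: %d\n",
                    silentFrameCarriesPose(3), silentFrameCarriesPose(4));
        return 0;
    }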
@@ -505,6 +505,11 @@ void ScriptEngine::run() {
                     // write the number of silent samples so the audio-mixer can uphold timing
                     packetStream.writeRawData(reinterpret_cast<const char*>(&SCRIPT_AUDIO_BUFFER_SAMPLES), sizeof(int16_t));
+
+                    // use the orientation and position of this avatar for the source of this audio
+                    packetStream.writeRawData(reinterpret_cast<const char*>(&_avatarData->getPosition()), sizeof(glm::vec3));
+                    glm::quat headOrientation = _avatarData->getHeadOrientation();
+                    packetStream.writeRawData(reinterpret_cast<const char*>(&headOrientation), sizeof(glm::quat));
 
                 } else if (nextSoundOutput) {
                     // assume scripted avatar audio is mono and set channel flag to zero
                     packetStream << (quint8)0;
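The scripted-avatar path builds its packet with a QDataStream, so the same pose fields are appended with writeRawData(). The sketch below reproduces that append order; it requires Qt to build, and the vec3/quat stand-ins are plain structs used only to show the assumed 12-byte and 16-byte sizes (glm itself is not needed for the illustration).

    #include <QByteArray>
    #include <QDataStream>
    #include <QIODevice>
    #include <cstdio>

    struct Vec3 { float x, y, z; };        // stand-in for glm::vec3 (12 bytes)
    struct Quat { float x, y, z, w; };     // stand-in for glm::quat (16 bytes)

    int main() {
        QByteArray packet;
        QDataStream packetStream(&packet, QIODevice::WriteOnly);

        qint16 numSilentSamples = 480;     // stand-in for SCRIPT_AUDIO_BUFFER_SAMPLES
        Vec3 position = { 0.0f, 1.7f, 0.0f };
        Quat headOrientation = { 0.0f, 0.0f, 0.0f, 1.0f };

        // write the number of silent samples so the audio-mixer can uphold timing
        packetStream.writeRawData(reinterpret_cast<const char*>(&numSilentSamples), sizeof(qint16));

        // use the orientation and position of this avatar for the source of this audio
        packetStream.writeRawData(reinterpret_cast<const char*>(&position), sizeof(position));
        packetStream.writeRawData(reinterpret_cast<const char*>(&headOrientation), sizeof(headOrientation));

        std::printf("payload bytes after pose: %d\n", (int)packet.size());   // 2 + 12 + 16 = 30
        return 0;
    }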