Mirror of https://github.com/JulianGro/overte.git, synced 2025-04-15 13:58:47 +02:00.
numAudioSamples now calculated to be device samples instead of network samples
This commit is contained in:
parent
332e75453e
commit
748c9e2065
6 changed files with 34 additions and 19 deletions
|
@ -729,9 +729,7 @@ void Audio::handleAudioInput() {
|
|||
|
||||
void Audio::processReceivedAudioStreamSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
|
||||
|
||||
// NOTE: we assume inputBuffer contains NETWORK_BUFFER_LENGTH_SAMPLES_STEREO audio samples
|
||||
|
||||
const int numNetworkOutputSamples = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;
|
||||
const int numNetworkOutputSamples = inputBuffer.size() / sizeof(int16_t);
|
||||
const int numDeviceOutputSamples = numNetworkOutputSamples * (_outputFormat.sampleRate() * _outputFormat.channelCount())
|
||||
/ (_desiredOutputFormat.sampleRate() * _desiredOutputFormat.channelCount());
|
||||
|
||||
|
@ -741,7 +739,7 @@ void Audio::processReceivedAudioStreamSamples(const QByteArray& inputBuffer, QBy
|
|||
const int16_t* receivedSamples;
|
||||
if (_processSpatialAudio) {
|
||||
unsigned int sampleTime = _spatialAudioStart;
|
||||
QByteArray buffer = inputBuffer.left(numNetworkOutputSamples * sizeof(int16_t));
|
||||
QByteArray buffer = inputBuffer;
|
||||
|
||||
// Accumulate direct transmission of audio from sender to receiver
|
||||
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
|
||||
|
@ -1595,7 +1593,7 @@ void Audio::renderLineStrip(const float* color, int x, int y, int n, int offset,
|
|||
|
||||
/// Recomputes the device-side frame size whenever the actual output format
/// changes, and forwards the new format to the received-audio stream so it can
/// resize its ring buffer for device-rate samples.
/// NOTE(review): reconstructed post-commit state of a scrape-mangled diff hunk —
/// the `_receivedAudioStream.resizeFrame(_outputFrameSize);` line reads as the
/// removed side of the hunk, replaced by the outputFormatChanged() forwarding
/// call; confirm against the repository.
void Audio::outputFormatChanged() {
    // Scale the per-channel network frame size by the ratio of the device
    // format (channels * rate) to the desired network format rate.
    _outputFrameSize = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * _outputFormat.channelCount() * _outputFormat.sampleRate() / _desiredOutputFormat.sampleRate();
    _receivedAudioStream.outputFormatChanged(_outputFormat);
}
|
||||
|
||||
bool Audio::switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo) {
|
||||
|
@ -1648,7 +1646,6 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo)
|
|||
// cleanup any previously initialized device
|
||||
if (_audioOutput) {
|
||||
_audioOutput->stop();
|
||||
//_outputDevice = NULL;
|
||||
|
||||
delete _audioOutput;
|
||||
_audioOutput = NULL;
|
||||
|
|
|
@ -52,10 +52,8 @@ public:
|
|||
|
||||
/// Opens this IO device read-only so the audio output can begin pulling samples via readData().
void start() { open(QIODevice::ReadOnly); }
|
||||
/// Closes the device, ending further sample reads by the audio output.
void stop() { close(); }
|
||||
|
||||
qint64 readData(char * data, qint64 maxSize);
|
||||
// This device only feeds the audio output (read side); writes are ignored and report 0 bytes written.
qint64 writeData(const char * data, qint64 maxSize) { return 0; }
|
||||
|
||||
private:
|
||||
Audio& _parent;
|
||||
};
|
||||
|
@ -299,7 +297,6 @@ private:
|
|||
MovingMinMaxAvg<quint64> _packetSentTimeGaps;
|
||||
|
||||
AudioOutputIODevice _audioOutputIODevice;
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -91,7 +91,6 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
|
|||
|
||||
frameReceivedUpdateTimingStats();
|
||||
|
||||
|
||||
// TODO: handle generalized silent packet here?????
|
||||
|
||||
// parse the info after the seq number and before the audio data (the stream properties).
|
||||
|
@ -170,7 +169,6 @@ int InboundAudioStream::popSamples(int maxSamples, bool allOrNothing, bool starv
|
|||
return samplesPopped;
|
||||
}
|
||||
|
||||
|
||||
int InboundAudioStream::popFrames(int maxFrames, bool allOrNothing, bool starveOnFail) {
|
||||
int framesPopped = 0;
|
||||
int framesAvailable = _ringBuffer.framesAvailable();
|
||||
|
|
|
@ -81,8 +81,6 @@ public:
|
|||
|
||||
/// Sets the cap on how many frames the jitter buffer may hold beyond the desired count.
void setMaxFramesOverDesired(int maxFramesOverDesired) { _maxFramesOverDesired = maxFramesOverDesired; }
|
||||
|
||||
/// Resizes the underlying ring buffer for a new per-frame sample count.
void resizeFrame(int numFrameSamples) { _ringBuffer.resizeForFrameSize(numFrameSamples); }
|
||||
|
||||
virtual AudioStreamStats getAudioStreamStats() const;
|
||||
|
||||
/// returns the desired number of jitter buffer frames under the dynamic jitter buffers scheme
|
||||
|
@ -129,7 +127,7 @@ protected:
|
|||
InboundAudioStream& operator= (const InboundAudioStream&);
|
||||
|
||||
/// parses the info between the seq num and the audio data in the network packet and calculates
|
||||
/// how many audio samples this packet contains
|
||||
/// how many audio samples this packet contains (used when filling in samples for dropped packets).
|
||||
virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) = 0;
|
||||
|
||||
/// parses the audio data in the network packet.
|
||||
|
|
|
@ -12,11 +12,29 @@
|
|||
#include "MixedProcessedAudioStream.h"
|
||||
|
||||
/// Forwards all jitter-buffer configuration to the base stream.
/// NOTE(review): reconstructed from a scrape-mangled diff hunk that showed both
/// the removed MixedAudioStream and the added InboundAudioStream base-initializer
/// lines; the commit rebases this class onto InboundAudioStream. The stray space
/// before "::" in the scrape has been removed.
MixedProcessedAudioStream::MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc)
    : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired, useStDevForJitterCalc)
{
}
|
||||
|
||||
int MixedProcessedAudioStream ::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
|
||||
void MixedProcessedAudioStream::outputFormatChanged(const QAudioFormat& outputFormat) {
|
||||
_outputFormatChannelsTimesSampleRate = outputFormat.channelCount() * outputFormat.sampleRate();
|
||||
int deviceOutputFrameSize = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * _outputFormatChannelsTimesSampleRate / SAMPLE_RATE;
|
||||
_ringBuffer.resizeForFrameSize(deviceOutputFrameSize);
|
||||
}
|
||||
|
||||
/// Mixed audio packets carry no stream properties between the sequence number
/// and the audio payload, so no bytes are consumed from the packet (returns 0).
/// numAudioSamples is an out-parameter used upstream to synthesize samples for
/// dropped packets; since this stream's ring buffer holds device-format samples,
/// the network sample count is converted to device samples here (this is the
/// behavior change named in the commit message).
int MixedProcessedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
    // With no stream properties, the remainder of the packet is all int16_t samples.
    int numNetworkSamples = packetAfterSeqNum.size() / sizeof(int16_t);

    // Network audio is stereo at SAMPLE_RATE; scale by the device's
    // channels * sampleRate to get the device-side sample count.
    const int STEREO_DIVIDER = 2;
    numAudioSamples = numNetworkSamples * _outputFormatChannelsTimesSampleRate / (STEREO_DIVIDER * SAMPLE_RATE);

    return 0;
}
|
||||
|
||||
int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
|
||||
|
||||
QByteArray outputBuffer;
|
||||
emit processSamples(packetAfterStreamProperties, outputBuffer);
|
||||
|
|
|
@ -12,19 +12,26 @@
|
|||
#ifndef hifi_MixedProcessedAudioStream_h
|
||||
#define hifi_MixedProcessedAudioStream_h
|
||||
|
||||
#include "MixedAudioStream.h"
|
||||
#include <QtMultimedia/QAudioFormat>
|
||||
#include "InboundAudioStream.h"
|
||||
|
||||
class MixedProcessedAudioStream : public MixedAudioStream {
|
||||
class MixedProcessedAudioStream : public InboundAudioStream {
|
||||
Q_OBJECT
|
||||
public:
|
||||
MixedProcessedAudioStream (int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc);
|
||||
|
||||
signals:
|
||||
void outputFormatChanged(const QAudioFormat& outputFormat);
|
||||
|
||||
signals:
|
||||
|
||||
void processSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
|
||||
|
||||
protected:
|
||||
int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
|
||||
int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
|
||||
|
||||
private:
|
||||
int _outputFormatChannelsTimesSampleRate;
|
||||
};
|
||||
|
||||
#endif // hifi_MixedProcessedAudioStream_h
|
||||
|
|
Loading…
Reference in a new issue