STEREO_FACTOR -> AudioConstants::STEREO
parent 5af95c6062
commit afce8c4a45

4 changed files with 13 additions and 22 deletions
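The change is mechanical: each file drops its local MONO_FACTOR / STEREO_FACTOR constants in favour of shared channel-count constants from AudioConstants.h. A minimal sketch of what that header is assumed to declare (the names come from the diff below, the values are inferred from the deleted locals MONO_FACTOR = 1 and STEREO_FACTOR = 2; the real header may differ):

    namespace AudioConstants {
        // Channel counts used as interleave factors throughout the audio code.
        const int MONO = 1;
        const int STEREO = 2;
    }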
AudioScope.cpp

@@ -250,8 +250,6 @@ int AudioScope::addSilenceToScope(QByteArray* byteArray, int frameOffset, int si
 }
 
 
-const int STEREO_FACTOR = 2;
-
 void AudioScope::addStereoSilenceToScope(int silentSamplesPerChannel) {
     if (!_isEnabled || _isPaused) {
         return;
@@ -265,10 +263,10 @@ void AudioScope::addStereoSamplesToScope(const QByteArray& samples) {
         return;
     }
     const int16_t* samplesData = reinterpret_cast<const int16_t*>(samples.data());
-    int samplesPerChannel = samples.size() / sizeof(int16_t) / STEREO_FACTOR;
+    int samplesPerChannel = samples.size() / sizeof(int16_t) / AudioConstants::STEREO;
 
-    addBufferToScope(_scopeOutputLeft, _scopeOutputOffset, samplesData, samplesPerChannel, 0, STEREO_FACTOR);
-    _scopeOutputOffset = addBufferToScope(_scopeOutputRight, _scopeOutputOffset, samplesData, samplesPerChannel, 1, STEREO_FACTOR);
+    addBufferToScope(_scopeOutputLeft, _scopeOutputOffset, samplesData, samplesPerChannel, 0, AudioConstants::STEREO);
+    _scopeOutputOffset = addBufferToScope(_scopeOutputRight, _scopeOutputOffset, samplesData, samplesPerChannel, 1, AudioConstants::STEREO);
 
     _scopeLastFrame = samples.right(AudioConstants::NETWORK_FRAME_BYTES_STEREO);
 }
@@ -282,9 +280,9 @@ void AudioScope::addLastFrameRepeatedWithFadeToScope(int samplesPerChannel) {
         int samplesToWriteThisIteration = std::min(samplesRemaining, (int) AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
         float fade = calculateRepeatedFrameFadeFactor(indexOfRepeat);
         addBufferToScope(_scopeOutputLeft, _scopeOutputOffset, lastFrameData,
-            samplesToWriteThisIteration, 0, STEREO_FACTOR, fade);
+            samplesToWriteThisIteration, 0, AudioConstants::STEREO, fade);
         _scopeOutputOffset = addBufferToScope(_scopeOutputRight, _scopeOutputOffset,
-            lastFrameData, samplesToWriteThisIteration, 1, STEREO_FACTOR, fade);
+            lastFrameData, samplesToWriteThisIteration, 1, AudioConstants::STEREO, fade);
 
         samplesRemaining -= samplesToWriteThisIteration;
         indexOfRepeat++;
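In all three AudioScope hunks the last two arguments of addBufferToScope are the channel index (0 = left, 1 = right) and the interleave stride, now spelled AudioConstants::STEREO instead of the local STEREO_FACTOR. A standalone sketch of that de-interleaving pattern, using a hypothetical helper rather than the engine's addBufferToScope:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical helper: copy one channel out of an interleaved int16_t buffer.
    // 'channel' picks the offset within each frame (0 = left, 1 = right) and
    // 'stride' is the number of interleaved channels (2 for stereo).
    std::vector<int16_t> extractChannel(const int16_t* interleaved, size_t samplesPerChannel,
                                        int channel, int stride) {
        std::vector<int16_t> out(samplesPerChannel);
        for (size_t i = 0; i < samplesPerChannel; ++i) {
            out[i] = interleaved[i * stride + channel];
        }
        return out;
    }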
MixedAudioStream.cpp

@@ -13,8 +13,6 @@
 
 #include "AudioConstants.h"
 
-static const int STEREO_FACTOR = 2;
-
 MixedAudioStream::MixedAudioStream(int numFramesCapacity, int numStaticJitterFrames) :
-    InboundAudioStream(STEREO_FACTOR, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL,
+    InboundAudioStream(AudioConstants::STEREO, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL,
                        numFramesCapacity, numStaticJitterFrames) {}
MixedProcessedAudioStream.cpp

@@ -12,17 +12,15 @@
 
 #include "MixedProcessedAudioStream.h"
 #include "AudioLogging.h"
 
-static const int STEREO_FACTOR = 2;
-
 MixedProcessedAudioStream::MixedProcessedAudioStream(int numFramesCapacity, int numStaticJitterFrames)
-    : InboundAudioStream(STEREO_FACTOR, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL,
+    : InboundAudioStream(AudioConstants::STEREO, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL,
                          numFramesCapacity, numStaticJitterFrames) {}
 
 void MixedProcessedAudioStream::outputFormatChanged(int sampleRate, int channelCount) {
     _outputSampleRate = sampleRate;
     _outputChannelCount = channelCount;
-    int deviceOutputFrameFrames = networkToDeviceFrames(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO / STEREO_FACTOR);
-    int deviceOutputFrameSamples = deviceOutputFrameFrames * STEREO_FACTOR;
+    int deviceOutputFrameFrames = networkToDeviceFrames(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO / AudioConstants::STEREO);
+    int deviceOutputFrameSamples = deviceOutputFrameFrames * AudioConstants::STEREO;
     _ringBuffer.resizeForFrameSize(deviceOutputFrameSamples);
 }
@@ -55,16 +53,16 @@ int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray&
 
     _ringBuffer.writeData(outputBuffer.data(), outputBuffer.size());
     qCDebug(audiostream, "Wrote %d samples to buffer (%d available)", outputBuffer.size() / (int)sizeof(int16_t), getSamplesAvailable());
 
     return packetAfterStreamProperties.size();
 }
 
 int MixedProcessedAudioStream::networkToDeviceFrames(int networkFrames) {
     return ((quint64)networkFrames * _outputChannelCount * _outputSampleRate) /
-        (quint64)(STEREO_FACTOR * AudioConstants::SAMPLE_RATE);
+        (quint64)(AudioConstants::STEREO * AudioConstants::SAMPLE_RATE);
 }
 
 int MixedProcessedAudioStream::deviceToNetworkFrames(int deviceFrames) {
-    return (quint64)deviceFrames * (quint64)(STEREO_FACTOR * AudioConstants::SAMPLE_RATE) /
+    return (quint64)deviceFrames * (quint64)(AudioConstants::STEREO * AudioConstants::SAMPLE_RATE) /
         (_outputSampleRate * _outputChannelCount);
 }
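networkToDeviceFrames() and deviceToNetworkFrames() simply rescale a per-channel frame count by the ratio of device throughput (channels times sample rate) to network throughput (stereo times AudioConstants::SAMPLE_RATE). A self-contained sketch of the same arithmetic; the 24 kHz network rate and the 44.1 kHz stereo output device are assumptions for illustration, not values taken from this commit:

    #include <cstdint>
    #include <cstdio>

    static const int NETWORK_SAMPLE_RATE = 24000;  // assumed network mixer rate
    static const int NETWORK_CHANNELS = 2;         // AudioConstants::STEREO
    static const int DEVICE_SAMPLE_RATE = 44100;   // hypothetical output device
    static const int DEVICE_CHANNELS = 2;

    // Same scaling as MixedProcessedAudioStream::networkToDeviceFrames().
    int networkToDeviceFrames(int networkFrames) {
        return ((uint64_t)networkFrames * DEVICE_CHANNELS * DEVICE_SAMPLE_RATE) /
            (uint64_t)(NETWORK_CHANNELS * NETWORK_SAMPLE_RATE);
    }

    int main() {
        // 240 network frames per channel map to 441 device frames at 44.1 kHz.
        printf("%d\n", networkToDeviceFrames(240));  // prints 441
        return 0;
    }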
PositionalAudioStream.cpp

@@ -21,11 +21,8 @@
 #include <udt/PacketHeaders.h>
 #include <UUID.h>
 
-static const int MONO_FACTOR = 1;
-static const int STEREO_FACTOR = 2;
-
 PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, int numStaticJitterFrames) :
-    InboundAudioStream(isStereo ? STEREO_FACTOR : MONO_FACTOR,
+    InboundAudioStream(isStereo ? AudioConstants::STEREO : AudioConstants::MONO,
                        AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL,
                        AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY,
                        numStaticJitterFrames),