Mirror of https://github.com/HifiExperiments/overte.git (synced 2025-08-09 17:38:34 +02:00)
handle input from Audio class in AudioScope
parent 1e91d0fe93
commit 60447e48cb

4 changed files with 30 additions and 17 deletions
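
Before the hunks, a minimal, self-contained Qt sketch of the pattern this commit introduces: the audio object wraps a frame of mono int16 input in a QByteArray, emits it as a signal, and the scope consumes it in a slot. ToyAudio, ToyScope, the frame size, and the file name in the trailing include are illustrative stand-ins, not Overte code.

// Toy stand-ins for the real classes: a producer emits mono int16 audio wrapped
// in a QByteArray; a scope-style consumer receives it through a Qt connection.
#include <QtCore/QByteArray>
#include <QtCore/QCoreApplication>
#include <QtCore/QObject>
#include <cstdint>

class ToyAudio : public QObject {
    Q_OBJECT
public:
    void produceFrame() {
        int16_t samples[240] = {};   // pretend this is one captured network frame
        emit inputReceived(QByteArray(reinterpret_cast<const char*>(samples),
                                      sizeof(samples)));
    }
signals:
    void inputReceived(const QByteArray& inputSamples);
};

class ToyScope : public QObject {
    Q_OBJECT
public slots:
    void addInputToScope(const QByteArray& inputSamples) {
        // Byte length divided by the size of one sample gives the sample count.
        const int numSamples = inputSamples.size() / int(sizeof(int16_t));
        qInfo("scope received %d input samples", numSamples);
    }
};

int main(int argc, char** argv) {
    QCoreApplication app(argc, argv);
    ToyAudio audio;
    ToyScope scope;
    QObject::connect(&audio, &ToyAudio::inputReceived, &scope, &ToyScope::addInputToScope);
    audio.produceFrame();   // direct connection: triggers ToyScope::addInputToScope immediately
    return 0;
}

#include "sketch.moc"   // assuming this file is sketch.cpp and is built with qmake/CMake automoc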
Audio.cpp
@@ -831,12 +831,9 @@ void Audio::handleAudioInput() {
     if (!_isStereoInput && _proceduralAudioOutput) {
         processProceduralAudio(networkAudioSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
     }
 
-    // if (!_isStereoInput && _scopeEnabled && !_scopeEnabledPause) {
-    //     unsigned int numMonoAudioChannels = 1;
-    //     unsigned int monoAudioChannel = 0;
-    //     _scopeInputOffset = addBufferToScope(_scopeInput, _scopeInputOffset, networkAudioSamples, NETWORK_SAMPLES_PER_FRAME, monoAudioChannel, numMonoAudioChannels);
-    // }
+    emit inputReceived(QByteArray(reinterpret_cast<const char*>(networkAudioSamples),
+                                  AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL));
 
     NodeList* nodeList = NodeList::getInstance();
     SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
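
A note on the QByteArray constructor used in the new emit: QByteArray(const char* data, int size) measures its second argument in bytes, so a buffer of N int16_t samples occupies N * sizeof(int16_t) bytes. A standalone sketch of that arithmetic (the buffer and sizes here are made up, not Overte code):

#include <QtCore/QByteArray>
#include <cstdint>
#include <cstdio>

int main() {
    const int numSamples = 240;           // e.g. one mono network frame of samples
    int16_t samples[numSamples] = {};

    // The (const char*, int) constructor counts bytes, so scale by the sample size.
    QByteArray wrapped(reinterpret_cast<const char*>(samples),
                       numSamples * int(sizeof(int16_t)));

    const int byteCount = int(wrapped.size());
    std::printf("%d samples -> %d bytes -> %d samples back\n",
                numSamples, byteCount, byteCount / int(sizeof(int16_t)));
    return 0;
}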
Audio.h
@@ -164,9 +164,7 @@ public slots:
 
 signals:
     bool muteToggled();
-    void preProcessOriginalInboundAudio(unsigned int sampleTime, QByteArray& samples, const QAudioFormat& format);
-    void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
-    void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
+    void inputReceived(const QByteArray& inputSamples);
 
 protected:
     // setup for audio I/O
AudioScope.cpp
@@ -43,6 +43,7 @@ AudioScope::AudioScope() :
             this, &AudioScope::addLastFrameRepeatedWithFadeToScope);
     connect(&audioIO->getReceivedAudioStream(), &MixedProcessedAudioStream::addedStereoSamples,
             this, &AudioScope::addStereoSamplesToScope);
+    connect(audioIO, &Audio::inputReceived, this, &AudioScope::addInputToScope);
 }
 
 void AudioScope::toggle() {
@@ -301,3 +302,16 @@ void AudioScope::addLastFrameRepeatedWithFadeToScope(int samplesPerChannel) {
         indexOfRepeat++;
     } while (samplesRemaining > 0);
 }
+
+void AudioScope::addInputToScope(const QByteArray& inputSamples) {
+    if (!_isEnabled || _isPaused) {
+        return;
+    }
+
+    const int INPUT_AUDIO_CHANNEL = 0;
+    const int NUM_INPUT_CHANNELS = 1;
+
+    _scopeInputOffset = addBufferToScope(_scopeInput, _scopeInputOffset,
+                                         reinterpret_cast<const int16_t*>(inputSamples.data()),
+                                         inputSamples.size() / sizeof(int16_t), INPUT_AUDIO_CHANNEL, NUM_INPUT_CHANNELS);
+}
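
The new slot above derives a sample count from the byte array's length and hands the samples to addBufferToScope, whose declaration (see the header hunks below) takes a source channel and channel count for interleaved input. The following is one plausible shape for such a routine, purely to illustrate those parameters; it is not Overte's actual implementation, and sketchAddBufferToScope and the buffer sizes are made up.

#include <QtCore/QByteArray>
#include <cstdint>
#include <cstdio>

// One plausible shape for a scope "add buffer" routine: copy a single channel
// out of an interleaved int16 buffer into a circular sample buffer, starting at
// frameOffset, and return the updated offset. Illustrative only.
int sketchAddBufferToScope(QByteArray* byteArray, int frameOffset, const int16_t* source,
                           int sourceSamples, unsigned int sourceChannel,
                           unsigned int sourceNumberOfChannels, float fade = 1.0f) {
    int16_t* destination = reinterpret_cast<int16_t*>(byteArray->data());
    const int capacity = int(byteArray->size()) / int(sizeof(int16_t));

    for (int i = 0; i < sourceSamples; i++) {
        // De-interleave: take sample i of the requested channel and apply the fade.
        const int16_t sample = source[i * int(sourceNumberOfChannels) + int(sourceChannel)];
        destination[frameOffset] = int16_t(sample * fade);
        frameOffset = (frameOffset + 1) % capacity;  // wrap when the scope buffer is full
    }
    return frameOffset;
}

int main() {
    QByteArray scopeBuffer(int(480 * sizeof(int16_t)), 0);  // a small made-up scope buffer
    int16_t monoFrame[240] = {};
    int offset = 0;
    offset = sketchAddBufferToScope(&scopeBuffer, offset, monoFrame, 240, 0, 1);
    std::printf("next write offset: %d\n", offset);
    return 0;
}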
AudioScope.h
@@ -20,11 +20,6 @@
 class AudioScope : public QObject, public DependencyManager::Dependency {
     Q_OBJECT
 public:
-    // Audio scope methods for data acquisition
-    int addBufferToScope(QByteArray* byteArray, int frameOffset, const int16_t* source, int sourceSamples,
-        unsigned int sourceChannel, unsigned int sourceNumberOfChannels, float fade = 1.0f);
-    int addSilenceToScope(QByteArray* byteArray, int frameOffset, int silentSamples);
-
     // Audio scope methods for rendering
     static void renderBackground(const float* color, int x, int y, int width, int height);
     void renderGrid(const float* color, int x, int y, int width, int height, int rows, int cols);
@@ -45,13 +40,22 @@ public slots:
     void selectAudioScopeFiveFrames();
     void selectAudioScopeTwentyFrames();
     void selectAudioScopeFiftyFrames();
-    void addStereoSilenceToScope(int silentSamplesPerChannel);
-    void addLastFrameRepeatedWithFadeToScope(int samplesPerChannel);
-    void addStereoSamplesToScope(const QByteArray& samples);
 
 protected:
     AudioScope();
+
+private slots:
+    void addStereoSilenceToScope(int silentSamplesPerChannel);
+    void addLastFrameRepeatedWithFadeToScope(int samplesPerChannel);
+    void addStereoSamplesToScope(const QByteArray& samples);
+    void addInputToScope(const QByteArray& inputSamples);
+
 private:
+    // Audio scope methods for data acquisition
+    int addBufferToScope(QByteArray* byteArray, int frameOffset, const int16_t* source, int sourceSamples,
+        unsigned int sourceChannel, unsigned int sourceNumberOfChannels, float fade = 1.0f);
+    int addSilenceToScope(QByteArray* byteArray, int frameOffset, int silentSamples);
+
     bool _isEnabled;
     bool _isPaused;
     int _scopeInputOffset;
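
Regarding the move of the data-acquisition handlers under private slots: in Qt, a class may connect its own private slots (as AudioScope's constructor does above); only code outside the class is barred from taking their address. A standalone sketch under that assumption, with a made-up Sketch class and a QTimer in place of the audio signals:

#include <QtCore/QCoreApplication>
#include <QtCore/QObject>
#include <QtCore/QTimer>

class Sketch : public QObject {
    Q_OBJECT
public:
    Sketch() {
        // Legal: a class can form pointers to its own private slots.
        connect(&_timer, &QTimer::timeout, this, &Sketch::onTick);
        _timer.start(100);
    }
private slots:
    void onTick() { qInfo("tick"); }
private:
    QTimer _timer;
};

int main(int argc, char** argv) {
    QCoreApplication app(argc, argv);
    Sketch sketch;
    QTimer::singleShot(350, &app, &QCoreApplication::quit);  // stop the demo after a few ticks
    return app.exec();
}

#include "private_slots_sketch.moc"  // assuming this file is private_slots_sketch.cpp, built with automoc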