Mirror of https://github.com/overte-org/overte.git, synced 2025-08-19 00:38:17 +02:00
remove Audio methods re-introduced after merge
This commit is contained in:
parent dfb02aea0c
commit 6d8f319e51
1 changed file with 0 additions and 424 deletions
@@ -893,430 +893,6 @@ bool Audio::outputLocalInjector(bool isStereo, qreal volume, AudioInjector* injector
    return false;
}

void Audio::toggleScope() {
    _scopeEnabled = !_scopeEnabled;
    if (_scopeEnabled) {
        allocateScope();
    } else {
        freeScope();
    }
}

void Audio::toggleScopePause() {
    _scopeEnabledPause = !_scopeEnabledPause;
}

void Audio::toggleStats() {
    _statsEnabled = !_statsEnabled;
}

void Audio::toggleStatsShowInjectedStreams() {
    _statsShowInjectedStreams = !_statsShowInjectedStreams;
}

void Audio::selectAudioScopeFiveFrames() {
    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioScopeFiveFrames)) {
        reallocateScope(5);
    }
}

void Audio::selectAudioScopeTwentyFrames() {
    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioScopeTwentyFrames)) {
        reallocateScope(20);
    }
}

void Audio::selectAudioScopeFiftyFrames() {
    if (Menu::getInstance()->isOptionChecked(MenuOption::AudioScopeFiftyFrames)) {
        reallocateScope(50);
    }
}

void Audio::allocateScope() {
    _scopeInputOffset = 0;
    _scopeOutputOffset = 0;
    int num = _samplesPerScope * sizeof(int16_t);
    _scopeInput = new QByteArray(num, 0);
    _scopeOutputLeft = new QByteArray(num, 0);
    _scopeOutputRight = new QByteArray(num, 0);
}

void Audio::reallocateScope(int frames) {
    if (_framesPerScope != frames) {
        _framesPerScope = frames;
        _samplesPerScope = NETWORK_SAMPLES_PER_FRAME * _framesPerScope;
        QMutexLocker lock(&_guard);
        freeScope();
        allocateScope();
    }
}
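
// The menu handlers above select the scope length in whole network frames;
// if NETWORK_SAMPLES_PER_FRAME were 240 (its value is not visible in this
// diff), reallocateScope(20) would size each channel buffer to 4800 int16_t
// samples.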

void Audio::freeScope() {
    if (_scopeInput) {
        delete _scopeInput;
        _scopeInput = 0;
    }
    if (_scopeOutputLeft) {
        delete _scopeOutputLeft;
        _scopeOutputLeft = 0;
    }
    if (_scopeOutputRight) {
        delete _scopeOutputRight;
        _scopeOutputRight = 0;
    }
}

int Audio::addBufferToScope(QByteArray* byteArray, int frameOffset, const int16_t* source, int sourceSamplesPerChannel,
    unsigned int sourceChannel, unsigned int sourceNumberOfChannels, float fade) {
    if (!_scopeEnabled || _scopeEnabledPause) {
        return 0;
    }

    // Temporary variable receives sample value
    float sample;

    QMutexLocker lock(&_guard);
    // Short int pointer to mapped samples in byte array
    int16_t* destination = (int16_t*) byteArray->data();

    for (int i = 0; i < sourceSamplesPerChannel; i++) {
        sample = (float)source[i * sourceNumberOfChannels + sourceChannel];
        destination[frameOffset] = sample / (float) MAX_16_BIT_AUDIO_SAMPLE * (float)SCOPE_HEIGHT / 2.0f;
        frameOffset = (frameOffset == _samplesPerScope - 1) ? 0 : frameOffset + 1;
    }
    return frameOffset;
}
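
// Each source sample is rescaled from the signed 16-bit range to a vertical
// deflection of at most SCOPE_HEIGHT / 2 pixels, and the write cursor wraps
// at _samplesPerScope, so the scope buffers behave as ring buffers.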

int Audio::addSilenceToScope(QByteArray* byteArray, int frameOffset, int silentSamples) {

    QMutexLocker lock(&_guard);
    // Short int pointer to mapped samples in byte array
    int16_t* destination = (int16_t*)byteArray->data();

    if (silentSamples >= _samplesPerScope) {
        memset(destination, 0, byteArray->size());
        return frameOffset;
    }

    int samplesToBufferEnd = _samplesPerScope - frameOffset;
    if (silentSamples > samplesToBufferEnd) {
        memset(destination + frameOffset, 0, samplesToBufferEnd * sizeof(int16_t));
        // Parenthesize the sample count before scaling by the sample size; the
        // original expression multiplied only samplesToBufferEnd by sizeof(int16_t),
        // zeroing the wrong number of bytes at the start of the buffer.
        memset(destination, 0, (silentSamples - samplesToBufferEnd) * sizeof(int16_t));
    } else {
        memset(destination + frameOffset, 0, silentSamples * sizeof(int16_t));
    }

    return (frameOffset + silentSamples) % _samplesPerScope;
}
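
// Worked example with hypothetical numbers: for _samplesPerScope == 100,
// frameOffset == 90, and silentSamples == 25, the first memset zeroes the 10
// samples up to the buffer end, the second zeroes the remaining 15 at the
// start, and the wrapped offset (90 + 25) % 100 == 15 is returned.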

void Audio::renderStats(const float* color, int width, int height) {
    if (!_statsEnabled) {
        return;
    }

    const int linesWhenCentered = _statsShowInjectedStreams ? 34 : 27;
    const int CENTERED_BACKGROUND_HEIGHT = STATS_HEIGHT_PER_LINE * linesWhenCentered;

    int lines = _statsShowInjectedStreams ? _audioMixerInjectedStreamAudioStatsMap.size() * 7 + 27 : 27;
    int statsHeight = STATS_HEIGHT_PER_LINE * lines;

    static const float backgroundColor[4] = { 0.2f, 0.2f, 0.2f, 0.6f };

    int x = std::max((width - (int)STATS_WIDTH) / 2, 0);
    int y = std::max((height - CENTERED_BACKGROUND_HEIGHT) / 2, 0);
    int w = STATS_WIDTH;
    int h = statsHeight;
    renderBackground(backgroundColor, x, y, w, h);

    int horizontalOffset = x + 5;
    int verticalOffset = y;

    float scale = 0.10f;
    float rotation = 0.0f;
    int font = 2;

    char latencyStatString[512];

    const float BUFFER_SEND_INTERVAL_MSECS = BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC;

    float audioInputBufferLatency = 0.0f, inputRingBufferLatency = 0.0f, networkRoundtripLatency = 0.0f,
        mixerRingBufferLatency = 0.0f, outputRingBufferLatency = 0.0f, audioOutputBufferLatency = 0.0f;

    AudioStreamStats downstreamAudioStreamStats = _receivedAudioStream.getAudioStreamStats();
    SharedNodePointer audioMixerNodePointer = NodeList::getInstance()->soloNodeOfType(NodeType::AudioMixer);
    if (!audioMixerNodePointer.isNull()) {
        audioInputBufferLatency = _audioInputMsecsReadStats.getWindowAverage();
        inputRingBufferLatency = getInputRingBufferAverageMsecsAvailable();
        networkRoundtripLatency = audioMixerNodePointer->getPingMs();
        mixerRingBufferLatency = _audioMixerAvatarStreamAudioStats._framesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
        outputRingBufferLatency = downstreamAudioStreamStats._framesAvailableAverage * BUFFER_SEND_INTERVAL_MSECS;
        audioOutputBufferLatency = _audioOutputMsecsUnplayedStats.getWindowAverage();
    }
    float totalLatency = audioInputBufferLatency + inputRingBufferLatency + networkRoundtripLatency
        + mixerRingBufferLatency + outputRingBufferLatency + audioOutputBufferLatency;

    sprintf(latencyStatString, " Audio input buffer: %7.2fms - avg msecs of samples read to the input ring buffer in last 10s", audioInputBufferLatency);
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);

    sprintf(latencyStatString, " Input ring buffer: %7.2fms - avg msecs of samples in input ring buffer in last 10s", inputRingBufferLatency);
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);

    sprintf(latencyStatString, " Network to mixer: %7.2fms - half of last ping value calculated by the node list", networkRoundtripLatency / 2.0f);
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);

    sprintf(latencyStatString, " AudioMixer ring buffer: %7.2fms - avg msecs of samples in audio mixer's ring buffer in last 10s", mixerRingBufferLatency);
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);

    sprintf(latencyStatString, " Network to client: %7.2fms - half of last ping value calculated by the node list", networkRoundtripLatency / 2.0f);
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);

    sprintf(latencyStatString, " Output ring buffer: %7.2fms - avg msecs of samples in output ring buffer in last 10s", outputRingBufferLatency);
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);

    sprintf(latencyStatString, " Audio output buffer: %7.2fms - avg msecs of samples in audio output buffer in last 10s", audioOutputBufferLatency);
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);

    sprintf(latencyStatString, " TOTAL: %7.2fms\n", totalLatency);
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, latencyStatString, color);

    verticalOffset += STATS_HEIGHT_PER_LINE; // blank line

    char clientUpstreamMicLabelString[] = "Upstream Mic Audio Packets Sent Gaps (by client):";
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, clientUpstreamMicLabelString, color);

    char stringBuffer[512];
    sprintf(stringBuffer, " Inter-packet timegaps (overall) | min: %9s, max: %9s, avg: %9s",
        formatUsecTime(_packetSentTimeGaps.getMin()).toLatin1().data(),
        formatUsecTime(_packetSentTimeGaps.getMax()).toLatin1().data(),
        formatUsecTime(_packetSentTimeGaps.getAverage()).toLatin1().data());
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);

    sprintf(stringBuffer, " Inter-packet timegaps (last 30s) | min: %9s, max: %9s, avg: %9s",
        formatUsecTime(_packetSentTimeGaps.getWindowMin()).toLatin1().data(),
        formatUsecTime(_packetSentTimeGaps.getWindowMax()).toLatin1().data(),
        formatUsecTime(_packetSentTimeGaps.getWindowAverage()).toLatin1().data());
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);

    verticalOffset += STATS_HEIGHT_PER_LINE; // blank line

    char upstreamMicLabelString[] = "Upstream mic audio stats (received and reported by audio-mixer):";
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamMicLabelString, color);

    renderAudioStreamStats(_audioMixerAvatarStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color);

    verticalOffset += STATS_HEIGHT_PER_LINE; // blank line

    char downstreamLabelString[] = "Downstream mixed audio stats:";
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamLabelString, color);

    renderAudioStreamStats(downstreamAudioStreamStats, horizontalOffset, verticalOffset, scale, rotation, font, color, true);

    if (_statsShowInjectedStreams) {

        foreach(const AudioStreamStats& injectedStreamAudioStats, _audioMixerInjectedStreamAudioStatsMap) {

            verticalOffset += STATS_HEIGHT_PER_LINE; // blank line

            char upstreamInjectedLabelString[512];
            sprintf(upstreamInjectedLabelString, "Upstream injected audio stats: stream ID: %s",
                injectedStreamAudioStats._streamIdentifier.toString().toLatin1().data());
            verticalOffset += STATS_HEIGHT_PER_LINE;
            drawText(horizontalOffset, verticalOffset, scale, rotation, font, upstreamInjectedLabelString, color);

            renderAudioStreamStats(injectedStreamAudioStats, horizontalOffset, verticalOffset, scale, rotation, font, color);
        }
    }
}
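
// totalLatency sums the six stage estimates once each; the full round-trip
// ping is added a single time to the total but displayed split into halves,
// as the "Network to mixer" and "Network to client" lines.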

void Audio::renderAudioStreamStats(const AudioStreamStats& streamStats, int horizontalOffset, int& verticalOffset,
    float scale, float rotation, int font, const float* color, bool isDownstreamStats) {

    char stringBuffer[512];

    sprintf(stringBuffer, " Packet loss | overall: %5.2f%% (%d lost), last_30s: %5.2f%% (%d lost)",
        streamStats._packetStreamStats.getLostRate() * 100.0f,
        streamStats._packetStreamStats._lost,
        streamStats._packetStreamWindowStats.getLostRate() * 100.0f,
        streamStats._packetStreamWindowStats._lost);
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);

    if (isDownstreamStats) {
        const float BUFFER_SEND_INTERVAL_MSECS = BUFFER_SEND_INTERVAL_USECS / (float)USECS_PER_MSEC;
        sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u+%d, available: %u+%d",
            streamStats._desiredJitterBufferFrames,
            streamStats._framesAvailableAverage,
            (int)(getAudioOutputAverageMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS),
            streamStats._framesAvailable,
            (int)(getAudioOutputMsecsUnplayed() / BUFFER_SEND_INTERVAL_MSECS));
    } else {
        sprintf(stringBuffer, " Ringbuffer frames | desired: %u, avg_available(10s): %u, available: %u",
            streamStats._desiredJitterBufferFrames,
            streamStats._framesAvailableAverage,
            streamStats._framesAvailable);
    }

    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);

    sprintf(stringBuffer, " Ringbuffer stats | starves: %u, prev_starve_lasted: %u, frames_dropped: %u, overflows: %u",
        streamStats._starveCount,
        streamStats._consecutiveNotMixedCount,
        streamStats._framesDropped,
        streamStats._overflowCount);
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);

    sprintf(stringBuffer, " Inter-packet timegaps (overall) | min: %9s, max: %9s, avg: %9s",
        formatUsecTime(streamStats._timeGapMin).toLatin1().data(),
        formatUsecTime(streamStats._timeGapMax).toLatin1().data(),
        formatUsecTime(streamStats._timeGapAverage).toLatin1().data());
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);

    sprintf(stringBuffer, " Inter-packet timegaps (last 30s) | min: %9s, max: %9s, avg: %9s",
        formatUsecTime(streamStats._timeGapWindowMin).toLatin1().data(),
        formatUsecTime(streamStats._timeGapWindowMax).toLatin1().data(),
        formatUsecTime(streamStats._timeGapWindowAverage).toLatin1().data());
    verticalOffset += STATS_HEIGHT_PER_LINE;
    drawText(horizontalOffset, verticalOffset, scale, rotation, font, stringBuffer, color);
}
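
// For downstream stats, the "+%d" terms convert the unplayed milliseconds
// still sitting in the audio output buffer into whole network frames, so the
// displayed availability also counts audio already handed to the device.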

void Audio::renderScope(int width, int height) {

    if (!_scopeEnabled) {
        return;
    }

    static const float backgroundColor[4] = { 0.4f, 0.4f, 0.4f, 0.6f };
    static const float gridColor[4] = { 0.7f, 0.7f, 0.7f, 1.0f };
    static const float inputColor[4] = { 0.3f, 1.0f, 0.3f, 1.0f };
    static const float outputLeftColor[4] = { 1.0f, 0.3f, 0.3f, 1.0f };
    static const float outputRightColor[4] = { 0.3f, 0.3f, 1.0f, 1.0f };
    static const int gridRows = 2;
    int gridCols = _framesPerScope;

    int x = (width - (int)SCOPE_WIDTH) / 2;
    int y = (height - (int)SCOPE_HEIGHT) / 2;
    int w = (int)SCOPE_WIDTH;
    int h = (int)SCOPE_HEIGHT;

    renderBackground(backgroundColor, x, y, w, h);
    renderGrid(gridColor, x, y, w, h, gridRows, gridCols);

    QMutexLocker lock(&_guard);
    renderLineStrip(inputColor, x, y, _samplesPerScope, _scopeInputOffset, _scopeInput);
    renderLineStrip(outputLeftColor, x, y, _samplesPerScope, _scopeOutputOffset, _scopeOutputLeft);
    renderLineStrip(outputRightColor, x, y, _samplesPerScope, _scopeOutputOffset, _scopeOutputRight);
}
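
// The _guard mutex serializes this render pass against addBufferToScope and
// addSilenceToScope, which write into the same scope byte arrays.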

void Audio::renderBackground(const float* color, int x, int y, int width, int height) {

    glColor4fv(color);
    DependencyManager::get<GeometryCache>()->renderQuad(x, y, width, height);
    glColor4f(1, 1, 1, 1);
}

void Audio::renderGrid(const float* color, int x, int y, int width, int height, int rows, int cols) {

    glColor4fv(color);
    glBegin(GL_LINES);

    int dx = width / cols;
    int dy = height / rows;
    int tx = x;
    int ty = y;

    // Draw horizontal grid lines
    for (int i = rows + 1; --i >= 0; ) {
        glVertex2i(x, ty);
        glVertex2i(x + width, ty);
        ty += dy;
    }
    // Draw vertical grid lines
    for (int i = cols + 1; --i >= 0; ) {
        glVertex2i(tx, y);
        glVertex2i(tx, y + height);
        tx += dx;
    }
    glEnd();
    glColor4f(1, 1, 1, 1);
}

void Audio::renderLineStrip(const float* color, int x, int y, int n, int offset, const QByteArray* byteArray) {

    glColor4fv(color);
    glBegin(GL_LINE_STRIP);

    int16_t sample;
    int16_t* samples = ((int16_t*) byteArray->data()) + offset;
    int numSamplesToAverage = _framesPerScope / DEFAULT_FRAMES_PER_SCOPE;
    int count = (n - offset) / numSamplesToAverage;
    int remainder = (n - offset) % numSamplesToAverage;
    y += SCOPE_HEIGHT / 2;

    // Compute and draw the sample averages from the offset position
    for (int i = count; --i >= 0; ) {
        sample = 0;
        for (int j = numSamplesToAverage; --j >= 0; ) {
            sample += *samples++;
        }
        sample /= numSamplesToAverage;
        glVertex2i(x++, y - sample);
    }

    // Compute and draw the sample average across the wrap boundary
    if (remainder != 0) {
        sample = 0;
        for (int j = remainder; --j >= 0; ) {
            sample += *samples++;
        }

        samples = (int16_t*) byteArray->data();

        for (int j = numSamplesToAverage - remainder; --j >= 0; ) {
            sample += *samples++;
        }
        sample /= numSamplesToAverage;
        glVertex2i(x++, y - sample);
    } else {
        samples = (int16_t*) byteArray->data();
    }

    // Compute and draw the sample average from the beginning to the offset
    count = (offset - remainder) / numSamplesToAverage;
    for (int i = count; --i >= 0; ) {
        sample = 0;
        for (int j = numSamplesToAverage; --j >= 0; ) {
            sample += *samples++;
        }
        sample /= numSamplesToAverage;
        glVertex2i(x++, y - sample);
    }
    glEnd();
    glColor4f(1, 1, 1, 1);
}
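
// renderLineStrip downsamples by averaging numSamplesToAverage consecutive
// samples per plotted point, so longer scope settings still span SCOPE_WIDTH;
// the three passes cover the samples after the ring-buffer offset, the one
// averaging window that straddles the wrap point, and the samples before it.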

>>>>>>> 315b932445c04e84f0f20227c7f2ff02bb2a2d23

void Audio::outputFormatChanged() {
    int outputFormatChannelCountTimesSampleRate = _outputFormat.channelCount() * _outputFormat.sampleRate();
    _outputFrameSize = AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL * outputFormatChannelCountTimesSampleRate / _desiredOutputFormat.sampleRate();