Mirror of https://github.com/overte-org/overte.git, synced 2025-08-10 01:24:36 +02:00
Cleaning up android build warnings

This commit is contained in:
parent f3aa534327
commit 403499bf32

7 changed files with 90 additions and 134 deletions
@@ -8494,6 +8494,8 @@ void Application::toggleLogDialog() {
         Qt::WindowFlags flags = _logDialog->windowFlags() | Qt::Tool;
         _logDialog->setWindowFlags(flags);
     }
+#else
+    Q_UNUSED(keepOnTop)
 #endif
 }
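The two added lines silence an unused-parameter warning in the Android build, where the body of toggleLogDialog() is compiled out: Q_UNUSED is the standard Qt macro for marking a deliberately unused parameter. A minimal sketch of the same idiom (the function name below is hypothetical, not from this commit):

    #include <QtGlobal>
    #include <cstdio>

    // The parameter only matters on non-Android builds, so Android builds
    // mark it as used to keep -Wunused-parameter quiet.
    static void toggleDialog(bool keepOnTop) {
    #if !defined(Q_OS_ANDROID)
        std::printf("keepOnTop = %d\n", keepOnTop);
    #else
        Q_UNUSED(keepOnTop)
    #endif
    }

    int main() {
        toggleDialog(true);
        return 0;
    }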
@@ -60,25 +60,13 @@ const int AudioClient::MIN_BUFFER_FRAMES = 1;
 
 const int AudioClient::MAX_BUFFER_FRAMES = 20;
 
-static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 100;
-
 #if defined(Q_OS_ANDROID)
 static const int CHECK_INPUT_READS_MSECS = 2000;
 static const int MIN_READS_TO_CONSIDER_INPUT_ALIVE = 10;
 #endif
 
-static const auto DEFAULT_POSITION_GETTER = []{ return Vectors::ZERO; };
-static const auto DEFAULT_ORIENTATION_GETTER = [] { return Quaternions::IDENTITY; };
+const AudioClient::AudioPositionGetter AudioClient::DEFAULT_POSITION_GETTER = []{ return Vectors::ZERO; };
+const AudioClient::AudioOrientationGetter AudioClient::DEFAULT_ORIENTATION_GETTER = [] { return Quaternions::IDENTITY; };
 
-static const int DEFAULT_BUFFER_FRAMES = 1;
-
-// OUTPUT_CHANNEL_COUNT is audio pipeline output format, which is always 2 channel.
-// _outputFormat.channelCount() is device output format, which may be 1 or multichannel.
-static const int OUTPUT_CHANNEL_COUNT = 2;
-
-static const bool DEFAULT_STARVE_DETECTION_ENABLED = true;
-static const int STARVE_DETECTION_THRESHOLD = 3;
-static const int STARVE_DETECTION_PERIOD = 10 * 1000; // 10 Seconds
-
 Setting::Handle<bool> dynamicJitterBufferEnabled("dynamicJitterBuffersEnabled",
     InboundAudioStream::DEFAULT_DYNAMIC_JITTER_BUFFER_ENABLED);
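The constants deleted here are not gone: they reappear as private static members of AudioClient in the header hunk below, so the new default member initializers can reference them, and the position/orientation getters turn from file-scope auto lambdas into typed static members defined in the .cpp. A minimal sketch of that pattern, with hypothetical names:

    // widget.h
    #include <functional>

    class Widget {
    public:
        using PositionGetter = std::function<int()>;
    private:
        static const int CAPACITY{ 100 };             // const integral: in-class initializer allowed
        static const PositionGetter DEFAULT_GETTER;   // non-literal type: defined in the .cpp
        int _capacity{ CAPACITY };                    // default member initializers can use the constant
        PositionGetter _getter{ DEFAULT_GETTER };
    };

    // widget.cpp
    const Widget::PositionGetter Widget::DEFAULT_GETTER = [] { return 0; };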
@@ -272,55 +260,7 @@ static inline float convertToFloat(int16_t sample) {
     return (float)sample * (1 / 32768.0f);
 }
 
-AudioClient::AudioClient() :
-    AbstractAudioInterface(),
-    _gate(this),
-    _audioInput(NULL),
-    _dummyAudioInput(NULL),
-    _desiredInputFormat(),
-    _inputFormat(),
-    _numInputCallbackBytes(0),
-    _audioOutput(NULL),
-    _desiredOutputFormat(),
-    _outputFormat(),
-    _outputFrameSize(0),
-    _numOutputCallbackBytes(0),
-    _loopbackAudioOutput(NULL),
-    _loopbackOutputDevice(NULL),
-    _inputRingBuffer(0),
-    _localInjectorsStream(0, 1),
-    _receivedAudioStream(RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES),
-    _isStereoInput(false),
-    _outputStarveDetectionStartTimeMsec(0),
-    _outputStarveDetectionCount(0),
-    _outputBufferSizeFrames("audioOutputBufferFrames", DEFAULT_BUFFER_FRAMES),
-    _sessionOutputBufferSizeFrames(_outputBufferSizeFrames.get()),
-    _outputStarveDetectionEnabled("audioOutputStarveDetectionEnabled", DEFAULT_STARVE_DETECTION_ENABLED),
-    _lastRawInputLoudness(0.0f),
-    _lastSmoothedRawInputLoudness(0.0f),
-    _lastInputLoudness(0.0f),
-    _timeSinceLastClip(-1.0f),
-    _muted(false),
-    _shouldEchoLocally(false),
-    _shouldEchoToServer(false),
-    _isNoiseGateEnabled(true),
-    _isAECEnabled(true),
-    _reverb(false),
-    _reverbOptions(&_scriptReverbOptions),
-    _inputToNetworkResampler(NULL),
-    _networkToOutputResampler(NULL),
-    _localToOutputResampler(NULL),
-    _loopbackResampler(NULL),
-    _audioLimiter(AudioConstants::SAMPLE_RATE, OUTPUT_CHANNEL_COUNT),
-    _outgoingAvatarAudioSequenceNumber(0),
-    _audioOutputIODevice(_localInjectorsStream, _receivedAudioStream, this),
-    _stats(&_receivedAudioStream),
-    _positionGetter(DEFAULT_POSITION_GETTER),
-#if defined(Q_OS_ANDROID)
-    _checkInputTimer(this),
-    _isHeadsetPluggedIn(false),
-#endif
-    _orientationGetter(DEFAULT_ORIENTATION_GETTER) {
-
+AudioClient::AudioClient() {
     // avoid putting a lock in the device callback
     assert(_localSamplesAvailable.is_lock_free());
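The entire member-initializer list can be dropped because the same defaults move into the class definition as C++11 default member initializers, which is what the AudioClient.h hunks below add. A minimal before/after sketch of the idea (types simplified, not the real members):

    // Before: every default spelled out in the constructor's initializer list.
    class ClientBefore {
    public:
        ClientBefore() : _buffer(nullptr), _frames(0), _muted(false) {}
    private:
        int* _buffer;
        int _frames;
        bool _muted;
    };

    // After: default member initializers carry the defaults in the class body,
    // so the constructor no longer needs an initializer list at all.
    class ClientAfter {
    public:
        ClientAfter() = default;
    private:
        int* _buffer{ nullptr };
        int _frames{ 0 };
        bool _muted{ false };
    };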
@@ -292,6 +292,18 @@ protected:
     virtual void customDeleter() override;
 
 private:
+    static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES{ 100 };
+    // OUTPUT_CHANNEL_COUNT is audio pipeline output format, which is always 2 channel.
+    // _outputFormat.channelCount() is device output format, which may be 1 or multichannel.
+    static const int OUTPUT_CHANNEL_COUNT{ 2 };
+    static const bool DEFAULT_STARVE_DETECTION_ENABLED{ true };
+    static const int STARVE_DETECTION_THRESHOLD{ 3 };
+    static const int STARVE_DETECTION_PERIOD{ 10 * 1000 }; // 10 Seconds
+
+    static const AudioPositionGetter DEFAULT_POSITION_GETTER;
+    static const AudioOrientationGetter DEFAULT_ORIENTATION_GETTER;
+    static const int DEFAULT_BUFFER_FRAMES{ 1 };
+
     friend class CheckDevicesThread;
     friend class LocalInjectorsThread;
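A static const data member may carry an in-class brace initializer only if it is of integral or enumeration type (constexpr members relax this), which is why the int and bool constants get their values here while the getter members are merely declared and then defined in the .cpp. A short sketch of the rule, with hypothetical names:

    struct Limits {
        static const int CAPACITY{ 100 };   // const integral: in-class initializer is allowed
        static const bool ENABLED{ true };
    };

    // Pre-C++17, an out-of-line definition is still required if a member is
    // ODR-used (for example, bound to a const reference).
    const int Limits::CAPACITY;
    const bool Limits::ENABLED;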
@@ -307,9 +319,9 @@ private:
     float gainForSource(float distance, float volume);
 
 #ifdef Q_OS_ANDROID
-    QTimer _checkInputTimer;
+    QTimer _checkInputTimer{ this };
     long _inputReadsSinceLastCheck = 0l;
-    bool _isHeadsetPluggedIn;
+    bool _isHeadsetPluggedIn { false };
 #endif
 
     class Gate {
@@ -336,68 +348,68 @@ private:
         bool _isSimulatingJitter{ false };
     };
 
-    Gate _gate;
+    Gate _gate{ this };
 
     Mutex _injectorsMutex;
-    QAudioInput* _audioInput;
-    QTimer* _dummyAudioInput;
+    QAudioInput* _audioInput{ nullptr };
+    QTimer* _dummyAudioInput{ nullptr };
     QAudioFormat _desiredInputFormat;
     QAudioFormat _inputFormat;
-    QIODevice* _inputDevice;
-    int _numInputCallbackBytes;
-    QAudioOutput* _audioOutput;
+    QIODevice* _inputDevice{ nullptr };
+    int _numInputCallbackBytes{ 0 };
+    QAudioOutput* _audioOutput{ nullptr };
     std::atomic<bool> _audioOutputInitialized { false };
     QAudioFormat _desiredOutputFormat;
     QAudioFormat _outputFormat;
-    int _outputFrameSize;
-    int _numOutputCallbackBytes;
-    QAudioOutput* _loopbackAudioOutput;
-    QIODevice* _loopbackOutputDevice;
-    AudioRingBuffer _inputRingBuffer;
-    LocalInjectorsStream _localInjectorsStream;
+    int _outputFrameSize{ 0 };
+    int _numOutputCallbackBytes{ 0 };
+    QAudioOutput* _loopbackAudioOutput{ nullptr };
+    QIODevice* _loopbackOutputDevice{ nullptr };
+    AudioRingBuffer _inputRingBuffer{ 0 };
+    LocalInjectorsStream _localInjectorsStream{ 0 , 1 };
     // In order to use _localInjectorsStream as a lock-free pipe,
     // use it with a single producer/consumer, and track available samples and injectors
     std::atomic<int> _localSamplesAvailable { 0 };
     std::atomic<bool> _localInjectorsAvailable { false };
-    MixedProcessedAudioStream _receivedAudioStream;
-    bool _isStereoInput;
+    MixedProcessedAudioStream _receivedAudioStream{ RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES };
+    bool _isStereoInput{ false };
     std::atomic<bool> _enablePeakValues { false };
 
-    quint64 _outputStarveDetectionStartTimeMsec;
-    int _outputStarveDetectionCount;
+    quint64 _outputStarveDetectionStartTimeMsec{ 0 };
+    int _outputStarveDetectionCount { 0 };
 
-    Setting::Handle<int> _outputBufferSizeFrames;
-    int _sessionOutputBufferSizeFrames;
-    Setting::Handle<bool> _outputStarveDetectionEnabled;
+    Setting::Handle<int> _outputBufferSizeFrames{"audioOutputBufferFrames", DEFAULT_BUFFER_FRAMES};
+    int _sessionOutputBufferSizeFrames{ _outputBufferSizeFrames.get() };
+    Setting::Handle<bool> _outputStarveDetectionEnabled{ "audioOutputStarveDetectionEnabled", DEFAULT_STARVE_DETECTION_ENABLED};
 
     StDev _stdev;
     QElapsedTimer _timeSinceLastReceived;
-    float _lastRawInputLoudness; // before mute/gate
-    float _lastSmoothedRawInputLoudness;
-    float _lastInputLoudness; // after mute/gate
-    float _timeSinceLastClip;
+    float _lastRawInputLoudness{ 0.0f }; // before mute/gate
+    float _lastSmoothedRawInputLoudness{ 0.0f };
+    float _lastInputLoudness{ 0.0f }; // after mute/gate
+    float _timeSinceLastClip{ -1.0f };
     int _totalInputAudioSamples;
 
-    bool _muted;
-    bool _shouldEchoLocally;
-    bool _shouldEchoToServer;
-    bool _isNoiseGateEnabled;
+    bool _muted{ false };
+    bool _shouldEchoLocally{ false };
+    bool _shouldEchoToServer{ false };
+    bool _isNoiseGateEnabled{ true };
     bool _warnWhenMuted;
-    bool _isAECEnabled;
+    bool _isAECEnabled{ true };
 
-    bool _reverb;
+    bool _reverb{ false };
     AudioEffectOptions _scriptReverbOptions;
     AudioEffectOptions _zoneReverbOptions;
-    AudioEffectOptions* _reverbOptions;
+    AudioEffectOptions* _reverbOptions{ &_scriptReverbOptions };
     AudioReverb _sourceReverb { AudioConstants::SAMPLE_RATE };
     AudioReverb _listenerReverb { AudioConstants::SAMPLE_RATE };
     AudioReverb _localReverb { AudioConstants::SAMPLE_RATE };
 
     // possible streams needed for resample
-    AudioSRC* _inputToNetworkResampler;
-    AudioSRC* _networkToOutputResampler;
-    AudioSRC* _localToOutputResampler;
-    AudioSRC* _loopbackResampler;
+    AudioSRC* _inputToNetworkResampler{ nullptr };
+    AudioSRC* _networkToOutputResampler{ nullptr };
+    AudioSRC* _localToOutputResampler{ nullptr };
+    AudioSRC* _loopbackResampler{ nullptr };
 
     // for network audio (used by network audio thread)
     int16_t _networkScratchBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_AMBISONIC];
@@ -416,7 +428,7 @@ private:
     int16_t _localScratchBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_AMBISONIC];
     float* _localOutputMixBuffer { NULL };
     Mutex _localAudioMutex;
-    AudioLimiter _audioLimiter;
+    AudioLimiter _audioLimiter{ AudioConstants::SAMPLE_RATE, OUTPUT_CHANNEL_COUNT };
 
     // Adds Reverb
     void configureReverb();
@@ -445,17 +457,17 @@ private:
     int calculateNumberOfInputCallbackBytes(const QAudioFormat& format) const;
     int calculateNumberOfFrameSamples(int numBytes) const;
 
-    quint16 _outgoingAvatarAudioSequenceNumber;
+    quint16 _outgoingAvatarAudioSequenceNumber{ 0 };
 
-    AudioOutputIODevice _audioOutputIODevice;
+    AudioOutputIODevice _audioOutputIODevice{ _localInjectorsStream, _receivedAudioStream, this };
 
-    AudioIOStats _stats;
+    AudioIOStats _stats{ &_receivedAudioStream };
 
     AudioGate* _audioGate { nullptr };
     bool _audioGateOpen { true };
 
-    AudioPositionGetter _positionGetter;
-    AudioOrientationGetter _orientationGetter;
+    AudioPositionGetter _positionGetter{ DEFAULT_POSITION_GETTER };
+    AudioOrientationGetter _orientationGetter{ DEFAULT_ORIENTATION_GETTER };
 
     glm::vec3 avatarBoundingBoxCorner;
     glm::vec3 avatarBoundingBoxScale;
@@ -21,8 +21,10 @@ const uint32_t MAX_RANGE_QUERY_DEPTH = 1;
 static bool timeElapsed = true;
 #else
 const uint32_t MAX_RANGE_QUERY_DEPTH = 10000;
+#if !defined(USE_GLES)
 static bool timeElapsed = false;
 #endif
+#endif
 
 #if defined(USE_GLES)
 static bool hasTimerExtension() {
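The added guard compiles the second timeElapsed definition out of GLES builds, where it is evidently never referenced, removing an unused-variable warning; the idiom is simply to define the static only in the configurations that use it:

    // Sketch of the same idiom: define the flag only where the non-GLES
    // query path that reads it is compiled in.
    #if !defined(USE_GLES)
    static bool timeElapsed = false;
    #endif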
@@ -17,34 +17,6 @@
 
 namespace gpu { namespace gles {
 
-// returns the FOV from the projection matrix
-static inline vec4 extractFov( const glm::mat4& m) {
-    static const std::array<vec4, 4> CLIPS{ {
-        { 1, 0, 0, 1 },
-        { -1, 0, 0, 1 },
-        { 0, 1, 0, 1 },
-        { 0, -1, 0, 1 }
-    } };
-
-    glm::mat4 mt = glm::transpose(m);
-    vec4 v, result;
-    // Left
-    v = mt * CLIPS[0];
-    result.x = -atanf(v.z / v.x);
-    // Right
-    v = mt * CLIPS[1];
-    result.y = atanf(v.z / v.x);
-    // Down
-    v = mt * CLIPS[2];
-    result.z = -atanf(v.z / v.y);
-    // Up
-    v = mt * CLIPS[3];
-    result.w = atanf(v.z / v.y);
-    return result;
-}
-
 class GLESFramebuffer : public gl::GLFramebuffer {
     using Parent = gl::GLFramebuffer;
     static GLuint allocate() {
@@ -35,7 +35,7 @@ namespace shader {
 const Dialect DEFAULT_DIALECT = Dialect::glsl310es;
 
 const std::vector<Dialect>& allDialects() {
-    static const std::vector<Dialect> ALL_DIALECTS{ { Dialect::glsl310es } };
+    static const std::vector<Dialect> ALL_DIALECTS{ Dialect::glsl310es };
     return ALL_DIALECTS;
 }
 
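Dropping the inner braces removes a redundant level of list-initialization: the one-element vector can be built directly from { Dialect::glsl310es }, and some compilers warn about the extra braces. A minimal sketch:

    #include <vector>

    enum class Dialect { glsl310es };

    // One level of braces is enough for std::vector's initializer-list constructor;
    // the doubled braces in the old line added nothing.
    static const std::vector<Dialect> ALL_DIALECTS{ Dialect::glsl310es };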
@@ -14,6 +14,8 @@
 
 #include <stdint.h>
 
+#include <array>
+
 #include <glm/glm.hpp>
 #include <glm/gtc/quaternion.hpp>
 #include <glm/gtx/quaternion.hpp>
@@ -364,4 +366,30 @@ inline int fastLrintf(float x) {
 #endif
 }
 
+// returns the FOV from the projection matrix
+inline glm::vec4 extractFov( const glm::mat4& m) {
+    static const std::array<glm::vec4, 4> CLIPS{ {
+        { 1, 0, 0, 1 },
+        { -1, 0, 0, 1 },
+        { 0, 1, 0, 1 },
+        { 0, -1, 0, 1 }
+    } };
+
+    glm::mat4 mt = glm::transpose(m);
+    glm::vec4 v, result;
+    // Left
+    v = mt * CLIPS[0];
+    result.x = -atanf(v.z / v.x);
+    // Right
+    v = mt * CLIPS[1];
+    result.y = atanf(v.z / v.x);
+    // Down
+    v = mt * CLIPS[2];
+    result.z = -atanf(v.z / v.y);
+    // Up
+    v = mt * CLIPS[3];
+    result.w = atanf(v.z / v.y);
+    return result;
+}
+
 #endif // hifi_GLMHelpers_h
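This is the same extractFov() helper that the GLES backend hunk above deleted, now hosted in GLMHelpers.h as an inline function; the std::array of clip planes is why <array> was added to the includes earlier. Each component is the half-angle of one frustum edge (x/y/z/w = left/right/down/up), per the comments in the function. A usage sketch, assuming GLMHelpers.h is on the include path:

    #include <cstdio>
    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_transform.hpp>
    #include "GLMHelpers.h"   // provides extractFov() after this change

    int main() {
        glm::mat4 proj = glm::perspective(glm::radians(90.0f), 16.0f / 9.0f, 0.1f, 100.0f);
        glm::vec4 fov = extractFov(proj);  // half-angles in radians
        std::printf("left %f right %f down %f up %f\n", fov.x, fov.y, fov.z, fov.w);
        return 0;
    }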