diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp
index 6a320e53ee..21d44a4145 100644
--- a/interface/src/Application.cpp
+++ b/interface/src/Application.cpp
@@ -8494,6 +8494,8 @@ void Application::toggleLogDialog() {
             Qt::WindowFlags flags = _logDialog->windowFlags() | Qt::Tool;
             _logDialog->setWindowFlags(flags);
         }
+#else
+        Q_UNUSED(keepOnTop)
 #endif
     }
 
diff --git a/libraries/audio-client/src/AudioClient.cpp b/libraries/audio-client/src/AudioClient.cpp
index 5e7a49c015..8f71b1bd33 100644
--- a/libraries/audio-client/src/AudioClient.cpp
+++ b/libraries/audio-client/src/AudioClient.cpp
@@ -60,25 +60,13 @@ const int AudioClient::MIN_BUFFER_FRAMES = 1;
 
 const int AudioClient::MAX_BUFFER_FRAMES = 20;
 
-static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 100;
-
 #if defined(Q_OS_ANDROID)
 static const int CHECK_INPUT_READS_MSECS = 2000;
 static const int MIN_READS_TO_CONSIDER_INPUT_ALIVE = 10;
 #endif
 
-static const auto DEFAULT_POSITION_GETTER = []{ return Vectors::ZERO; };
-static const auto DEFAULT_ORIENTATION_GETTER = [] { return Quaternions::IDENTITY; };
-
-static const int DEFAULT_BUFFER_FRAMES = 1;
-
-// OUTPUT_CHANNEL_COUNT is audio pipeline output format, which is always 2 channel.
-// _outputFormat.channelCount() is device output format, which may be 1 or multichannel.
-static const int OUTPUT_CHANNEL_COUNT = 2;
-
-static const bool DEFAULT_STARVE_DETECTION_ENABLED = true;
-static const int STARVE_DETECTION_THRESHOLD = 3;
-static const int STARVE_DETECTION_PERIOD = 10 * 1000; // 10 Seconds
+const AudioClient::AudioPositionGetter AudioClient::DEFAULT_POSITION_GETTER = [] { return Vectors::ZERO; };
+const AudioClient::AudioOrientationGetter AudioClient::DEFAULT_ORIENTATION_GETTER = [] { return Quaternions::IDENTITY; };
 
 Setting::Handle<bool> dynamicJitterBufferEnabled("dynamicJitterBuffersEnabled",
     InboundAudioStream::DEFAULT_DYNAMIC_JITTER_BUFFER_ENABLED);
@@ -272,55 +260,7 @@ static inline float convertToFloat(int16_t sample) {
     return (float)sample * (1 / 32768.0f);
 }
 
-AudioClient::AudioClient() :
-    AbstractAudioInterface(),
-    _gate(this),
-    _audioInput(NULL),
-    _dummyAudioInput(NULL),
-    _desiredInputFormat(),
-    _inputFormat(),
-    _numInputCallbackBytes(0),
-    _audioOutput(NULL),
-    _desiredOutputFormat(),
-    _outputFormat(),
-    _outputFrameSize(0),
-    _numOutputCallbackBytes(0),
-    _loopbackAudioOutput(NULL),
-    _loopbackOutputDevice(NULL),
-    _inputRingBuffer(0),
-    _localInjectorsStream(0, 1),
-    _receivedAudioStream(RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES),
-    _isStereoInput(false),
-    _outputStarveDetectionStartTimeMsec(0),
-    _outputStarveDetectionCount(0),
-    _outputBufferSizeFrames("audioOutputBufferFrames", DEFAULT_BUFFER_FRAMES),
-    _sessionOutputBufferSizeFrames(_outputBufferSizeFrames.get()),
-    _outputStarveDetectionEnabled("audioOutputStarveDetectionEnabled", DEFAULT_STARVE_DETECTION_ENABLED),
-    _lastRawInputLoudness(0.0f),
-    _lastSmoothedRawInputLoudness(0.0f),
-    _lastInputLoudness(0.0f),
-    _timeSinceLastClip(-1.0f),
-    _muted(false),
-    _shouldEchoLocally(false),
-    _shouldEchoToServer(false),
-    _isNoiseGateEnabled(true),
-    _isAECEnabled(true),
-    _reverb(false),
-    _reverbOptions(&_scriptReverbOptions),
-    _inputToNetworkResampler(NULL),
-    _networkToOutputResampler(NULL),
-    _localToOutputResampler(NULL),
-    _loopbackResampler(NULL),
-    _audioLimiter(AudioConstants::SAMPLE_RATE, OUTPUT_CHANNEL_COUNT),
-    _outgoingAvatarAudioSequenceNumber(0),
-    _audioOutputIODevice(_localInjectorsStream, _receivedAudioStream, this),
-    _stats(&_receivedAudioStream),
-    _positionGetter(DEFAULT_POSITION_GETTER),
-#if defined(Q_OS_ANDROID)
-    _checkInputTimer(this),
-    _isHeadsetPluggedIn(false),
-#endif
-    _orientationGetter(DEFAULT_ORIENTATION_GETTER) {
+AudioClient::AudioClient() {
 
     // avoid putting a lock in the device callback
     assert(_localSamplesAvailable.is_lock_free());
diff --git a/libraries/audio-client/src/AudioClient.h b/libraries/audio-client/src/AudioClient.h
index a13943f22e..3adb1a18bd 100644
--- a/libraries/audio-client/src/AudioClient.h
+++ b/libraries/audio-client/src/AudioClient.h
@@ -292,6 +292,18 @@ protected:
     virtual void customDeleter() override;
 
 private:
+    static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES{ 100 };
+    // OUTPUT_CHANNEL_COUNT is audio pipeline output format, which is always 2 channel.
+    // _outputFormat.channelCount() is device output format, which may be 1 or multichannel.
+    static const int OUTPUT_CHANNEL_COUNT{ 2 };
+    static const bool DEFAULT_STARVE_DETECTION_ENABLED{ true };
+    static const int STARVE_DETECTION_THRESHOLD{ 3 };
+    static const int STARVE_DETECTION_PERIOD{ 10 * 1000 }; // 10 Seconds
+
+    static const AudioPositionGetter DEFAULT_POSITION_GETTER;
+    static const AudioOrientationGetter DEFAULT_ORIENTATION_GETTER;
+    static const int DEFAULT_BUFFER_FRAMES{ 1 };
+
     friend class CheckDevicesThread;
     friend class LocalInjectorsThread;
 
@@ -307,9 +319,9 @@ private:
     float gainForSource(float distance, float volume);
 
 #ifdef Q_OS_ANDROID
-    QTimer _checkInputTimer;
+    QTimer _checkInputTimer{ this };
     long _inputReadsSinceLastCheck = 0l;
-    bool _isHeadsetPluggedIn;
+    bool _isHeadsetPluggedIn{ false };
 #endif
 
     class Gate {
@@ -336,68 +348,68 @@ private:
         bool _isSimulatingJitter{ false };
     };
 
-    Gate _gate;
+    Gate _gate{ this };
 
     Mutex _injectorsMutex;
-    QAudioInput* _audioInput;
-    QTimer* _dummyAudioInput;
+    QAudioInput* _audioInput{ nullptr };
+    QTimer* _dummyAudioInput{ nullptr };
     QAudioFormat _desiredInputFormat;
     QAudioFormat _inputFormat;
-    QIODevice* _inputDevice;
-    int _numInputCallbackBytes;
-    QAudioOutput* _audioOutput;
+    QIODevice* _inputDevice{ nullptr };
+    int _numInputCallbackBytes{ 0 };
+    QAudioOutput* _audioOutput{ nullptr };
     std::atomic<bool> _audioOutputInitialized { false };
     QAudioFormat _desiredOutputFormat;
     QAudioFormat _outputFormat;
-    int _outputFrameSize;
-    int _numOutputCallbackBytes;
-    QAudioOutput* _loopbackAudioOutput;
-    QIODevice* _loopbackOutputDevice;
-    AudioRingBuffer _inputRingBuffer;
-    LocalInjectorsStream _localInjectorsStream;
+    int _outputFrameSize{ 0 };
+    int _numOutputCallbackBytes{ 0 };
+    QAudioOutput* _loopbackAudioOutput{ nullptr };
+    QIODevice* _loopbackOutputDevice{ nullptr };
+    AudioRingBuffer _inputRingBuffer{ 0 };
+    LocalInjectorsStream _localInjectorsStream{ 0, 1 };
     // In order to use _localInjectorsStream as a lock-free pipe,
     // use it with a single producer/consumer, and track available samples and injectors
     std::atomic<int> _localSamplesAvailable { 0 };
     std::atomic<bool> _localInjectorsAvailable { false };
-    MixedProcessedAudioStream _receivedAudioStream;
-    bool _isStereoInput;
+    MixedProcessedAudioStream _receivedAudioStream{ RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES };
+    bool _isStereoInput{ false };
     std::atomic<bool> _enablePeakValues { false };
 
-    quint64 _outputStarveDetectionStartTimeMsec;
-    int _outputStarveDetectionCount;
+    quint64 _outputStarveDetectionStartTimeMsec{ 0 };
+    int _outputStarveDetectionCount{ 0 };
 
-    Setting::Handle<int> _outputBufferSizeFrames;
-    int _sessionOutputBufferSizeFrames;
-    Setting::Handle<bool> _outputStarveDetectionEnabled;
+    Setting::Handle<int> _outputBufferSizeFrames{ "audioOutputBufferFrames", DEFAULT_BUFFER_FRAMES };
+    int _sessionOutputBufferSizeFrames{ _outputBufferSizeFrames.get() };
+    Setting::Handle<bool> _outputStarveDetectionEnabled{ "audioOutputStarveDetectionEnabled", DEFAULT_STARVE_DETECTION_ENABLED };
 
     StDev _stdev;
     QElapsedTimer _timeSinceLastReceived;
-    float _lastRawInputLoudness;    // before mute/gate
-    float _lastSmoothedRawInputLoudness;
-    float _lastInputLoudness;       // after mute/gate
-    float _timeSinceLastClip;
+    float _lastRawInputLoudness{ 0.0f };    // before mute/gate
+    float _lastSmoothedRawInputLoudness{ 0.0f };
+    float _lastInputLoudness{ 0.0f };       // after mute/gate
+    float _timeSinceLastClip{ -1.0f };
     int _totalInputAudioSamples;
 
-    bool _muted;
-    bool _shouldEchoLocally;
-    bool _shouldEchoToServer;
-    bool _isNoiseGateEnabled;
+    bool _muted{ false };
+    bool _shouldEchoLocally{ false };
+    bool _shouldEchoToServer{ false };
+    bool _isNoiseGateEnabled{ true };
     bool _warnWhenMuted;
-    bool _isAECEnabled;
+    bool _isAECEnabled{ true };
 
-    bool _reverb;
+    bool _reverb{ false };
     AudioEffectOptions _scriptReverbOptions;
     AudioEffectOptions _zoneReverbOptions;
-    AudioEffectOptions* _reverbOptions;
+    AudioEffectOptions* _reverbOptions{ &_scriptReverbOptions };
     AudioReverb _sourceReverb { AudioConstants::SAMPLE_RATE };
     AudioReverb _listenerReverb { AudioConstants::SAMPLE_RATE };
     AudioReverb _localReverb { AudioConstants::SAMPLE_RATE };
 
     // possible streams needed for resample
-    AudioSRC* _inputToNetworkResampler;
-    AudioSRC* _networkToOutputResampler;
-    AudioSRC* _localToOutputResampler;
-    AudioSRC* _loopbackResampler;
+    AudioSRC* _inputToNetworkResampler{ nullptr };
+    AudioSRC* _networkToOutputResampler{ nullptr };
+    AudioSRC* _localToOutputResampler{ nullptr };
+    AudioSRC* _loopbackResampler{ nullptr };
 
     // for network audio (used by network audio thread)
     int16_t _networkScratchBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_AMBISONIC];
@@ -416,7 +428,7 @@ private:
     int16_t _localScratchBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_AMBISONIC];
     float* _localOutputMixBuffer { NULL };
     Mutex _localAudioMutex;
-    AudioLimiter _audioLimiter;
+    AudioLimiter _audioLimiter{ AudioConstants::SAMPLE_RATE, OUTPUT_CHANNEL_COUNT };
 
     // Adds Reverb
     void configureReverb();
@@ -445,17 +457,17 @@ private:
     int calculateNumberOfInputCallbackBytes(const QAudioFormat& format) const;
     int calculateNumberOfFrameSamples(int numBytes) const;
 
-    quint16 _outgoingAvatarAudioSequenceNumber;
+    quint16 _outgoingAvatarAudioSequenceNumber{ 0 };
 
-    AudioOutputIODevice _audioOutputIODevice;
+    AudioOutputIODevice _audioOutputIODevice{ _localInjectorsStream, _receivedAudioStream, this };
 
-    AudioIOStats _stats;
+    AudioIOStats _stats{ &_receivedAudioStream };
 
     AudioGate* _audioGate { nullptr };
     bool _audioGateOpen { true };
 
-    AudioPositionGetter _positionGetter;
-    AudioOrientationGetter _orientationGetter;
+    AudioPositionGetter _positionGetter{ DEFAULT_POSITION_GETTER };
+    AudioOrientationGetter _orientationGetter{ DEFAULT_ORIENTATION_GETTER };
 
     glm::vec3 avatarBoundingBoxCorner;
     glm::vec3 avatarBoundingBoxScale;
diff --git a/libraries/gpu-gl-common/src/gpu/gl/GLBackendQuery.cpp b/libraries/gpu-gl-common/src/gpu/gl/GLBackendQuery.cpp
index 7f61ca78f6..1cff06d919 100644
--- a/libraries/gpu-gl-common/src/gpu/gl/GLBackendQuery.cpp
+++ b/libraries/gpu-gl-common/src/gpu/gl/GLBackendQuery.cpp
@@ -21,8 +21,10 @@ const uint32_t MAX_RANGE_QUERY_DEPTH = 1;
 static bool timeElapsed = true;
 #else
 const uint32_t MAX_RANGE_QUERY_DEPTH = 10000;
+#if !defined(USE_GLES)
 static bool timeElapsed = false;
 #endif
+#endif
 
 #if defined(USE_GLES)
 static bool hasTimerExtension() {
diff --git a/libraries/gpu-gles/src/gpu/gles/GLESBackendOutput.cpp b/libraries/gpu-gles/src/gpu/gles/GLESBackendOutput.cpp
index 36b37083cb..99bcb34547 100644
--- a/libraries/gpu-gles/src/gpu/gles/GLESBackendOutput.cpp
+++ b/libraries/gpu-gles/src/gpu/gles/GLESBackendOutput.cpp
@@ -17,34 +17,6 @@
 
 namespace gpu { namespace gles { 
 
-
-// returns the FOV from the projection matrix
-static inline vec4 extractFov( const glm::mat4& m) {
-    static const std::array<vec4, 4> CLIPS{ {
-                                                { 1, 0, 0, 1 },
-                                                { -1, 0, 0, 1 },
-                                                { 0, 1, 0, 1 },
-                                                { 0, -1, 0, 1 }
-                                            } };
-
-    glm::mat4 mt = glm::transpose(m);
-    vec4 v, result;
-    // Left
-    v = mt * CLIPS[0];
-    result.x = -atanf(v.z / v.x);
-    // Right
-    v = mt * CLIPS[1];
-    result.y = atanf(v.z / v.x);
-    // Down
-    v = mt * CLIPS[2];
-    result.z = -atanf(v.z / v.y);
-    // Up
-    v = mt * CLIPS[3];
-    result.w = atanf(v.z / v.y);
-    return result;
-}
-
-
 class GLESFramebuffer : public gl::GLFramebuffer {
     using Parent = gl::GLFramebuffer;
     static GLuint allocate() {
diff --git a/libraries/shaders/src/shaders/Shaders.cpp b/libraries/shaders/src/shaders/Shaders.cpp
index ef67842f84..9964b1a1ed 100644
--- a/libraries/shaders/src/shaders/Shaders.cpp
+++ b/libraries/shaders/src/shaders/Shaders.cpp
@@ -35,7 +35,7 @@ namespace shader {
 const Dialect DEFAULT_DIALECT = Dialect::glsl310es;
 
 const std::vector<Dialect>& allDialects() {
-    static const std::vector<Dialect> ALL_DIALECTS{ { Dialect::glsl310es } };
+    static const std::vector<Dialect> ALL_DIALECTS{ Dialect::glsl310es };
     return ALL_DIALECTS;
 }
     
diff --git a/libraries/shared/src/GLMHelpers.h b/libraries/shared/src/GLMHelpers.h
index 6deae695cd..cfb4bb6398 100644
--- a/libraries/shared/src/GLMHelpers.h
+++ b/libraries/shared/src/GLMHelpers.h
@@ -14,6 +14,8 @@
 
 #include <stdint.h>
 
+#include <array>
+
 #include <glm/glm.hpp>
 #include <glm/gtc/quaternion.hpp>
 #include <glm/gtx/quaternion.hpp>
@@ -364,4 +366,30 @@ inline int fastLrintf(float x) {
 #endif
 }
 
+// returns the FOV from the projection matrix
+inline glm::vec4 extractFov(const glm::mat4& m) {
+    static const std::array<glm::vec4, 4> CLIPS{ {
+                                                { 1, 0, 0, 1 },
+                                                { -1, 0, 0, 1 },
+                                                { 0, 1, 0, 1 },
+                                                { 0, -1, 0, 1 }
+                                            } };
+
+    glm::mat4 mt = glm::transpose(m);
+    glm::vec4 v, result;
+    // Left
+    v = mt * CLIPS[0];
+    result.x = -atanf(v.z / v.x);
+    // Right
+    v = mt * CLIPS[1];
+    result.y = atanf(v.z / v.x);
+    // Down
+    v = mt * CLIPS[2];
+    result.z = -atanf(v.z / v.y);
+    // Up
+    v = mt * CLIPS[3];
+    result.w = atanf(v.z / v.y);
+    return result;
+}
+
 #endif // hifi_GLMHelpers_h