Mirror of https://github.com/JulianGro/overte.git
Commit 6ce1049c32: 17 changed files with 551 additions and 13 deletions

AudioMixer.cpp
@@ -428,8 +428,8 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
 }
 
 int AudioMixer::prepareMixForListeningNode(Node* node) {
-    AvatarAudioStream* nodeAudioStream = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioStream();
-    AudioMixerClientData* listenerNodeData = (AudioMixerClientData*)node->getLinkedData();
+    AvatarAudioStream* nodeAudioStream = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream();
+    AudioMixerClientData* listenerNodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
 
     // zero out the client mix for this node
     memset(_preMixSamples, 0, sizeof(_preMixSamples));
@@ -730,6 +730,33 @@ void AudioMixer::run() {
             memcpy(dataAt, &sequence, sizeof(quint16));
             dataAt += sizeof(quint16);
 
+            // Pack stream properties
+            bool inAZone = false;
+            for (int i = 0; i < _zoneReverbSettings.size(); ++i) {
+                AudioMixerClientData* data = static_cast<AudioMixerClientData*>(node->getLinkedData());
+                glm::vec3 streamPosition = data->getAvatarAudioStream()->getPosition();
+                if (_audioZones[_zoneReverbSettings[i].zone].contains(streamPosition)) {
+                    bool hasReverb = true;
+                    float reverbTime = _zoneReverbSettings[i].reverbTime;
+                    float wetLevel = _zoneReverbSettings[i].wetLevel;
+
+                    memcpy(dataAt, &hasReverb, sizeof(bool));
+                    dataAt += sizeof(bool);
+                    memcpy(dataAt, &reverbTime, sizeof(float));
+                    dataAt += sizeof(float);
+                    memcpy(dataAt, &wetLevel, sizeof(float));
+                    dataAt += sizeof(float);
+
+                    inAZone = true;
+                    break;
+                }
+            }
+            if (!inAZone) {
+                bool hasReverb = false;
+                memcpy(dataAt, &hasReverb, sizeof(bool));
+                dataAt += sizeof(bool);
+            }
+
             // pack mixed audio samples
             memcpy(dataAt, _mixSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
             dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
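
The hunk above defines the wire layout for mixed-audio packets: a one-byte reverb flag directly after the quint16 sequence number, followed by two floats only when the flag is set. The matching read side is InboundAudioStream::parseStreamProperties further down in this commit; the sketch below is not part of the commit, just a self-contained restatement of the same layout, assuming bool occupies one byte on both ends of the connection.

    // Sketch (not from this commit): reading back the stream properties
    // packed above. The memcpy-based format only round-trips if bool is
    // one byte on every platform involved.
    #include <cstring>

    struct ReverbProperties {
        bool hasReverb = false;
        float reverbTime = 0.0f;
        float wetLevel = 0.0f;
    };

    // Returns the number of bytes consumed after the sequence number:
    // 1 without reverb, 1 + 4 + 4 = 9 with it.
    int readReverbProperties(const char* data, ReverbProperties& out) {
        int read = 0;
        memcpy(&out.hasReverb, data + read, sizeof(bool));
        read += sizeof(bool);
        if (out.hasReverb) {
            memcpy(&out.reverbTime, data + read, sizeof(float));
            read += sizeof(float);
            memcpy(&out.wetLevel, data + read, sizeof(float));
            read += sizeof(float);
        }
        return read;
    }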
@@ -1033,6 +1060,38 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
                 }
             }
         }
 
+        const QString REVERB = "reverb";
+        if (audioEnvGroupObject[REVERB].isArray()) {
+            const QJsonArray& reverb = audioEnvGroupObject[REVERB].toArray();
+
+            const QString ZONE = "zone";
+            const QString REVERB_TIME = "reverb_time";
+            const QString WET_LEVEL = "wet_level";
+            for (int i = 0; i < reverb.count(); ++i) {
+                QJsonObject reverbObject = reverb[i].toObject();
+
+                if (reverbObject.contains(ZONE) &&
+                    reverbObject.contains(REVERB_TIME) &&
+                    reverbObject.contains(WET_LEVEL)) {
+
+                    bool okReverbTime, okWetLevel;
+                    QString zone = reverbObject.value(ZONE).toString();
+                    float reverbTime = reverbObject.value(REVERB_TIME).toString().toFloat(&okReverbTime);
+                    float wetLevel = reverbObject.value(WET_LEVEL).toString().toFloat(&okWetLevel);
+
+                    if (okReverbTime && okWetLevel && _audioZones.contains(zone)) {
+                        ReverbSettings settings;
+                        settings.zone = zone;
+                        settings.reverbTime = reverbTime;
+                        settings.wetLevel = wetLevel;
+
+                        _zoneReverbSettings.push_back(settings);
+                        qDebug() << "Added Reverb:" << zone << reverbTime << wetLevel;
+                    }
+                }
+            }
+        }
     }
 }
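
One detail worth noting in the parser above: reverb_time and wet_level are read with toString().toFloat(), so the settings JSON is expected to carry these values as strings rather than numbers. A minimal Qt sketch exercising the same extraction (the zone name and values are hypothetical, not taken from the commit):

    #include <QJsonDocument>
    #include <QJsonObject>
    #include <QJsonArray>
    #include <QDebug>

    int main() {
        // Hypothetical settings fragment using the keys parsed above; note
        // the float values arrive as strings, hence the toFloat() validation.
        const char* json =
            "{ \"reverb\": [ { \"zone\": \"Concert_Hall\","
            "  \"reverb_time\": \"4.0\", \"wet_level\": \"-6.0\" } ] }";
        QJsonObject audioEnvGroupObject = QJsonDocument::fromJson(json).object();

        QJsonArray reverb = audioEnvGroupObject["reverb"].toArray();
        for (int i = 0; i < reverb.count(); ++i) {
            QJsonObject entry = reverb[i].toObject();
            bool okTime = false, okWet = false;
            float reverbTime = entry.value("reverb_time").toString().toFloat(&okTime);
            float wetLevel = entry.value("wet_level").toString().toFloat(&okWet);
            if (okTime && okWet) {
                qDebug() << entry.value("zone").toString() << reverbTime << wetLevel;
            }
        }
        return 0;
    }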
AudioMixer.h
@@ -82,7 +82,13 @@ private:
         float coefficient;
     };
     QVector<ZonesSettings> _zonesSettings;
 
+    struct ReverbSettings {
+        QString zone;
+        float reverbTime;
+        float wetLevel;
+    };
+    QVector<ReverbSettings> _zoneReverbSettings;
+
     static InboundAudioStream::Settings _streamSettings;
 
     static bool _printStreamStats;
cmake/modules/FindGverb.cmake (new file, 39 lines)
@@ -0,0 +1,39 @@
+#  FindGVerb.cmake
+#
+#  Try to find the Gverb library.
+#
+#  You must provide a GVERB_ROOT_DIR which contains src and include directories
+#
+#  Once done this will define
+#
+#  GVERB_FOUND - system found Gverb
+#  GVERB_INCLUDE_DIRS - the Gverb include directory
+#
+#  Copyright 2014 High Fidelity, Inc.
+#
+#  Distributed under the Apache License, Version 2.0.
+#  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+#
+
+if (GVERB_INCLUDE_DIRS)
+  # in cache already
+  set(GVERB_FOUND TRUE)
+else (GVERB_INCLUDE_DIRS)
+
+  include("${MACRO_DIR}/HifiLibrarySearchHints.cmake")
+  hifi_library_search_hints("gverb")
+
+  find_path(GVERB_INCLUDE_DIRS gverb.h PATH_SUFFIXES include HINTS ${GVERB_SEARCH_DIRS})
+  find_path(GVERB_SRC_DIRS gverb.c PATH_SUFFIXES src HINTS ${GVERB_SEARCH_DIRS})
+
+  if (GVERB_INCLUDE_DIRS)
+    set(GVERB_FOUND TRUE)
+  endif (GVERB_INCLUDE_DIRS)
+
+  if (GVERB_FOUND)
+    message(STATUS "Found Gverb: ${GVERB_INCLUDE_DIRS}")
+  else (GVERB_FOUND)
+    message(FATAL_ERROR "Could NOT find Gverb. Read ./interface/externals/gverb/readme.txt")
+  endif (GVERB_FOUND)
+
+endif(GVERB_INCLUDE_DIRS)
describe-settings.json
@@ -154,6 +154,33 @@
             "placeholder": "0.18"
           }
         ]
       },
+      {
+        "name": "reverb",
+        "type": "table",
+        "label": "Reverb Settings",
+        "help": "In this table you can set custom reverb values for each audio zone",
+        "numbered": true,
+        "columns": [
+          {
+            "name": "zone",
+            "label": "Zone",
+            "can_set": true,
+            "placeholder": "Audio_Zone"
+          },
+          {
+            "name": "reverb_time",
+            "label": "Reverb Decay Time",
+            "can_set": true,
+            "placeholder": "(in sec)"
+          },
+          {
+            "name": "wet_level",
+            "label": "Wet Level",
+            "can_set": true,
+            "placeholder": "(in dB)"
+          }
+        ]
+      }
     ]
   },
examples/audioReverbOn.js (new file, 39 lines)
@@ -0,0 +1,39 @@
+//
+//  audioReverbOn.js
+//  examples
+//
+//  Copyright 2014 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+
+// http://wiki.audacityteam.org/wiki/GVerb#Instant_reverb_settings
+var audioOptions = new AudioEffectOptions({
+    // Square Meters
+    maxRoomSize: 50,
+    roomSize: 50,
+
+    // Seconds
+    reverbTime: 4,
+
+    // Between 0 - 1
+    damping: 0.50,
+    inputBandwidth: 0.75,
+
+    // dB
+    earlyLevel: -22,
+    tailLevel: -28,
+    dryLevel: 0,
+    wetLevel: 6
+});
+
+AudioDevice.setReverbOptions(audioOptions);
+AudioDevice.setReverb(true);
+print("Reverb is now on with the updated options.");
+
+function scriptEnding() {
+    AudioDevice.setReverb(false);
+    print("Reverb is now off.");
+}
+
+Script.scriptEnding.connect(scriptEnding);
interface/CMakeLists.txt
@@ -2,7 +2,7 @@ set(TARGET_NAME interface)
 project(${TARGET_NAME})
 
 # set a default root dir for each of our optional externals if it was not passed
-set(OPTIONAL_EXTERNALS "Faceshift" "LibOVR" "PrioVR" "Sixense" "Visage" "LeapMotion" "RtMidi" "Qxmpp" "SDL2")
+set(OPTIONAL_EXTERNALS "Faceshift" "LibOVR" "PrioVR" "Sixense" "Visage" "LeapMotion" "RtMidi" "Qxmpp" "SDL2" "Gverb")
 foreach(EXTERNAL ${OPTIONAL_EXTERNALS})
   string(TOUPPER ${EXTERNAL} ${EXTERNAL}_UPPERCASE)
   if (NOT ${${EXTERNAL}_UPPERCASE}_ROOT_DIR)
@@ -14,6 +14,10 @@ endforeach()
 find_package(Qt5LinguistTools REQUIRED)
 find_package(Qt5LinguistToolsMacros)
 
+
+# As Gverb is currently the only reverb library, it's required.
+find_package(Gverb REQUIRED)
+
 if (DEFINED ENV{JOB_ID})
   set(BUILD_SEQ $ENV{JOB_ID})
 else ()
@@ -166,6 +170,13 @@ if (QXMPP_FOUND AND NOT DISABLE_QXMPP AND WIN32)
   add_definitions(-DQXMPP_STATIC)
 endif ()
 
+if (GVERB_FOUND)
+  file(GLOB GVERB_SRCS ${GVERB_SRC_DIRS}/*.c)
+  include_directories(${GVERB_INCLUDE_DIRS})
+  add_library(gverb STATIC ${GVERB_SRCS})
+  target_link_libraries(${TARGET_NAME} gverb)
+endif (GVERB_FOUND)
+
 # include headers for interface and InterfaceConfig.
 include_directories("${PROJECT_SOURCE_DIR}/src" "${PROJECT_BINARY_DIR}/includes")
interface/external/gverb/readme.txt (new file, vendored, 15 lines)
@@ -0,0 +1,15 @@
+
+Instructions for adding the Gverb library to Interface
+(This is a required library)
+Clément Brisset, October 22nd, 2014
+
+1. Go to https://github.com/highfidelity/gverb
+   Or download the sources directly via this link:
+   https://github.com/highfidelity/gverb/archive/master.zip
+
+2. Extract the archive
+
+3. Place the directories “include” and “src” in interface/external/gverb
+   (Normally next to this readme)
+
+4. Clear your build directory, run cmake, build and you should be all set.
Audio.cpp
@@ -92,6 +92,8 @@ Audio::Audio(QObject* parent) :
     _collisionSoundDuration(0.0f),
     _proceduralEffectSample(0),
     _muted(false),
+    _reverb(false),
+    _reverbOptions(&_scriptReverbOptions),
     _processSpatialAudio(false),
     _spatialAudioStart(0),
     _spatialAudioFinish(0),
@@ -123,11 +125,14 @@ Audio::Audio(QObject* parent) :
     memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
     // Create the noise sample array
     _noiseSampleFrames = new float[NUMBER_OF_NOISE_SAMPLE_FRAMES];
 
     connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedSilence, this, &Audio::addStereoSilenceToScope, Qt::DirectConnection);
     connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedLastFrameRepeatedWithFade, this, &Audio::addLastFrameRepeatedWithFadeToScope, Qt::DirectConnection);
     connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedStereoSamples, this, &Audio::addStereoSamplesToScope, Qt::DirectConnection);
     connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples, this, &Audio::processReceivedSamples, Qt::DirectConnection);
+
+    // Initialize GVerb
+    initGverb();
 }
 
 void Audio::init(QGLWidget *parent) {
@@ -489,6 +494,69 @@ bool Audio::switchOutputToAudioDevice(const QString& outputDeviceName) {
     return switchOutputToAudioDevice(getNamedAudioDeviceForMode(QAudio::AudioOutput, outputDeviceName));
 }
 
+void Audio::initGverb() {
+    // Initialize a new gverb instance
+    _gverb = gverb_new(_outputFormat.sampleRate(), _reverbOptions->getMaxRoomSize(), _reverbOptions->getRoomSize(),
+                       _reverbOptions->getReverbTime(), _reverbOptions->getDamping(), _reverbOptions->getSpread(),
+                       _reverbOptions->getInputBandwidth(), _reverbOptions->getEarlyLevel(),
+                       _reverbOptions->getTailLevel());
+
+    // Configure the instance (these functions are not super well named - they actually set several internal variables)
+    gverb_set_roomsize(_gverb, _reverbOptions->getRoomSize());
+    gverb_set_revtime(_gverb, _reverbOptions->getReverbTime());
+    gverb_set_damping(_gverb, _reverbOptions->getDamping());
+    gverb_set_inputbandwidth(_gverb, _reverbOptions->getInputBandwidth());
+    gverb_set_earlylevel(_gverb, DB_CO(_reverbOptions->getEarlyLevel()));
+    gverb_set_taillevel(_gverb, DB_CO(_reverbOptions->getTailLevel()));
+}
+
+void Audio::setReverbOptions(const AudioEffectOptions* options) {
+    // Save the new options
+    _scriptReverbOptions.setMaxRoomSize(options->getMaxRoomSize());
+    _scriptReverbOptions.setRoomSize(options->getRoomSize());
+    _scriptReverbOptions.setReverbTime(options->getReverbTime());
+    _scriptReverbOptions.setDamping(options->getDamping());
+    _scriptReverbOptions.setSpread(options->getSpread());
+    _scriptReverbOptions.setInputBandwidth(options->getInputBandwidth());
+    _scriptReverbOptions.setEarlyLevel(options->getEarlyLevel());
+    _scriptReverbOptions.setTailLevel(options->getTailLevel());
+
+    _scriptReverbOptions.setDryLevel(options->getDryLevel());
+    _scriptReverbOptions.setWetLevel(options->getWetLevel());
+
+    if (_reverbOptions == &_scriptReverbOptions) {
+        // Apply them to the reverb instance(s)
+        initGverb();
+    }
+}
+
+void Audio::addReverb(int16_t* samplesData, int numSamples, QAudioFormat& audioFormat) {
+    float dryFraction = DB_CO(_reverbOptions->getDryLevel());
+    float wetFraction = DB_CO(_reverbOptions->getWetLevel());
+
+    float lValue, rValue;
+    for (int sample = 0; sample < numSamples; sample += audioFormat.channelCount()) {
+        // Run GVerb
+        float value = (float)samplesData[sample];
+        gverb_do(_gverb, value, &lValue, &rValue);
+
+        // Mix, accounting for clipping, the left and right channels. Ignore the rest.
+        for (unsigned int j = sample; j < sample + audioFormat.channelCount(); j++) {
+            if (j == sample) {
+                // left channel
+                int lResult = glm::clamp((int)(samplesData[j] * dryFraction + lValue * wetFraction), -32768, 32767);
+                samplesData[j] = (int16_t)lResult;
+            } else if (j == (sample + 1)) {
+                // right channel
+                int rResult = glm::clamp((int)(samplesData[j] * dryFraction + rValue * wetFraction), -32768, 32767);
+                samplesData[j] = (int16_t)rResult;
+            } else {
+                // ignore channels above 2
+            }
+        }
+    }
+}
+
 void Audio::handleAudioInput() {
     static char audioDataPacket[MAX_PACKET_SIZE];
 
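
addReverb() converts the configured dry and wet levels from decibels to linear gain factors with DB_CO before mixing. The macro itself ships with gverb and is not shown in this diff; assuming its usual definition of 10^(dB/20) with very low values treated as silence (an assumption, not confirmed by the commit), the defaults used by audioReverbOn.js above work out as follows:

    #include <cmath>
    #include <cstdio>

    // Assumed stand-in for gverb's DB_CO macro (decibels to linear coefficient).
    static float dbToGain(float db) {
        return db > -90.0f ? powf(10.0f, db * 0.05f) : 0.0f;
    }

    int main() {
        printf("dryLevel    0 dB -> gain %.2f\n", dbToGain(0.0f));   // 1.00: dry signal passes unchanged
        printf("wetLevel    6 dB -> gain %.2f\n", dbToGain(6.0f));   // ~2.00: reverb roughly doubled
        printf("tailLevel -28 dB -> gain %.3f\n", dbToGain(-28.0f)); // ~0.040: quiet late reflections
        return 0;
    }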
@@ -720,7 +788,6 @@ void Audio::handleAudioInput() {
         NodeList* nodeList = NodeList::getInstance();
         SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
 
-
         if (_recorder && _recorder.data()->isRecording()) {
             _recorder.data()->record(reinterpret_cast<char*>(networkAudioSamples), numNetworkBytes);
         }
@@ -840,12 +907,10 @@ void Audio::addLastFrameRepeatedWithFadeToScope(int samplesPerChannel) {
 }
 
 void Audio::processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
-
     const int numNetworkOutputSamples = inputBuffer.size() / sizeof(int16_t);
     const int numDeviceOutputSamples = numNetworkOutputSamples * (_outputFormat.sampleRate() * _outputFormat.channelCount())
         / (_desiredOutputFormat.sampleRate() * _desiredOutputFormat.channelCount());
-
 
     outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
 
     const int16_t* receivedSamples;
@@ -884,10 +949,37 @@ void Audio::processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
                                                     numNetworkOutputSamples,
                                                     numDeviceOutputSamples,
                                                     _desiredOutputFormat, _outputFormat);
+
+    if (_reverb || _receivedAudioStream.hasReverb()) {
+        bool reverbChanged = false;
+        if (_receivedAudioStream.hasReverb()) {
+
+            if (_zoneReverbOptions.getReverbTime() != _receivedAudioStream.getRevebTime()) {
+                _zoneReverbOptions.setReverbTime(_receivedAudioStream.getRevebTime());
+                reverbChanged = true;
+            }
+            if (_zoneReverbOptions.getWetLevel() != _receivedAudioStream.getWetLevel()) {
+                _zoneReverbOptions.setWetLevel(_receivedAudioStream.getWetLevel());
+                reverbChanged = true;
+            }
+
+            if (_reverbOptions != &_zoneReverbOptions) {
+                _reverbOptions = &_zoneReverbOptions;
+                reverbChanged = true;
+            }
+        } else if (_reverbOptions != &_scriptReverbOptions) {
+            _reverbOptions = &_scriptReverbOptions;
+            reverbChanged = true;
+        }
+
+        if (reverbChanged) {
+            initGverb();
+        }
+        addReverb((int16_t*)outputBuffer.data(), numDeviceOutputSamples, _outputFormat);
+    }
 }
 
 void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
     if (_audioOutput) {
         // Audio output must exist and be correctly set up if we're going to process received audio
         _receivedAudioStream.parseData(audioByteArray);
Audio.h
@@ -43,6 +43,14 @@
 #include <StdDev.h>
 
 #include "MixedProcessedAudioStream.h"
+#include "AudioEffectOptions.h"
 #include <AudioRingBuffer.h>
 #include <StdDev.h>
 
+extern "C" {
+    #include <gverb.h>
+    #include <gverbdsp.h>
+}
+
 static const int NUM_AUDIO_CHANNELS = 2;
@@ -159,6 +167,8 @@ public slots:
 
     float getInputVolume() const { return (_audioInput) ? _audioInput->volume() : 0.0f; }
     void setInputVolume(float volume) { if (_audioInput) _audioInput->setVolume(volume); }
+    void setReverb(bool reverb) { _reverb = reverb; }
+    void setReverbOptions(const AudioEffectOptions* options);
 
     const AudioStreamStats& getAudioMixerAvatarStreamAudioStats() const { return _audioMixerAvatarStreamAudioStats; }
     const QHash<QUuid, AudioStreamStats>& getAudioMixerInjectedStreamAudioStatsMap() const { return _audioMixerInjectedStreamAudioStatsMap; }
@@ -230,6 +240,11 @@ private:
     int _proceduralEffectSample;
     bool _muted;
     bool _localEcho;
+    bool _reverb;
+    AudioEffectOptions _scriptReverbOptions;
+    AudioEffectOptions _zoneReverbOptions;
+    AudioEffectOptions* _reverbOptions;
+    ty_gverb* _gverb;
     GLuint _micTextureId;
     GLuint _muteTextureId;
     GLuint _boxTextureId;
@@ -249,6 +264,10 @@ private:
     // 2. Mix with the audio input
     void processProceduralAudio(int16_t* monoInput, int numSamples);
 
+    // Adds reverb
+    void initGverb();
+    void addReverb(int16_t* samples, int numSamples, QAudioFormat& format);
+
     // Add sounds that we want the user to not hear themselves, by adding on top of mic input signal
    void addProceduralSounds(int16_t* monoInput, int numSamples);
AudioDeviceScriptingInterface.cpp
@@ -70,3 +70,11 @@ float AudioDeviceScriptingInterface::getInputVolume() {
 void AudioDeviceScriptingInterface::setInputVolume(float volume) {
     Application::getInstance()->getAudio()->setInputVolume(volume);
 }
+
+void AudioDeviceScriptingInterface::setReverb(bool reverb) {
+    Application::getInstance()->getAudio()->setReverb(reverb);
+}
+
+void AudioDeviceScriptingInterface::setReverbOptions(const AudioEffectOptions* options) {
+    Application::getInstance()->getAudio()->setReverbOptions(options);
+}
AudioDeviceScriptingInterface.h
@@ -39,6 +39,8 @@ public slots:
 
     float getInputVolume();
     void setInputVolume(float volume);
+    void setReverb(bool reverb);
+    void setReverbOptions(const AudioEffectOptions* options);
 };
 
 #endif // hifi_AudioDeviceScriptingInterface_h
libraries/audio/src/AudioEffectOptions.cpp (new file, 88 lines)
@@ -0,0 +1,88 @@
+//
+//  AudioEffectOptions.cpp
+//  libraries/audio/src
+//
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#include "AudioEffectOptions.h"
+
+static const QString MAX_ROOM_SIZE_HANDLE = "maxRoomSize";
+static const QString ROOM_SIZE_HANDLE = "roomSize";
+static const QString REVERB_TIME_HANDLE = "reverbTime";
+static const QString DAMPIMG_HANDLE = "damping";
+static const QString SPREAD_HANDLE = "spread";
+static const QString INPUT_BANDWIDTH_HANDLE = "inputBandwidth";
+static const QString EARLY_LEVEL_HANDLE = "earlyLevel";
+static const QString TAIL_LEVEL_HANDLE = "tailLevel";
+static const QString DRY_LEVEL_HANDLE = "dryLevel";
+static const QString WET_LEVEL_HANDLE = "wetLevel";
+
+AudioEffectOptions::AudioEffectOptions(QScriptValue arguments) :
+    _maxRoomSize(50.0f),
+    _roomSize(50.0f),
+    _reverbTime(4.0f),
+    _damping(0.5f),
+    _spread(15.0f),
+    _inputBandwidth(0.75f),
+    _earlyLevel(-22.0f),
+    _tailLevel(-28.0f),
+    _dryLevel(0.0f),
+    _wetLevel(6.0f) {
+    if (arguments.property(MAX_ROOM_SIZE_HANDLE).isNumber()) {
+        _maxRoomSize = arguments.property(MAX_ROOM_SIZE_HANDLE).toNumber();
+    }
+    if (arguments.property(ROOM_SIZE_HANDLE).isNumber()) {
+        _roomSize = arguments.property(ROOM_SIZE_HANDLE).toNumber();
+    }
+    if (arguments.property(REVERB_TIME_HANDLE).isNumber()) {
+        _reverbTime = arguments.property(REVERB_TIME_HANDLE).toNumber();
+    }
+    if (arguments.property(DAMPIMG_HANDLE).isNumber()) {
+        _damping = arguments.property(DAMPIMG_HANDLE).toNumber();
+    }
+    if (arguments.property(SPREAD_HANDLE).isNumber()) {
+        _spread = arguments.property(SPREAD_HANDLE).toNumber();
+    }
+    if (arguments.property(INPUT_BANDWIDTH_HANDLE).isNumber()) {
+        _inputBandwidth = arguments.property(INPUT_BANDWIDTH_HANDLE).toNumber();
+    }
+    if (arguments.property(EARLY_LEVEL_HANDLE).isNumber()) {
+        _earlyLevel = arguments.property(EARLY_LEVEL_HANDLE).toNumber();
+    }
+    if (arguments.property(TAIL_LEVEL_HANDLE).isNumber()) {
+        _tailLevel = arguments.property(TAIL_LEVEL_HANDLE).toNumber();
+    }
+    if (arguments.property(DRY_LEVEL_HANDLE).isNumber()) {
+        _dryLevel = arguments.property(DRY_LEVEL_HANDLE).toNumber();
+    }
+    if (arguments.property(WET_LEVEL_HANDLE).isNumber()) {
+        _wetLevel = arguments.property(WET_LEVEL_HANDLE).toNumber();
+    }
+}
+
+AudioEffectOptions::AudioEffectOptions(const AudioEffectOptions& other) {
+    *this = other;
+}
+
+AudioEffectOptions& AudioEffectOptions::operator=(const AudioEffectOptions& other) {
+    _maxRoomSize = other._maxRoomSize;
+    _roomSize = other._roomSize;
+    _reverbTime = other._reverbTime;
+    _damping = other._damping;
+    _spread = other._spread;
+    _inputBandwidth = other._inputBandwidth;
+    _earlyLevel = other._earlyLevel;
+    _tailLevel = other._tailLevel;
+    _dryLevel = other._dryLevel;
+    _wetLevel = other._wetLevel;
+
+    return *this;
+}
+
+QScriptValue AudioEffectOptions::constructor(QScriptContext* context, QScriptEngine* engine) {
+    return engine->newQObject(new AudioEffectOptions(context->argument(0)));
+}
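
For context on how this constructor is reached: ScriptEngine::init() (changed at the bottom of this commit) registers it on the script engine's global object, after which scripts can call new AudioEffectOptions({...}) as audioReverbOn.js does above. A condensed sketch of that wiring, with hypothetical property values:

    #include <QtScript/QScriptEngine>
    #include "AudioEffectOptions.h"

    int main() {
        QScriptEngine engine;

        // The same registration ScriptEngine::init() performs in this commit.
        engine.globalObject().setProperty("AudioEffectOptions",
                                          engine.newFunction(AudioEffectOptions::constructor));

        // Properties that are missing or non-numeric keep the defaults from
        // the constructor's initializer list above (e.g. damping stays at 0.5).
        engine.evaluate("var options = new AudioEffectOptions({ reverbTime: 2.5, wetLevel: -3 });");
        return 0;
    }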
libraries/audio/src/AudioEffectOptions.h (new file, 106 lines)
@@ -0,0 +1,106 @@
+//
+//  AudioEffectOptions.h
+//  libraries/audio/src
+//
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef hifi_AudioEffectOptions_h
+#define hifi_AudioEffectOptions_h
+
+#include <QObject>
+#include <QtScript/QScriptContext>
+#include <QtScript/QScriptEngine>
+
+class AudioEffectOptions : public QObject {
+    Q_OBJECT
+
+    // Square meters
+    Q_PROPERTY(float maxRoomSize READ getMaxRoomSize WRITE setMaxRoomSize)
+    Q_PROPERTY(float roomSize READ getRoomSize WRITE setRoomSize)
+
+    // Seconds
+    Q_PROPERTY(float reverbTime READ getReverbTime WRITE setReverbTime)
+
+    // Ratio between 0 and 1
+    Q_PROPERTY(float damping READ getDamping WRITE setDamping)
+
+    // (?) Does not appear to be set externally very often
+    Q_PROPERTY(float spread READ getSpread WRITE setSpread)
+
+    // Ratio between 0 and 1
+    Q_PROPERTY(float inputBandwidth READ getInputBandwidth WRITE setInputBandwidth)
+
+    // in dB
+    Q_PROPERTY(float earlyLevel READ getEarlyLevel WRITE setEarlyLevel)
+    Q_PROPERTY(float tailLevel READ getTailLevel WRITE setTailLevel)
+    Q_PROPERTY(float dryLevel READ getDryLevel WRITE setDryLevel)
+    Q_PROPERTY(float wetLevel READ getWetLevel WRITE setWetLevel)
+
+public:
+    AudioEffectOptions(QScriptValue arguments = QScriptValue());
+    AudioEffectOptions(const AudioEffectOptions& other);
+    AudioEffectOptions& operator=(const AudioEffectOptions& other);
+
+    static QScriptValue constructor(QScriptContext* context, QScriptEngine* engine);
+
+    float getRoomSize() const { return _roomSize; }
+    void setRoomSize(float roomSize) { _roomSize = roomSize; }
+
+    float getMaxRoomSize() const { return _maxRoomSize; }
+    void setMaxRoomSize(float maxRoomSize) { _maxRoomSize = maxRoomSize; }
+
+    float getReverbTime() const { return _reverbTime; }
+    void setReverbTime(float reverbTime) { _reverbTime = reverbTime; }
+
+    float getDamping() const { return _damping; }
+    void setDamping(float damping) { _damping = damping; }
+
+    float getSpread() const { return _spread; }
+    void setSpread(float spread) { _spread = spread; }
+
+    float getInputBandwidth() const { return _inputBandwidth; }
+    void setInputBandwidth(float inputBandwidth) { _inputBandwidth = inputBandwidth; }
+
+    float getEarlyLevel() const { return _earlyLevel; }
+    void setEarlyLevel(float earlyLevel) { _earlyLevel = earlyLevel; }
+
+    float getTailLevel() const { return _tailLevel; }
+    void setTailLevel(float tailLevel) { _tailLevel = tailLevel; }
+
+    float getDryLevel() const { return _dryLevel; }
+    void setDryLevel(float dryLevel) { _dryLevel = dryLevel; }
+
+    float getWetLevel() const { return _wetLevel; }
+    void setWetLevel(float wetLevel) { _wetLevel = wetLevel; }
+
+private:
+    // http://wiki.audacityteam.org/wiki/GVerb#Instant_Reverberb_settings
+
+    // Square meters
+    float _maxRoomSize;
+    float _roomSize;
+
+    // Seconds
+    float _reverbTime;
+
+    // Ratio between 0 and 1
+    float _damping;
+
+    // ? (Does not appear to be set externally very often)
+    float _spread;
+
+    // Ratio between 0 and 1
+    float _inputBandwidth;
+
+    // dB
+    float _earlyLevel;
+    float _tailLevel;
+    float _dryLevel;
+    float _wetLevel;
+};
+
+#endif // hifi_AudioEffectOptions_h
InboundAudioStream.cpp
@@ -44,7 +44,8 @@ InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity,
     _framesAvailableStat(),
     _currentJitterBufferFrames(0),
     _timeGapStatsForStatsPacket(0, STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
-    _repetitionWithFade(settings._repetitionWithFade)
+    _repetitionWithFade(settings._repetitionWithFade),
+    _hasReverb(false)
 {
 }
@@ -162,9 +163,22 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
 }
 
 int InboundAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
+    int read = 0;
+    if (type == PacketTypeMixedAudio) {
+        memcpy(&_hasReverb, packetAfterSeqNum.data() + read, sizeof(bool));
+        read += sizeof(bool);
+
+        if (_hasReverb) {
+            memcpy(&_reverbTime, packetAfterSeqNum.data() + read, sizeof(float));
+            read += sizeof(float);
+            memcpy(&_wetLevel, packetAfterSeqNum.data() + read, sizeof(float));
+            read += sizeof(float);
+        }
+    }
+
     // mixed audio packets do not have any info between the seq num and the audio data.
-    numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
-    return 0;
+    numAudioSamples = (packetAfterSeqNum.size() - read) / sizeof(int16_t);
+    return read;
 }
 
 int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
InboundAudioStream.h
@@ -154,6 +154,10 @@ public:
     int getOverflowCount() const { return _ringBuffer.getOverflowCount(); }
 
     int getPacketsReceived() const { return _incomingSequenceNumberStats.getReceived(); }
 
+    bool hasReverb() const { return _hasReverb; }
+    float getRevebTime() const { return _reverbTime; }
+    float getWetLevel() const { return _wetLevel; }
+
 public slots:
     /// This function should be called every second for all the stats to function properly. If dynamic jitter buffers
@@ -243,6 +247,11 @@ protected:
     MovingMinMaxAvg<quint64> _timeGapStatsForStatsPacket;
 
     bool _repetitionWithFade;
 
+    // Reverb properties
+    bool _hasReverb;
+    float _reverbTime;
+    float _wetLevel;
 };
 
 float calculateRepeatedFrameFadeFactor(int indexOfRepeat);
script-engine CMakeLists.txt
@@ -5,7 +5,7 @@ setup_hifi_library(Gui Network Script Widgets)
 
 include_glm()
 
-link_hifi_libraries(shared octree voxels fbx entities animation)
+link_hifi_libraries(shared octree voxels fbx entities animation audio)
 
 # call macro to link our dependencies and bubble them up via a property on our target
 link_shared_dependencies()
ScriptEngine.cpp
@@ -17,6 +17,7 @@
 #include <QtNetwork/QNetworkReply>
 #include <QScriptEngine>
 
+#include <AudioEffectOptions.h>
 #include <AudioInjector.h>
 #include <AudioRingBuffer.h>
 #include <AvatarData.h>
@@ -277,6 +278,9 @@ void ScriptEngine::init() {
 
     QScriptValue localVoxelsValue = scriptValueFromQMetaObject<LocalVoxels>();
     globalObject().setProperty("LocalVoxels", localVoxelsValue);
 
+    QScriptValue audioEffectOptionsConstructorValue = newFunction(AudioEffectOptions::constructor);
+    globalObject().setProperty("AudioEffectOptions", audioEffectOptionsConstructorValue);
+
     qScriptRegisterMetaType(this, injectorToScriptValue, injectorFromScriptValue);
     qScriptRegisterMetaType(this, inputControllerToScriptValue, inputControllerFromScriptValue);