Initial reverb implementation

Conflicts:
	interface/CMakeLists.txt
	interface/src/Audio.cpp
	interface/src/Audio.h
	libraries/script-engine/src/ScriptEngine.cpp
NextPrior 2014-06-10 22:04:39 -07:00 committed by Atlante45
parent d90d30ad9f
commit a0a5530641
18 changed files with 1689 additions and 1 deletion


@ -0,0 +1,38 @@
# FindGVerb.cmake
#
# Try to find the Gverb library.
#
# You must provide a GVERB_ROOT_DIR which contains src and include directories
#
# Once done this will define
#
# GVERB_FOUND - system found Gverb
# GVERB_INCLUDE_DIRS - the Gverb include directory
#
# Copyright 2014 High Fidelity, Inc.
#
# Distributed under the Apache License, Version 2.0.
# See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
#
if (GVERB_INCLUDE_DIRS)
# in cache already
set(GVERB_FOUND TRUE)
else ()
find_path(GVERB_INCLUDE_DIRS gverb.h ${GVERB_ROOT_DIR}/src)
if (GVERB_INCLUDE_DIRS)
set(GVERB_FOUND TRUE)
endif (GVERB_INCLUDE_DIRS)
if (GVERB_FOUND)
if (NOT GVERB_FIND_QUIETLY)
message(STATUS "Found Gverb... ${GVERB_INCLUDE_DIRS}")
endif (NOT GVERB_FIND_QUIETLY)
else ()
if (GVERB_FIND_REQUIRED)
message(FATAL_ERROR "Could not find Gverb")
endif (GVERB_FIND_REQUIRED)
endif ()
endif ()


@ -0,0 +1,12 @@
//
// audioReverbOff.js
// examples
//
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
//
AudioDevice.setReverb(false);
print("Reverb is now off.");

examples/audioReverbOn.js Normal file

@ -0,0 +1,32 @@
//
// audioReverbOn.js
// examples
//
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
// http://wiki.audacityteam.org/wiki/GVerb#Instant_reverb_settings
var audioOptions = new AudioEffectOptions();
// Square Meters
audioOptions.maxRoomSize = 50;
audioOptions.roomSize = 50;
// Seconds
audioOptions.reverbTime = 4;
// Between 0 - 1
audioOptions.damping = 0.50;
audioOptions.inputBandwidth = 0.75;
// dB
audioOptions.earlyLevel = -22;
audioOptions.tailLevel = -28;
audioOptions.dryLevel = 0;
audioOptions.wetLevel = 6;
AudioDevice.setReverbOptions(audioOptions);
AudioDevice.setReverb(true);
print("Reverb is now on with the updated options.");


@ -14,6 +14,9 @@ endforeach()
find_package(Qt5LinguistTools REQUIRED)
find_package(Qt5LinguistToolsMacros)
# As Gverb is currently the only reverb library, it's required.
find_package(Gverb REQUIRED)
if (DEFINED ENV{JOB_ID})
set(BUILD_SEQ $ENV{JOB_ID})
else ()
@ -167,6 +170,12 @@ if (QXMPP_FOUND AND NOT DISABLE_QXMPP AND WIN32)
add_definitions(-DQXMPP_STATIC)
endif ()
if (GVERB_FOUND)
add_subdirectory(${GVERB_ROOT_DIR})
include_directories(${GVERB_INCLUDE_DIRS})
target_link_libraries(${TARGET_NAME} gverb)
endif (GVERB_FOUND)
# include headers for interface and InterfaceConfig.
include_directories("${PROJECT_SOURCE_DIR}/src" "${PROJECT_BINARY_DIR}/includes")
include_directories("${OPENSSL_INCLUDE_DIR}")


@ -0,0 +1,2 @@
cmake_minimum_required(VERSION 2.8)
add_library(gverb include/ladspa-util.h include/lv2.h src/gverb.h src/gverb.c src/gverbdsp.h src/gverbdsp.c)


@ -0,0 +1,234 @@
/* Some misc util functions for audio DSP work, written by Steve Harris,
* December 2000
*
* steve@plugin.org.uk
*/
#ifndef LADSPA_UTIL_H
#define LADSPA_UTIL_H
#include <math.h>
#include <stdint.h>
#include <string.h>
#define buffer_write(a, b) a=(b)
// 16.16 fixpoint
typedef union {
int32_t all;
struct {
#ifdef WORDS_BIGENDIAN
int16_t in;
uint16_t fr;
#else
uint16_t fr;
int16_t in;
#endif
} part;
} fixp16;
// 32.32 fixpoint
typedef union {
int64_t all;
struct {
#ifdef WORDS_BIGENDIAN
int32_t in;
uint32_t fr;
#else
uint32_t fr;
int32_t in;
#endif
} part;
} fixp32;
/* 32 bit "pointer cast" union */
typedef union {
float f;
int32_t i;
} ls_pcast32;
// Sometimes it doesn't get defined, even though it exists and C99 is declared
long int lrintf (float x);
// 1.0 / ln(2)
#define LN2R 1.442695041f
/* detect floating point denormal numbers by comparing them to the smallest
* normal, crap, but reliable */
#define DN_CHECK(x, l) if (fabs(x) < 1e-38) printf("DN: "l"\n")
// Denormalise floats, only actually needed for PIII and recent PowerPC
//#define FLUSH_TO_ZERO(fv) (((*(unsigned int*)&(fv))&0x7f800000)==0)?0.0f:(fv)
static inline float flush_to_zero(float f)
{
ls_pcast32 v;
v.f = f;
// original: return (v.i & 0x7f800000) == 0 ? 0.0f : f;
// version from Tim Blechmann
return (v.i & 0x7f800000) < 0x08000000 ? 0.0f : f;
}
static inline void round_to_zero(volatile float *f)
{
*f += 1e-18;
*f -= 1e-18;
}
/* A set of branchless clipping operations from Laurent de Soras */
static inline float f_max(float x, float a)
{
x -= a;
x += fabs(x);
x *= 0.5;
x += a;
return x;
}
static inline float f_min(float x, float b)
{
x = b - x;
x += fabs(x);
x *= 0.5;
x = b - x;
return x;
}
static inline float f_clamp(float x, float a, float b)
{
const float x1 = fabs(x - a);
const float x2 = fabs(x - b);
x = x1 + a + b;
x -= x2;
x *= 0.5;
return x;
}
// Limit a value to be l<=v<=u
#define LIMIT(v,l,u) ((v)<(l)?(l):((v)>(u)?(u):(v)))
// Truncate-to-zero modulo (ANSI C doesn't specify) will only work
// if -m < v < 2m
#define MOD(v,m) (v<0?v+m:(v>=m?v-m:v))
// Truncate-to-zero modulo (ANSI C doesn't specify) will only work
// if v > -m and v < m
#define NEG_MOD(v,m) ((v)<0?((v)+(m)):(v))
// Convert a value in dB to a coefficient
#define DB_CO(g) ((g) > -90.0f ? powf(10.0f, (g) * 0.05f) : 0.0f)
#define CO_DB(v) (20.0f * log10f(v))
// Linearly interpolate [ = a * (1 - f) + b * f]
#define LIN_INTERP(f,a,b) ((a) + (f) * ((b) - (a)))
// Cubic interpolation function
static inline float cube_interp(const float fr, const float inm1, const float
in, const float inp1, const float inp2)
{
return in + 0.5f * fr * (inp1 - inm1 +
fr * (4.0f * inp1 + 2.0f * inm1 - 5.0f * in - inp2 +
fr * (3.0f * (in - inp1) - inm1 + inp2)));
}
/* fast sin^2 approximation, adapted from jan AT rpgfan's posting to the
* music-dsp list */
static inline float f_sin_sq(float angle)
{
const float asqr = angle * angle;
float result = -2.39e-08f;
result *= asqr;
result += 2.7526e-06f;
result *= asqr;
result -= 1.98409e-04f;
result *= asqr;
result += 8.3333315e-03f;
result *= asqr;
result -= 1.666666664e-01f;
result *= asqr;
result += 1.0f;
result *= angle;
return result * result;
}
#ifdef HAVE_LRINTF
#define f_round(f) lrintf(f)
#else
// Round float to int using IEEE int* hack
static inline int f_round(float f)
{
ls_pcast32 p;
p.f = f;
p.f += (3<<22);
return p.i - 0x4b400000;
}
#endif
// Truncate float to int
static inline int f_trunc(float f)
{
return f_round(floorf(f));
}
/* Andrew Simper's pow(2, x) approximation from the music-dsp list */
#if 0
/* original */
static inline float f_pow2(float x)
{
long *px = (long*)(&x); // store address of float as long pointer
const float tx = (x-0.5f) + (3<<22); // temporary value for truncation
const long lx = *((long*)&tx) - 0x4b400000; // integer power of 2
const float dx = x-(float)(lx); // float remainder of power of 2
x = 1.0f + dx*(0.6960656421638072f + // cubic approximation of 2^x
dx*(0.224494337302845f + // for x in the range [0, 1]
dx*(0.07944023841053369f)));
*px += (lx<<23); // add integer power of 2 to exponent
return x;
}
#else
/* union version */
static inline float f_pow2(float x)
{
ls_pcast32 *px, tx, lx;
float dx;
px = (ls_pcast32 *)&x; // store address of float as long pointer
tx.f = (x-0.5f) + (3<<22); // temporary value for truncation
lx.i = tx.i - 0x4b400000; // integer power of 2
dx = x - (float)lx.i; // float remainder of power of 2
x = 1.0f + dx * (0.6960656421638072f + // cubic approximation of 2^x
dx * (0.224494337302845f + // for x in the range [0, 1]
dx * (0.07944023841053369f)));
(*px).i += (lx.i << 23); // add integer power of 2 to exponent
return (*px).f;
}
#endif
/* Fast exponentiation function, y = e^x */
#define f_exp(x) f_pow2(x * LN2R)
#endif
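
As a quick sanity check of the DB_CO() conversion above (which the reverb code later in this commit uses to turn dB-valued dry/wet and early/tail levels into linear gains), here is a small standalone C++ sketch; the level values are placeholders chosen to match the defaults introduced elsewhere in this commit.

#include <cmath>
#include <cstdio>

// Same conversion as the DB_CO macro: anything at or below -90 dB is treated as silence.
static float dbToCoefficient(float db) {
    return db > -90.0f ? std::pow(10.0f, db * 0.05f) : 0.0f;
}

int main() {
    std::printf("  0 dB -> %.3f\n", dbToCoefficient(0.0f));    // 1.000 (unity gain)
    std::printf(" +6 dB -> %.3f\n", dbToCoefficient(6.0f));    // ~1.995
    std::printf("-22 dB -> %.3f\n", dbToCoefficient(-22.0f));  // ~0.079
    std::printf("-95 dB -> %.3f\n", dbToCoefficient(-95.0f));  // 0.000 (gated to silence)
    return 0;
}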

interface/external/gverb/include/lv2.h vendored Normal file

@ -0,0 +1,392 @@
/* LV2 - LADSPA (Linux Audio Developer's Simple Plugin API) Version 2
* Revision 1
*
* Copyright (C) 2000-2002 Richard W.E. Furse, Paul Barton-Davis,
* Stefan Westerfeld.
* Copyright (C) 2006-2008 Steve Harris, Dave Robillard.
*
* This header is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License,
* or (at your option) any later version.
*
* This header is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
* USA.
*/
#ifndef LV2_H_INCLUDED
#define LV2_H_INCLUDED
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/* ************************************************************************* */
/** @file lv2.h
*
* Revision: 1
*
* == Overview ==
*
* There are a large number of open source and free software synthesis
* packages in use or development at this time. This API ('LV2')
* attempts to give programmers the ability to write simple 'plugin'
* audio processors in C/C++ and link them dynamically ('plug') into
* a range of these packages ('hosts'). It should be possible for any
* host and any plugin to communicate completely through this interface.
*
* This API is deliberately as short and simple as possible.
* The information required to use a plugin is in a companion data
* (RDF) file. The shared library portion of the API (defined in this
* header) does not contain enough information to make use of the plugin
* possible - the data file is mandatory.
*
* Plugins are expected to distinguish between control rate and audio
* rate data (or other types of data defined by extensions). Plugins have
* 'ports' that are inputs or outputs and each plugin is 'run' for a 'block'
* corresponding to a short time interval measured in samples. Audio rate
* data is communicated using arrays with one element per sample processed,
* allowing a block of audio to be processed by the plugin in a single
* pass. Control rate data is communicated using single values. Control
* rate data has a single value at the start of a call to the 'run()'
* function, and may be considered to remain this value for its duration.
* Thus the 'control rate' is determined by the block size, controlled by
* the host. The plugin may assume that all its input and output ports have
* been connected to the relevant data location (see the 'connect_port()'
* function below) before it is asked to run, unless the port has been set
* 'connection optional' in the plugin's data file.
*
* Plugins will reside in shared object files suitable for dynamic linking
* by dlopen() and family. The file will provide a number of 'plugin
* types' that can be used to instantiate actual plugins (sometimes known
* as 'plugin instances') that can be connected together to perform tasks.
* The host can access these plugin types using the lv2_descriptor()
* function.
*
* This API contains very limited error-handling.
*
* == Threading rules ==
*
* Certain hosts may need to call the functions provided by a plugin from
* multiple threads. For this to be safe, the plugin must be written so that
* those functions can be executed simultaneously without problems.
* To facilitate this, the functions provided by a plugin are divided into
* classes:
*
* - Discovery class: lv2_descriptor(), extension_data()
* - Instantiation class: instantiate(), cleanup(), activate(), deactivate()
* - Audio class: run(), connect_port()
*
* Extensions to this specification which add new functions MUST declare in
* which of these classes the functions belong, or define new classes for them.
* The rules that hosts must follow are these:
*
* - When a function from the Discovery class is running, no other
* functions in the same shared object file may run.
* - When a function from the Instantiation class is running for a plugin
* instance, no other functions for that instance may run.
* - When a function is running for a plugin instance, no other
* function in the same class may run for that instance.
*
* Any simultaneous calls that are not explicitly forbidden by these rules
* are allowed. For example, a host may call run() for two different plugin
* instances simultaneously.
*/
/* ************************************************************************* */
/** Plugin Handle.
*
* This plugin handle indicates a particular instance of the plugin
* concerned. It is valid to compare this to NULL (0 for C++) but
* otherwise the host MUST NOT attempt to interpret it. The plugin
* may use it to reference internal instance data. */
typedef void * LV2_Handle;
/* ************************************************************************* */
/** Feature data.
*
* These are passed to a plugin's instantiate method to represent a special
* feature the host has which the plugin may depend on. This is to allow
* extensions to the LV2 specification without causing any breakage.
* Extensions may specify what data needs to be passed here. The base
* LV2 specification does not define any features; hosts are not required
* to use this facility. */
typedef struct _LV2_Feature {
/** A globally unique, case-sensitive identifier for this feature.
*
* This MUST be defined in the specification of any LV2 extension which
* defines a host feature. */
const char * URI;
/** Pointer to arbitrary data.
*
* This is to allow hosts to pass data to a plugin (simple values, data
* structures, function pointers, etc) as part of a 'feature'. The LV2
* specification makes no restrictions on the contents of this data.
* The data here MUST be clearly defined by the LV2 extension which defines
* this feature.
* If no data is required, this may be set to NULL. */
void * data;
} LV2_Feature;
/* ************************************************************************* */
/** Descriptor for a Type of Plugin.
*
* This structure is used to describe a plugin type. It provides a number
* of functions to instantiate it, link it to buffers and run it. */
typedef struct _LV2_Descriptor {
/** A globally unique, case-sensitive identifier for this plugin type.
*
* All plugins with the same URI MUST be compatible in terms of 'port
* signature', meaning they have the same number of ports, same port
* shortnames, and roughly the same functionality. URIs should
* probably contain a version number (or similar) for this reason.
*
* Rationale: When serializing session/patch/etc files, hosts MUST
* refer to a loaded plugin by the plugin URI only. In the future
* loading a plugin with this URI MUST yield a plugin with the
* same ports (etc) which is 100% compatible. */
const char * URI;
/** Function pointer that instantiates a plugin.
*
* A handle is returned indicating the new plugin instance. The
* instantiation function accepts a sample rate as a parameter as well
* as the plugin descriptor from which this instantiate function was
* found. This function must return NULL if instantiation fails.
*
* bundle_path is a string of the path to the LV2 bundle which contains
* this plugin binary. It MUST include the trailing directory separator
* (e.g. '/') so that BundlePath + filename gives the path to a file
* in the bundle.
*
* features is a NULL terminated array of LV2_Feature structs which
* represent the features the host supports. Plugins may refuse to
* instantiate if required features are not found here (however hosts
* SHOULD NOT use this as a discovery mechanism, instead reading the
* data file before attempting to instantiate the plugin). This array
* must always exist; if a host has no features, it MUST pass a single
* element array containing NULL (to simplify plugins).
*
* Note that instance initialisation should generally occur in
* activate() rather than here. If a host calls instantiate, it MUST
* call cleanup() at some point in the future. */
LV2_Handle (*instantiate)(const struct _LV2_Descriptor * descriptor,
double sample_rate,
const char * bundle_path,
const LV2_Feature *const * features);
/** Function pointer that connects a port on a plugin instance to a memory
* location where the block of data for the port will be read/written.
*
* The data location is expected to be of the type defined in the
* plugin's data file (e.g. an array of float for an lv2:AudioPort).
* Memory issues are managed by the host. The plugin must read/write
* the data at these locations every time run() is called, data
* present at the time of this connection call MUST NOT be
* considered meaningful.
*
* The host MUST NOT try to connect a data buffer to a port index
* that is not defined in the RDF data for the plugin. If it does,
* the plugin's behaviour is undefined.
*
* connect_port() may be called more than once for a plugin instance
* to allow the host to change the buffers that the plugin is reading
* or writing. These calls may be made before or after activate()
* or deactivate() calls. Note that there may be realtime constraints
* on connect_port (see lv2:hardRTCapable in lv2.ttl).
*
* connect_port() MUST be called at least once for each port before
* run() is called. The plugin must pay careful attention to the block
* size passed to the run function as the block allocated may only just
* be large enough to contain the block of data (typically samples), and
* is not guaranteed to be constant.
*
* Plugin writers should be aware that the host may elect to use the
* same buffer for more than one port and even use the same buffer for
* both input and output (see lv2:inPlaceBroken in lv2.ttl).
* However, overlapped buffers or use of a single buffer for both
* audio and control data may result in unexpected behaviour.
*
* If the plugin has the feature lv2:hardRTCapable then there are
* various things that the plugin MUST NOT do within the connect_port()
* function (see lv2.ttl). */
void (*connect_port)(LV2_Handle instance,
uint32_t port,
void * data_location);
/** Function pointer that initialises a plugin instance and activates
* it for use.
*
* This is separated from instantiate() to aid real-time support and so
* that hosts can reinitialise a plugin instance by calling deactivate()
* and then activate(). In this case the plugin instance must reset all
* state information dependent on the history of the plugin instance
* except for any data locations provided by connect_port(). If there
* is nothing for activate() to do then the plugin writer may provide
* a NULL rather than an empty function.
*
* When present, hosts MUST call this function once before run()
* is called for the first time. This call SHOULD be made as close
* to the run() call as possible and indicates to real-time plugins
* that they are now live, however plugins MUST NOT rely on a prompt
* call to run() after activate(). activate() may not be called again
* unless deactivate() is called first (after which activate() may be
* called again, followed by deactivate, etc. etc.). If a host calls
* activate, it MUST call deactivate at some point in the future.
*
* Note that connect_port() may be called before or after a call to
* activate(). */
void (*activate)(LV2_Handle instance);
/** Function pointer that runs a plugin instance for a block.
*
* Two parameters are required: the first is a handle to the particular
* instance to be run and the second indicates the block size (in
* samples) for which the plugin instance may run.
*
* Note that if an activate() function exists then it must be called
* before run(). If deactivate() is called for a plugin instance then
* the plugin instance may not be reused until activate() has been
* called again.
*
* If the plugin has the feature lv2:hardRTCapable then there are
* various things that the plugin MUST NOT do within the run()
* function (see lv2.ttl). */
void (*run)(LV2_Handle instance,
uint32_t sample_count);
/** This is the counterpart to activate() (see above). If there is
* nothing for deactivate() to do then the plugin writer may provide
* a NULL rather than an empty function.
*
* Hosts must deactivate all activated units after they have been run()
* for the last time. This call SHOULD be made as close to the last
* run() call as possible and indicates to real-time plugins that
* they are no longer live, however plugins MUST NOT rely on prompt
* deactivation. Note that connect_port() may be called before or
* after a call to deactivate().
*
* Note that deactivation is not similar to pausing as the plugin
* instance will be reinitialised when activate() is called to reuse it.
* Hosts MUST NOT call deactivate() unless activate() was previously
* called. */
void (*deactivate)(LV2_Handle instance);
/** This is the counterpart to instantiate() (see above). Once an instance
* of a plugin has been finished with it can be deleted using this
* function. The instance handle passed ceases to be valid after
* this call.
*
* If activate() was called for a plugin instance then a corresponding
* call to deactivate() MUST be made before cleanup() is called.
* Hosts MUST NOT call cleanup() unless instantiate() was previously
* called. */
void (*cleanup)(LV2_Handle instance);
/** Function pointer that can be used to return additional instance data for
* a plugin defined by some extension (e.g. a struct containing additional
* function pointers).
*
* The actual type and meaning of the returned object MUST be specified
* precisely by the extension if it defines any extra data. If a particular
* extension does not define extra instance data, this function MUST return
* NULL for that extension's URI. If a plugin does not support any
* extensions that define extra instance data, this function pointer may be
* set to NULL rather than providing an empty function.
*
* The only parameter is the URI of the extension. The plugin MUST return
* NULL if it does not support the extension, but hosts SHOULD NOT use this
* as a discovery method (e.g. hosts should only call this function for
* extensions known to be supported by the plugin from the data file).
*
* The host is never responsible for freeing the returned value.
*
* NOTE: This function should return a struct (likely containing function
* pointers) and NOT a direct function pointer. Standard C and C++ do not
* allow type casts from void* to a function pointer type. To provide
* additional functions a struct should be returned containing the extra
* function pointers (which is valid standard code, and a much better idea
* for extensibility anyway). */
const void* (*extension_data)(const char * uri);
} LV2_Descriptor;
/* ****************************************************************** */
/** Accessing Plugin Types.
*
* The exact mechanism by which plugins are loaded is host-dependent,
* however all most hosts will need to know is the URI of the plugin they
* wish to load. The environment variable LV2_PATH, if present, should
* contain a colon-separated path indicating directories (containing
* plugin bundle subdirectories) that should be searched (in order)
* for plugins. It is expected that hosts will use a library to provide
* this functionality.
*
* A plugin programmer must include a function called "lv2_descriptor"
* with the following function prototype within the shared object
* file. This function will have C-style linkage (if you are using
* C++ this is taken care of by the 'extern "C"' clause at the top of
* the file).
*
* A host will find the plugin shared object file by one means or another,
* find the lv2_descriptor() function, call it, and proceed from there.
*
* Plugin types are accessed by index (not ID) using values from 0
* upwards. Out of range indexes must result in this function returning
* NULL, so the plugin count can be determined by checking for the least
* index that results in NULL being returned. Index has no meaning,
* hosts MUST NOT depend on it remaining constant (ie when serialising)
* in any way. */
const LV2_Descriptor * lv2_descriptor(uint32_t index);
/** Datatype corresponding to the lv2_descriptor() function. */
typedef const LV2_Descriptor *
(*LV2_Descriptor_Function)(uint32_t index);
/* ******************************************************************** */
/* Put this (LV2_SYMBOL_EXPORT) before any functions that are to be loaded
* by the host as a symbol from the dynamic library.
*/
#ifdef WIN32
#define LV2_SYMBOL_EXPORT __declspec(dllexport)
#else
#define LV2_SYMBOL_EXPORT
#endif
#ifdef __cplusplus
}
#endif
#endif /* LV2_H_INCLUDED */
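
To make the entry points described in the comments above concrete, here is a minimal, hypothetical plugin skeleton written against this header. The URI, port numbering, and struct name are invented for illustration only; this plugin is not part of the commit.

#include <stdlib.h>
#include <string.h>
#include "lv2.h"

// Hypothetical pass-through plugin: one audio input (port 0), one audio output (port 1).
typedef struct { const float* in; float* out; } Passthrough;

static LV2_Handle instantiate(const LV2_Descriptor* descriptor, double sample_rate,
                              const char* bundle_path, const LV2_Feature* const* features) {
    (void)descriptor; (void)sample_rate; (void)bundle_path; (void)features;
    return calloc(1, sizeof(Passthrough));       // NULL on allocation failure, as required
}

static void connect_port(LV2_Handle instance, uint32_t port, void* data) {
    Passthrough* p = (Passthrough*)instance;
    if (port == 0) p->in = (const float*)data;   // port indices come from the plugin's RDF data
    else if (port == 1) p->out = (float*)data;
}

static void run(LV2_Handle instance, uint32_t sample_count) {
    Passthrough* p = (Passthrough*)instance;
    memcpy(p->out, p->in, sample_count * sizeof(float));
}

static void cleanup(LV2_Handle instance) { free(instance); }

static const LV2_Descriptor descriptor = {
    "http://example.org/plugins/passthrough",    // hypothetical, globally unique URI
    instantiate,
    connect_port,
    NULL,                                        // activate: nothing to reset
    run,
    NULL,                                        // deactivate
    cleanup,
    NULL                                         // extension_data: no extensions supported
};

LV2_SYMBOL_EXPORT const LV2_Descriptor* lv2_descriptor(uint32_t index) {
    return index == 0 ? &descriptor : NULL;      // out-of-range indices must return NULL
}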

interface/external/gverb/src/gverb.c vendored Normal file

@ -0,0 +1,207 @@
/*
Copyright (C) 1999 Juhana Sadeharju
kouhia at nic.funet.fi
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "gverbdsp.h"
#include "gverb.h"
#include "../include/ladspa-util.h"
ty_gverb *gverb_new(int srate, float maxroomsize, float roomsize,
float revtime,
float damping, float spread,
float inputbandwidth, float earlylevel,
float taillevel)
{
ty_gverb *p;
float ga,gb,gt;
int i,n;
float r;
float diffscale;
int a,b,c,cc,d,dd,e;
float spread1,spread2;
p = (ty_gverb *)malloc(sizeof(ty_gverb));
p->rate = srate;
p->fdndamping = damping;
p->maxroomsize = maxroomsize;
p->roomsize = roomsize;
p->revtime = revtime;
p->earlylevel = earlylevel;
p->taillevel = taillevel;
p->maxdelay = p->rate*p->maxroomsize/340.0;
p->largestdelay = p->rate*p->roomsize/340.0;
/* Input damper */
p->inputbandwidth = inputbandwidth;
p->inputdamper = damper_make(1.0 - p->inputbandwidth);
/* FDN section */
p->fdndels = (ty_fixeddelay **)calloc(FDNORDER, sizeof(ty_fixeddelay *));
for(i = 0; i < FDNORDER; i++) {
p->fdndels[i] = fixeddelay_make((int)p->maxdelay+1000);
}
p->fdngains = (float *)calloc(FDNORDER, sizeof(float));
p->fdnlens = (int *)calloc(FDNORDER, sizeof(int));
p->fdndamps = (ty_damper **)calloc(FDNORDER, sizeof(ty_damper *));
for(i = 0; i < FDNORDER; i++) {
p->fdndamps[i] = damper_make(p->fdndamping);
}
ga = 60.0;
gt = p->revtime;
ga = powf(10.0f,-ga/20.0f);
n = p->rate*gt;
p->alpha = pow((double)ga, 1.0/(double)n);
gb = 0.0;
for(i = 0; i < FDNORDER; i++) {
if (i == 0) gb = 1.000000*p->largestdelay;
if (i == 1) gb = 0.816490*p->largestdelay;
if (i == 2) gb = 0.707100*p->largestdelay;
if (i == 3) gb = 0.632450*p->largestdelay;
#if 0
p->fdnlens[i] = nearest_prime((int)gb, 0.5);
#else
p->fdnlens[i] = f_round(gb);
#endif
p->fdngains[i] = -powf((float)p->alpha,p->fdnlens[i]);
}
p->d = (float *)calloc(FDNORDER, sizeof(float));
p->u = (float *)calloc(FDNORDER, sizeof(float));
p->f = (float *)calloc(FDNORDER, sizeof(float));
/* Diffuser section */
diffscale = (float)p->fdnlens[3]/(210+159+562+410);
spread1 = spread;
spread2 = 3.0*spread;
b = 210;
r = 0.125541;
a = spread1*r;
c = 210+159+a;
cc = c-b;
r = 0.854046;
a = spread2*r;
d = 210+159+562+a;
dd = d-c;
e = 1341-d;
p->ldifs = (ty_diffuser **)calloc(4, sizeof(ty_diffuser *));
p->ldifs[0] = diffuser_make((int)(diffscale*b),0.75);
p->ldifs[1] = diffuser_make((int)(diffscale*cc),0.75);
p->ldifs[2] = diffuser_make((int)(diffscale*dd),0.625);
p->ldifs[3] = diffuser_make((int)(diffscale*e),0.625);
b = 210;
r = -0.568366;
a = spread1*r;
c = 210+159+a;
cc = c-b;
r = -0.126815;
a = spread2*r;
d = 210+159+562+a;
dd = d-c;
e = 1341-d;
p->rdifs = (ty_diffuser **)calloc(4, sizeof(ty_diffuser *));
p->rdifs[0] = diffuser_make((int)(diffscale*b),0.75);
p->rdifs[1] = diffuser_make((int)(diffscale*cc),0.75);
p->rdifs[2] = diffuser_make((int)(diffscale*dd),0.625);
p->rdifs[3] = diffuser_make((int)(diffscale*e),0.625);
/* Tapped delay section */
p->tapdelay = fixeddelay_make(44000);
p->taps = (int *)calloc(FDNORDER, sizeof(int));
p->tapgains = (float *)calloc(FDNORDER, sizeof(float));
p->taps[0] = 5+0.410*p->largestdelay;
p->taps[1] = 5+0.300*p->largestdelay;
p->taps[2] = 5+0.155*p->largestdelay;
p->taps[3] = 5+0.000*p->largestdelay;
for(i = 0; i < FDNORDER; i++) {
p->tapgains[i] = pow(p->alpha,(double)p->taps[i]);
}
return(p);
}
void gverb_free(ty_gverb *p)
{
int i;
damper_free(p->inputdamper);
for(i = 0; i < FDNORDER; i++) {
fixeddelay_free(p->fdndels[i]);
damper_free(p->fdndamps[i]);
diffuser_free(p->ldifs[i]);
diffuser_free(p->rdifs[i]);
}
free(p->fdndels);
free(p->fdngains);
free(p->fdnlens);
free(p->fdndamps);
free(p->d);
free(p->u);
free(p->f);
free(p->ldifs);
free(p->rdifs);
free(p->taps);
free(p->tapgains);
fixeddelay_free(p->tapdelay);
free(p);
}
void gverb_flush(ty_gverb *p)
{
int i;
damper_flush(p->inputdamper);
for(i = 0; i < FDNORDER; i++) {
fixeddelay_flush(p->fdndels[i]);
damper_flush(p->fdndamps[i]);
diffuser_flush(p->ldifs[i]);
diffuser_flush(p->rdifs[i]);
}
memset(p->d, 0, FDNORDER * sizeof(float));
memset(p->u, 0, FDNORDER * sizeof(float));
memset(p->f, 0, FDNORDER * sizeof(float));
fixeddelay_flush(p->tapdelay);
}
/* swh: other functions are now in the .h file for inlining */
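
For reference, the feedback gains computed in gverb_new() and gverb_set_revtime() follow the standard RT60 relation. With n = rate * revtime samples and alpha the per-sample decay factor:

\alpha^{n} = 10^{-60/20} \;\Longrightarrow\; \alpha = 10^{-3/n}, \qquad n = \mathrm{rate}\cdot\mathrm{revtime}

|\mathrm{fdngains}[i]| = \alpha^{\,\mathrm{fdnlens}[i]}

So a signal circulating in delay line i is attenuated by 60 dB after revtime seconds, which is exactly what gverb_set_revtime() recomputes when the reverb time changes.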

interface/external/gverb/src/gverb.h vendored Normal file

@ -0,0 +1,234 @@
/*
Copyright (C) 1999 Juhana Sadeharju
kouhia at nic.funet.fi
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef GVERB_H
#define GVERB_H
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "gverbdsp.h"
#include "gverb.h"
#include "../include/ladspa-util.h"
#define FDNORDER 4
typedef struct {
int rate;
float inputbandwidth;
float taillevel;
float earlylevel;
ty_damper *inputdamper;
float maxroomsize;
float roomsize;
float revtime;
float maxdelay;
float largestdelay;
ty_fixeddelay **fdndels;
float *fdngains;
int *fdnlens;
ty_damper **fdndamps;
float fdndamping;
ty_diffuser **ldifs;
ty_diffuser **rdifs;
ty_fixeddelay *tapdelay;
int *taps;
float *tapgains;
float *d;
float *u;
float *f;
double alpha;
} ty_gverb;
ty_gverb *gverb_new(int, float, float, float, float, float, float, float, float);
void gverb_free(ty_gverb *);
void gverb_flush(ty_gverb *);
static void gverb_do(ty_gverb *, float, float *, float *);
static void gverb_set_roomsize(ty_gverb *, float);
static void gverb_set_revtime(ty_gverb *, float);
static void gverb_set_damping(ty_gverb *, float);
static void gverb_set_inputbandwidth(ty_gverb *, float);
static void gverb_set_earlylevel(ty_gverb *, float);
static void gverb_set_taillevel(ty_gverb *, float);
/*
* This FDN reverb can be made smoother by setting the matrix elements on the
* diagonal, and near it, to zero or nearly zero. Setting the diagonal to zero
* removes the effect of the parallel comb structure from the reverberation;
* a comb adds a uniform impulse stream to the reverberation impulse response,
* which is undesirable. Setting near-diagonal elements to zero removes delay
* sequences with consecutive delays of similar lengths, when the delays are
* sorted by length with respect to matrix element index. The matrix described
* here could be generated by
* differencing Rocchesso's circulant matrix at max diffuse value and at low
* diffuse value (approaching parallel combs).
*
* Example 1:
* Set a(k,k), for all k, equal to 0.
*
* Example 2:
* Set a(k,k), a(k,k-1) and a(k,k+1) equal to 0.
*
* Example 3: The transition to zero gains could be smooth as well.
* a(k,k-1) and a(k,k+1) could be 0.3, and a(k,k-2) and a(k,k+2) could
* be 0.5, say.
*/
static inline void gverb_fdnmatrix(float *a, float *b)
{
const float dl0 = a[0], dl1 = a[1], dl2 = a[2], dl3 = a[3];
b[0] = 0.5f*(+dl0 + dl1 - dl2 - dl3);
b[1] = 0.5f*(+dl0 - dl1 - dl2 + dl3);
b[2] = 0.5f*(-dl0 + dl1 - dl2 + dl3);
b[3] = 0.5f*(+dl0 + dl1 + dl2 + dl3);
}
static inline void gverb_do(ty_gverb *p, float x, float *yl, float *yr)
{
float z;
unsigned int i;
float lsum,rsum,sum,sign;
if ((x != x) || fabsf(x) > 100000.0f) {
x = 0.0f;
}
z = damper_do(p->inputdamper, x);
z = diffuser_do(p->ldifs[0],z);
for(i = 0; i < FDNORDER; i++) {
p->u[i] = p->tapgains[i]*fixeddelay_read(p->tapdelay,p->taps[i]);
}
fixeddelay_write(p->tapdelay,z);
for(i = 0; i < FDNORDER; i++) {
p->d[i] = damper_do(p->fdndamps[i],
p->fdngains[i]*fixeddelay_read(p->fdndels[i],
p->fdnlens[i]));
}
sum = 0.0f;
sign = 1.0f;
for(i = 0; i < FDNORDER; i++) {
sum += sign*(p->taillevel*p->d[i] + p->earlylevel*p->u[i]);
sign = -sign;
}
sum += x*p->earlylevel;
lsum = sum;
rsum = sum;
gverb_fdnmatrix(p->d,p->f);
for(i = 0; i < FDNORDER; i++) {
fixeddelay_write(p->fdndels[i],p->u[i]+p->f[i]);
}
lsum = diffuser_do(p->ldifs[1],lsum);
lsum = diffuser_do(p->ldifs[2],lsum);
lsum = diffuser_do(p->ldifs[3],lsum);
rsum = diffuser_do(p->rdifs[1],rsum);
rsum = diffuser_do(p->rdifs[2],rsum);
rsum = diffuser_do(p->rdifs[3],rsum);
*yl = lsum;
*yr = rsum;
}
static inline void gverb_set_roomsize(ty_gverb *p, const float a)
{
unsigned int i;
if (a <= 1.0 || (a != a)) {
p->roomsize = 1.0;
} else {
p->roomsize = a;
}
p->largestdelay = p->rate * p->roomsize * 0.00294f;
p->fdnlens[0] = f_round(1.000000f*p->largestdelay);
p->fdnlens[1] = f_round(0.816490f*p->largestdelay);
p->fdnlens[2] = f_round(0.707100f*p->largestdelay);
p->fdnlens[3] = f_round(0.632450f*p->largestdelay);
for(i = 0; i < FDNORDER; i++) {
p->fdngains[i] = -powf((float)p->alpha, p->fdnlens[i]);
}
p->taps[0] = 5+f_round(0.410f*p->largestdelay);
p->taps[1] = 5+f_round(0.300f*p->largestdelay);
p->taps[2] = 5+f_round(0.155f*p->largestdelay);
p->taps[3] = 5+f_round(0.000f*p->largestdelay);
for(i = 0; i < FDNORDER; i++) {
p->tapgains[i] = powf((float)p->alpha, p->taps[i]);
}
}
static inline void gverb_set_revtime(ty_gverb *p,float a)
{
float ga,gt;
double n;
unsigned int i;
p->revtime = a;
ga = 60.0;
gt = p->revtime;
ga = powf(10.0f,-ga/20.0f);
n = p->rate*gt;
p->alpha = (double)powf(ga,1.0f/n);
for(i = 0; i < FDNORDER; i++) {
p->fdngains[i] = -powf((float)p->alpha, p->fdnlens[i]);
}
}
static inline void gverb_set_damping(ty_gverb *p,float a)
{
unsigned int i;
p->fdndamping = a;
for(i = 0; i < FDNORDER; i++) {
damper_set(p->fdndamps[i],p->fdndamping);
}
}
static inline void gverb_set_inputbandwidth(ty_gverb *p,float a)
{
p->inputbandwidth = a;
damper_set(p->inputdamper,1.0 - p->inputbandwidth);
}
static inline void gverb_set_earlylevel(ty_gverb *p,float a)
{
p->earlylevel = a;
}
static inline void gverb_set_taillevel(ty_gverb *p,float a)
{
p->taillevel = a;
}
#endif
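
For orientation, here is a condensed, hypothetical host-side sketch of the API declared above. It follows the same gverb_new / gverb_set_* / gverb_do sequence that Audio::initGverb() and Audio::addReverb() use later in this commit; the function name processBlock and all parameter values are placeholders.

extern "C" {
#include "gverb.h"
}

// Hypothetical host loop: feed one mono sample per call to gverb_do() and mix the stereo wet output.
void processBlock(ty_gverb* reverb, const float* mono, float* outL, float* outR, int numFrames) {
    for (int i = 0; i < numFrames; i++) {
        float wetL, wetR;
        gverb_do(reverb, mono[i], &wetL, &wetR);
        outL[i] = mono[i] + wetL;   // Audio.cpp scales dry and wet by dB-derived gains instead of a plain sum
        outR[i] = mono[i] + wetR;
    }
}

int main() {
    // srate, maxroomsize, roomsize, revtime, damping, spread, inputbandwidth, earlylevel, taillevel
    ty_gverb* reverb = gverb_new(44100, 50.0f, 50.0f, 4.0f, 0.5f, 15.0f, 0.75f, -22.0f, -28.0f);
    gverb_set_roomsize(reverb, 50.0f);              // the setters recompute delay lengths and gains
    gverb_set_revtime(reverb, 4.0f);
    gverb_set_earlylevel(reverb, DB_CO(-22.0f));    // convert dB to linear gain, as Audio::initGverb() does
    gverb_set_taillevel(reverb, DB_CO(-28.0f));

    float in[128] = {0}, left[128], right[128];
    processBlock(reverb, in, left, right, 128);
    gverb_free(reverb);
    return 0;
}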

interface/external/gverb/src/gverbdsp.c vendored Normal file

@ -0,0 +1,130 @@
/*
Copyright (C) 1999 Juhana Sadeharju
kouhia at nic.funet.fi
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include "gverbdsp.h"
#define TRUE 1
#define FALSE 0
ty_diffuser *diffuser_make(int size, float coeff)
{
ty_diffuser *p;
int i;
p = (ty_diffuser *)malloc(sizeof(ty_diffuser));
p->size = size;
p->coeff = coeff;
p->idx = 0;
p->buf = (float *)malloc(size*sizeof(float));
for (i = 0; i < size; i++) p->buf[i] = 0.0;
return(p);
}
void diffuser_free(ty_diffuser *p)
{
free(p->buf);
free(p);
}
void diffuser_flush(ty_diffuser *p)
{
memset(p->buf, 0, p->size * sizeof(float));
}
ty_damper *damper_make(float damping)
{
ty_damper *p;
p = (ty_damper *)malloc(sizeof(ty_damper));
p->damping = damping;
p->delay = 0.0f;
return(p);
}
void damper_free(ty_damper *p)
{
free(p);
}
void damper_flush(ty_damper *p)
{
p->delay = 0.0f;
}
ty_fixeddelay *fixeddelay_make(int size)
{
ty_fixeddelay *p;
int i;
p = (ty_fixeddelay *)malloc(sizeof(ty_fixeddelay));
p->size = size;
p->idx = 0;
p->buf = (float *)malloc(size*sizeof(float));
for (i = 0; i < size; i++) p->buf[i] = 0.0;
return(p);
}
void fixeddelay_free(ty_fixeddelay *p)
{
free(p->buf);
free(p);
}
void fixeddelay_flush(ty_fixeddelay *p)
{
memset(p->buf, 0, p->size * sizeof(float));
}
int isprime(int n)
{
unsigned int i;
const unsigned int lim = (int)sqrtf((float)n);
if (n == 2) return(TRUE);
if ((n & 1) == 0) return(FALSE);
for(i = 3; i <= lim; i += 2)
if ((n % i) == 0) return(FALSE);
return(TRUE);
}
int nearest_prime(int n, float rerror)
/* relative error; new prime will be in range
* [n-n*rerror, n+n*rerror];
*/
{
int bound,k;
if (isprime(n)) return(n);
/* assume n is large enough and n*rerror enough smaller than n */
bound = n*rerror;
for(k = 1; k <= bound; k++) {
if (isprime(n+k)) return(n+k);
if (isprime(n-k)) return(n-k);
}
return(-1);
}

interface/external/gverb/src/gverbdsp.h vendored Normal file

@ -0,0 +1,85 @@
#ifndef GVERBDSP_H
#define GVERBDSP_H
#include "../include/ladspa-util.h"
typedef struct {
int size;
int idx;
float *buf;
} ty_fixeddelay;
typedef struct {
int size;
float coeff;
int idx;
float *buf;
} ty_diffuser;
typedef struct {
float damping;
float delay;
} ty_damper;
ty_diffuser *diffuser_make(int, float);
void diffuser_free(ty_diffuser *);
void diffuser_flush(ty_diffuser *);
//float diffuser_do(ty_diffuser *, float);
ty_damper *damper_make(float);
void damper_free(ty_damper *);
void damper_flush(ty_damper *);
//void damper_set(ty_damper *, float);
//float damper_do(ty_damper *, float);
ty_fixeddelay *fixeddelay_make(int);
void fixeddelay_free(ty_fixeddelay *);
void fixeddelay_flush(ty_fixeddelay *);
//float fixeddelay_read(ty_fixeddelay *, int);
//void fixeddelay_write(ty_fixeddelay *, float);
int isprime(int);
int nearest_prime(int, float);
static inline float diffuser_do(ty_diffuser *p, float x)
{
float y,w;
w = x - p->buf[p->idx]*p->coeff;
w = flush_to_zero(w);
y = p->buf[p->idx] + w*p->coeff;
p->buf[p->idx] = w;
p->idx = (p->idx + 1) % p->size;
return(y);
}
static inline float fixeddelay_read(ty_fixeddelay *p, int n)
{
int i;
i = (p->idx - n + p->size) % p->size;
return(p->buf[i]);
}
static inline void fixeddelay_write(ty_fixeddelay *p, float x)
{
p->buf[p->idx] = x;
p->idx = (p->idx + 1) % p->size;
}
static inline void damper_set(ty_damper *p, float damping)
{
p->damping = damping;
}
static inline float damper_do(ty_damper *p, float x)
{
float y;
y = x*(1.0-p->damping) + p->delay*p->damping;
p->delay = y;
return(y);
}
#endif
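
Two closed forms worth keeping in mind for the per-sample primitives above (with d = damping, g = coeff, M = size): damper_do() is a one-pole lowpass and diffuser_do() is a Schroeder allpass.

y[n] = (1-d)\,x[n] + d\,y[n-1] \;\Longrightarrow\; H_{\mathrm{damper}}(z) = \frac{1-d}{1-d\,z^{-1}}

w[n] = x[n] - g\,w[n-M], \quad y[n] = w[n-M] + g\,w[n] \;\Longrightarrow\; H_{\mathrm{diffuser}}(z) = \frac{g + z^{-M}}{1 + g\,z^{-M}}

The diffuser's magnitude response is 1 at every frequency, so the cascaded diffusers in gverb.c build up echo density without colouring the spectrum, while the dampers supply the frequency-dependent decay.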


@ -123,11 +123,14 @@ Audio::Audio(QObject* parent) :
memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
// Create the noise sample array
_noiseSampleFrames = new float[NUMBER_OF_NOISE_SAMPLE_FRAMES];
connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedSilence, this, &Audio::addStereoSilenceToScope, Qt::DirectConnection);
connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedLastFrameRepeatedWithFade, this, &Audio::addLastFrameRepeatedWithFadeToScope, Qt::DirectConnection);
connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedStereoSamples, this, &Audio::addStereoSamplesToScope, Qt::DirectConnection);
connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples, this, &Audio::processReceivedSamples, Qt::DirectConnection);
// Initialize GVerb
initGverb();
}
void Audio::init(QGLWidget *parent) {
@ -489,6 +492,70 @@ bool Audio::switchOutputToAudioDevice(const QString& outputDeviceName) {
return switchOutputToAudioDevice(getNamedAudioDeviceForMode(QAudio::AudioOutput, outputDeviceName));
}
void Audio::initGverb() {
// Initialize a new gverb instance
_gverb = gverb_new(_outputFormat.sampleRate(), _reverbOptions.getMaxRoomSize(), _reverbOptions.getRoomSize(), _reverbOptions.getReverbTime(),
_reverbOptions.getDamping(), _reverbOptions.getSpread(), _reverbOptions.getInputBandwidth(), _reverbOptions.getEarlyLevel(),
_reverbOptions.getTailLevel());
// Configure the instance (these functions are not super well named - they actually set several internal variables)
gverb_set_roomsize(_gverb, _reverbOptions.getRoomSize());
gverb_set_revtime(_gverb, _reverbOptions.getReverbTime());
gverb_set_damping(_gverb, _reverbOptions.getDamping());
gverb_set_inputbandwidth(_gverb, _reverbOptions.getInputBandwidth());
gverb_set_earlylevel(_gverb, DB_CO(_reverbOptions.getEarlyLevel()));
gverb_set_taillevel(_gverb, DB_CO(_reverbOptions.getTailLevel()));
}
void Audio::setReverbOptions(const AudioEffectOptions* options) {
// Save the new options
_reverbOptions.setMaxRoomSize(options->getMaxRoomSize());
_reverbOptions.setRoomSize(options->getRoomSize());
_reverbOptions.setReverbTime(options->getReverbTime());
_reverbOptions.setDamping(options->getDamping());
_reverbOptions.setSpread(options->getSpread());
_reverbOptions.setInputBandwidth(options->getInputBandwidth());
_reverbOptions.setEarlyLevel(options->getEarlyLevel());
_reverbOptions.setTailLevel(options->getTailLevel());
_reverbOptions.setDryLevel(options->getDryLevel());
_reverbOptions.setWetLevel(options->getWetLevel());
// Apply them to the reverb instance(s)
initGverb();
}
void Audio::addReverb(int16_t* samplesData, int numSamples, QAudioFormat& audioFormat) {
float dryFraction = DB_CO(_reverbOptions.getDryLevel());
float wetFraction = DB_CO(_reverbOptions.getWetLevel());
float lValue,rValue;
for (int sample = 0; sample < numSamples; sample += audioFormat.channelCount()) {
// Run GVerb
float value = (float)samplesData[sample];
gverb_do(_gverb, value, &lValue, &rValue);
// Mix the left and right channels, clamping to avoid clipping. Ignore any additional channels.
for (unsigned int j = sample; j < sample + audioFormat.channelCount(); j++) {
if (j == sample) {
// left channel
int lResult = (int)(samplesData[j] * dryFraction + lValue * wetFraction);
if (lResult > 32767) lResult = 32767;
if (lResult < -32768) lResult = -32768;
samplesData[j] = (int16_t)lResult;
} else if (j == (sample + 1)) {
// right channel
int rResult = (int)(samplesData[j] * dryFraction + rValue * wetFraction);
if (rResult > 32767) rResult = 32767;
if (rResult < -32768) rResult = -32768;
samplesData[j] = (int16_t)rResult;
} else {
// ignore channels above 2
}
}
}
}
void Audio::handleAudioInput() {
static char audioDataPacket[MAX_PACKET_SIZE];
@ -1060,6 +1127,110 @@ void Audio::toggleStereoInput() {
}
}
void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
_ringBuffer.parseData(audioByteArray);
float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate())
* (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount());
if (!_ringBuffer.isStarved() && _audioOutput && _audioOutput->bytesFree() == _audioOutput->bufferSize()) {
// we don't have any audio data left in the output buffer
// we just starved
//qDebug() << "Audio output just starved.";
_ringBuffer.setIsStarved(true);
_numFramesDisplayStarve = 10;
}
// if there is anything in the ring buffer, decide what to do
if (_ringBuffer.samplesAvailable() > 0) {
int numNetworkOutputSamples = _ringBuffer.samplesAvailable();
int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;
QByteArray outputBuffer;
outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
int numSamplesNeededToStartPlayback = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (_jitterBufferSamples * 2);
if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(numSamplesNeededToStartPlayback)) {
// We are still waiting for enough samples to begin playback
// qDebug() << numNetworkOutputSamples << " samples so far, waiting for " << numSamplesNeededToStartPlayback;
} else {
// We are either already playing back, or we have enough audio to start playing back.
//qDebug() << "pushing " << numNetworkOutputSamples;
_ringBuffer.setIsStarved(false);
int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples];
if (_processSpatialAudio) {
unsigned int sampleTime = _spatialAudioStart;
QByteArray buffer;
buffer.resize(numNetworkOutputSamples * sizeof(int16_t));
_ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
// Accumulate direct transmission of audio from sender to receiver
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat);
addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
}
// Send audio off for spatial processing
emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat);
// copy the samples we'll resample from the spatial audio ring buffer - this also
// pushes the read pointer of the spatial audio ring buffer forwards
_spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
// Advance the start point for the next packet of audio to arrive
_spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
} else {
// copy the samples we'll resample from the ring buffer - this also
// pushes the read pointer of the ring buffer forwards
_ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
}
// copy the packet from the RB to the output
linearResampling(ringBufferSamples,
(int16_t*) outputBuffer.data(),
numNetworkOutputSamples,
numDeviceOutputSamples,
_desiredOutputFormat, _outputFormat);
if(_reverb) {
addReverb((int16_t*)outputBuffer.data(), numDeviceOutputSamples, _outputFormat);
}
if (_outputDevice) {
_outputDevice->write(outputBuffer);
}
if (_scopeEnabled && !_scopeEnabledPause) {
unsigned int numAudioChannels = _desiredOutputFormat.channelCount();
int16_t* samples = ringBufferSamples;
for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) {
unsigned int audioChannel = 0;
addBufferToScope(
_scopeOutputLeft,
_scopeOutputOffset,
samples, audioChannel, numAudioChannels);
audioChannel = 1;
addBufferToScope(
_scopeOutputRight,
_scopeOutputOffset,
samples, audioChannel, numAudioChannels);
_scopeOutputOffset += NETWORK_SAMPLES_PER_FRAME;
_scopeOutputOffset %= _samplesPerScope;
samples += NETWORK_SAMPLES_PER_FRAME * numAudioChannels;
}
}
delete[] ringBufferSamples;
}
}
}
void Audio::processProceduralAudio(int16_t* monoInput, int numSamples) {
// zero out the locally injected audio in preparation for audio procedural sounds


@ -44,6 +44,14 @@
#include <StdDev.h>
#include "MixedProcessedAudioStream.h"
#include "AudioEffectOptions.h"
#include <AudioRingBuffer.h>
#include <StdDev.h>
extern "C" {
#include <gverb.h>
#include <gverbdsp.h>
}
static const int NUM_AUDIO_CHANNELS = 2;
@ -160,6 +168,8 @@ public slots:
float getInputVolume() const { return (_audioInput) ? _audioInput->volume() : 0.0f; }
void setInputVolume(float volume) { if (_audioInput) _audioInput->setVolume(volume); }
void setReverb(bool reverb) { _reverb = reverb; }
void setReverbOptions(const AudioEffectOptions* options);
const AudioStreamStats& getAudioMixerAvatarStreamAudioStats() const { return _audioMixerAvatarStreamAudioStats; }
const QHash<QUuid, AudioStreamStats>& getAudioMixerInjectedStreamAudioStatsMap() const { return _audioMixerInjectedStreamAudioStatsMap; }
@ -231,6 +241,9 @@ private:
int _proceduralEffectSample;
bool _muted;
bool _localEcho;
bool _reverb;
AudioEffectOptions _reverbOptions;
ty_gverb *_gverb;
GLuint _micTextureId;
GLuint _muteTextureId;
GLuint _boxTextureId;
@ -250,6 +263,10 @@ private:
// 2. Mix with the audio input
void processProceduralAudio(int16_t* monoInput, int numSamples);
// Adds Reverb
void initGverb();
void addReverb(int16_t* samples, int numSamples, QAudioFormat& format);
// Add sounds that we want the user to not hear themselves, by adding on top of mic input signal
void addProceduralSounds(int16_t* monoInput, int numSamples);


@ -70,3 +70,11 @@ float AudioDeviceScriptingInterface::getInputVolume() {
void AudioDeviceScriptingInterface::setInputVolume(float volume) {
Application::getInstance()->getAudio()->setInputVolume(volume);
}
void AudioDeviceScriptingInterface::setReverb(bool reverb) {
Application::getInstance()->getAudio()->setReverb(reverb);
}
void AudioDeviceScriptingInterface::setReverbOptions(const AudioEffectOptions* options) {
Application::getInstance()->getAudio()->setReverbOptions(options);
}


@ -39,6 +39,8 @@ public slots:
float getInputVolume();
void setInputVolume(float volume);
void setReverb(bool reverb);
void setReverbOptions(const AudioEffectOptions* options);
};
#endif // hifi_AudioDeviceScriptingInterface_h


@ -0,0 +1,12 @@
//
// AudioEffectOptions.cpp
// hifi
//
#include "AudioEffectOptions.h"
AudioEffectOptions::AudioEffectOptions() { }
QScriptValue AudioEffectOptions::constructor(QScriptContext* context, QScriptEngine* engine) {
return engine->newQObject(new AudioEffectOptions());
}


@ -0,0 +1,99 @@
//
// AudioEffectOptions.h
// hifi
//
#ifndef __hifi__AudioEffectOptions__
#define __hifi__AudioEffectOptions__
#include <QObject>
#include <QtScript/QScriptContext>
#include <QtScript/QScriptEngine>
class AudioEffectOptions : public QObject {
Q_OBJECT
// Square meters
Q_PROPERTY(float maxRoomSize READ getMaxRoomSize WRITE setMaxRoomSize)
Q_PROPERTY(float roomSize READ getRoomSize WRITE setRoomSize)
// Seconds
Q_PROPERTY(float reverbTime READ getReverbTime WRITE setReverbTime)
// Ratio between 0 and 1
Q_PROPERTY(float damping READ getDamping WRITE setDamping)
// (?) Does not appear to be set externally very often
Q_PROPERTY(float spread READ getSpread WRITE setSpread)
// Ratio between 0 and 1
Q_PROPERTY(float inputBandwidth READ getInputBandwidth WRITE setInputBandwidth)
// in dB
Q_PROPERTY(float earlyLevel READ getEarlyLevel WRITE setEarlyLevel)
Q_PROPERTY(float tailLevel READ getTailLevel WRITE setTailLevel)
Q_PROPERTY(float dryLevel READ getDryLevel WRITE setDryLevel)
Q_PROPERTY(float wetLevel READ getWetLevel WRITE setWetLevel)
public:
AudioEffectOptions();
static QScriptValue constructor(QScriptContext* context, QScriptEngine* engine);
float getRoomSize() const { return _roomSize; }
void setRoomSize(float roomSize ) { _roomSize = roomSize; }
float getMaxRoomSize() const { return _maxRoomSize; }
void setMaxRoomSize(float maxRoomSize ) { _maxRoomSize = maxRoomSize; }
float getReverbTime() const { return _reverbTime; }
void setReverbTime(float reverbTime ) { _reverbTime = reverbTime; }
float getDamping() const { return _damping; }
void setDamping(float damping ) { _damping = damping; }
float getSpread() const { return _spread; }
void setSpread(float spread ) { _spread = spread; }
float getInputBandwidth() const { return _inputBandwidth; }
void setInputBandwidth(float inputBandwidth ) { _inputBandwidth = inputBandwidth; }
float getEarlyLevel() const { return _earlyLevel; }
void setEarlyLevel(float earlyLevel ) { _earlyLevel = earlyLevel; }
float getTailLevel() const { return _tailLevel; }
void setTailLevel(float tailLevel ) { _tailLevel = tailLevel; }
float getDryLevel() const { return _dryLevel; }
void setDryLevel(float dryLevel) { _dryLevel = dryLevel; }
float getWetLevel() const { return _wetLevel; }
void setWetLevel(float wetLevel) { _wetLevel = wetLevel; }
private:
// http://wiki.audacityteam.org/wiki/GVerb#Instant_Reverberb_settings
// Square meters
float _maxRoomSize = 50.0f;
float _roomSize = 50.0f;
// Seconds
float _reverbTime = 4.0f;
// Ratio between 0 and 1
float _damping = 0.5f;
// ? (Does not appear to be set externally very often)
float _spread = 15.0f;
// Ratio between 0 and 1
float _inputBandwidth = 0.75f;
// dB
float _earlyLevel = -22.0f;
float _tailLevel = -28.0f;
float _dryLevel = 0.0f;
float _wetLevel = 6.0f;
};
#endif /* defined(__hifi__AudioEffectOptions__) */


@ -41,6 +41,7 @@
#include "ScriptEngine.h"
#include "TypedArrays.h"
#include "XMLHttpRequestClass.h"
#include "AudioEffectOptions.h"
VoxelsScriptingInterface ScriptEngine::_voxelsScriptingInterface;
ParticlesScriptingInterface ScriptEngine::_particlesScriptingInterface;
@ -276,6 +277,9 @@ void ScriptEngine::init() {
QScriptValue localVoxelsValue = scriptValueFromQMetaObject<LocalVoxels>();
globalObject().setProperty("LocalVoxels", localVoxelsValue);
QScriptValue audioEffectOptionsConstructorValue = _engine.newFunction(AudioEffectOptions::constructor);
_engine.globalObject().setProperty("AudioEffectOptions", audioEffectOptionsConstructorValue);
qScriptRegisterMetaType(this, injectorToScriptValue, injectorFromScriptValue);
qScriptRegisterMetaType(this, inputControllerToScriptValue, inputControllerFromScriptValue);