Mirror of https://github.com/overte-org/overte.git (synced 2025-08-09 22:51:20 +02:00)

Commit 9495c2c87a: Merge remote-tracking branch 'hifi/master'
34 changed files with 804 additions and 67 deletions
@@ -428,8 +428,8 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
 }
 
 int AudioMixer::prepareMixForListeningNode(Node* node) {
-    AvatarAudioStream* nodeAudioStream = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioStream();
-    AudioMixerClientData* listenerNodeData = (AudioMixerClientData*)node->getLinkedData();
+    AvatarAudioStream* nodeAudioStream = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream();
+    AudioMixerClientData* listenerNodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
 
     // zero out the client mix for this node
     memset(_preMixSamples, 0, sizeof(_preMixSamples));

@@ -730,6 +730,33 @@ void AudioMixer::run() {
             memcpy(dataAt, &sequence, sizeof(quint16));
             dataAt += sizeof(quint16);
 
+            // Pack stream properties
+            bool inAZone = false;
+            for (int i = 0; i < _zoneReverbSettings.size(); ++i) {
+                AudioMixerClientData* data = static_cast<AudioMixerClientData*>(node->getLinkedData());
+                glm::vec3 streamPosition = data->getAvatarAudioStream()->getPosition();
+                if (_audioZones[_zoneReverbSettings[i].zone].contains(streamPosition)) {
+                    bool hasReverb = true;
+                    float reverbTime = _zoneReverbSettings[i].reverbTime;
+                    float wetLevel = _zoneReverbSettings[i].wetLevel;
+
+                    memcpy(dataAt, &hasReverb, sizeof(bool));
+                    dataAt += sizeof(bool);
+                    memcpy(dataAt, &reverbTime, sizeof(float));
+                    dataAt += sizeof(float);
+                    memcpy(dataAt, &wetLevel, sizeof(float));
+                    dataAt += sizeof(float);
+
+                    inAZone = true;
+                    break;
+                }
+            }
+            if (!inAZone) {
+                bool hasReverb = false;
+                memcpy(dataAt, &hasReverb, sizeof(bool));
+                dataAt += sizeof(bool);
+            }
+
             // pack mixed audio samples
             memcpy(dataAt, _mixSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
             dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
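The hunk above has the mixer write per-listener reverb properties into every mixed-audio packet, immediately after the 16-bit sequence number: a bool flag, then a reverb time and wet level (two floats) that are only present when the listener's stream position falls inside a configured zone. The matching reader is InboundAudioStream::parseStreamProperties further down in this diff. A minimal sketch of that write, with a helper name that is illustrative and not part of the commit:

    // Sketch of the reverb header the mixer packs after the sequence number; the
    // helper name is hypothetical, the commit writes these memcpy calls inline.
    #include <cstdint>
    #include <cstring>

    static char* packReverbProperties(char* dataAt, bool hasReverb, float reverbTime, float wetLevel) {
        memcpy(dataAt, &hasReverb, sizeof(bool));
        dataAt += sizeof(bool);
        if (hasReverb) {
            memcpy(dataAt, &reverbTime, sizeof(float));
            dataAt += sizeof(float);
            memcpy(dataAt, &wetLevel, sizeof(float));
            dataAt += sizeof(float);
        }
        return dataAt;   // mixed audio samples follow from here
    }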
@@ -1033,6 +1060,38 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
                 }
             }
         }
 
+        const QString REVERB = "reverb";
+        if (audioEnvGroupObject[REVERB].isArray()) {
+            const QJsonArray& reverb = audioEnvGroupObject[REVERB].toArray();
+
+            const QString ZONE = "zone";
+            const QString REVERB_TIME = "reverb_time";
+            const QString WET_LEVEL = "wet_level";
+            for (int i = 0; i < reverb.count(); ++i) {
+                QJsonObject reverbObject = reverb[i].toObject();
+
+                if (reverbObject.contains(ZONE) &&
+                    reverbObject.contains(REVERB_TIME) &&
+                    reverbObject.contains(WET_LEVEL)) {
+
+                    bool okReverbTime, okWetLevel;
+                    QString zone = reverbObject.value(ZONE).toString();
+                    float reverbTime = reverbObject.value(REVERB_TIME).toString().toFloat(&okReverbTime);
+                    float wetLevel = reverbObject.value(WET_LEVEL).toString().toFloat(&okWetLevel);
+
+                    if (okReverbTime && okWetLevel && _audioZones.contains(zone)) {
+                        ReverbSettings settings;
+                        settings.zone = zone;
+                        settings.reverbTime = reverbTime;
+                        settings.wetLevel = wetLevel;
+
+                        _zoneReverbSettings.push_back(settings);
+                        qDebug() << "Added Reverb:" << zone << reverbTime << wetLevel;
+                    }
+                }
+            }
+        }
+
     }
 }
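The hunk above reads the new "reverb" array from the audio_env settings group: each entry names an audio zone and carries reverb_time and wet_level as strings, which are validated with toFloat(&ok) and only stored when the named zone exists in _audioZones. A standalone sketch of the same parsing pattern, using an inline JSON literal purely for illustration (real data comes from the domain-server settings table described further down in this diff):

    // Standalone sketch of the parsing pattern above; the JSON literal and function
    // name are illustrative only.
    #include <QJsonArray>
    #include <QJsonDocument>
    #include <QJsonObject>
    #include <QString>
    #include <QDebug>

    void parseReverbArraySketch() {
        const QByteArray json = R"([{ "zone": "Audio_Zone", "reverb_time": "4.0", "wet_level": "-10.0" }])";
        QJsonArray reverb = QJsonDocument::fromJson(json).array();

        for (int i = 0; i < reverb.count(); ++i) {
            QJsonObject reverbObject = reverb[i].toObject();
            bool okReverbTime, okWetLevel;
            QString zone = reverbObject.value("zone").toString();
            // Table cells arrive as strings, hence toString().toFloat(&ok).
            float reverbTime = reverbObject.value("reverb_time").toString().toFloat(&okReverbTime);
            float wetLevel = reverbObject.value("wet_level").toString().toFloat(&okWetLevel);
            if (okReverbTime && okWetLevel) {
                qDebug() << "Parsed reverb settings:" << zone << reverbTime << wetLevel;
            }
        }
    }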
@@ -82,6 +82,12 @@ private:
         float coefficient;
     };
     QVector<ZonesSettings> _zonesSettings;
+    struct ReverbSettings {
+        QString zone;
+        float reverbTime;
+        float wetLevel;
+    };
+    QVector<ReverbSettings> _zoneReverbSettings;
 
     static InboundAudioStream::Settings _streamSettings;
 

@@ -237,8 +237,7 @@ void MetavoxelSession::update() {
 
         // go back to the beginning with the current packet and note that there's a delta pending
         _sequencer.getOutputStream().getUnderlying().device()->seek(start);
-        MetavoxelDeltaPendingMessage msg = { ++_reliableDeltaID, sendRecord->getPacketNumber(),
-            _sequencer.getIncomingPacketNumber() };
+        MetavoxelDeltaPendingMessage msg = { ++_reliableDeltaID, sendRecord->getPacketNumber(), _lodPacketNumber };
         out << (_reliableDeltaMessage = QVariant::fromValue(msg));
         _sequencer.endPacket();
 

@@ -265,6 +264,7 @@ void MetavoxelSession::handleMessage(const QVariant& message) {
     if (userType == ClientStateMessage::Type) {
         ClientStateMessage state = message.value<ClientStateMessage>();
         _lod = state.lod;
+        _lodPacketNumber = _sequencer.getIncomingPacketNumber();
 
     } else if (userType == MetavoxelEditMessage::Type) {
         QMetaObject::invokeMethod(_sender->getServer(), "applyEdit", Q_ARG(const MetavoxelEditMessage&,

@@ -127,6 +127,7 @@ private:
     MetavoxelSender* _sender;
 
     MetavoxelLOD _lod;
+    int _lodPacketNumber;
 
     ReliableChannel* _reliableDeltaChannel;
     int _reliableDeltaReceivedOffset;
cmake/modules/FindGverb.cmake (new file, 39 lines)
@@ -0,0 +1,39 @@
+#  FindGVerb.cmake
+#
+#  Try to find the Gverb library.
+#
+#  You must provide a GVERB_ROOT_DIR which contains src and include directories
+#
+#  Once done this will define
+#
+#  GVERB_FOUND - system found Gverb
+#  GVERB_INCLUDE_DIRS - the Gverb include directory
+#
+#  Copyright 2014 High Fidelity, Inc.
+#
+#  Distributed under the Apache License, Version 2.0.
+#  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+#
+
+if (GVERB_INCLUDE_DIRS)
+  # in cache already
+  set(GVERB_FOUND TRUE)
+else (GVERB_INCLUDE_DIRS)
+
+  include("${MACRO_DIR}/HifiLibrarySearchHints.cmake")
+  hifi_library_search_hints("gverb")
+
+  find_path(GVERB_INCLUDE_DIRS gverb.h PATH_SUFFIXES include HINTS ${GVERB_SEARCH_DIRS})
+  find_path(GVERB_SRC_DIRS gverb.c PATH_SUFFIXES src HINTS ${GVERB_SEARCH_DIRS})
+
+  if (GVERB_INCLUDE_DIRS)
+    set(GVERB_FOUND TRUE)
+  endif (GVERB_INCLUDE_DIRS)
+
+  if (GVERB_FOUND)
+    message(STATUS "Found Gverb: ${GVERB_INCLUDE_DIRS}")
+  else (GVERB_FOUND)
+    message(FATAL_ERROR "Could NOT find Gverb. Read ./interface/externals/gverb/readme.txt")
+  endif (GVERB_FOUND)
+
+endif(GVERB_INCLUDE_DIRS)
@@ -154,6 +154,33 @@
                         "placeholder": "0.18"
                     }
                 ]
+            },
+            {
+                "name": "reverb",
+                "type": "table",
+                "label": "Reverb Settings",
+                "help": "In this table you can set custom reverb values for each audio zones",
+                "numbered": true,
+                "columns": [
+                    {
+                        "name": "zone",
+                        "label": "Zone",
+                        "can_set": true,
+                        "placeholder": "Audio_Zone"
+                    },
+                    {
+                        "name": "reverb_time",
+                        "label": "Reverb Decay Time",
+                        "can_set": true,
+                        "placeholder": "(in sec)"
+                    },
+                    {
+                        "name": "wet_level",
+                        "label": "Wet Level",
+                        "can_set": true,
+                        "placeholder": "(in db)"
+                    }
+                ]
             }
         ]
     },
examples/audioReverbOn.js (new file, 39 lines)
@@ -0,0 +1,39 @@
+//
+//  audioReverbOn.js
+//  examples
+//
+//  Copyright 2014 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+
+// http://wiki.audacityteam.org/wiki/GVerb#Instant_reverb_settings
+var audioOptions = new AudioEffectOptions({
+    // Square Meters
+    maxRoomSize: 50,
+    roomSize: 50,
+
+    // Seconds
+    reverbTime: 4,
+
+    // Between 0 - 1
+    damping: 0.50,
+    inputBandwidth: 0.75,
+
+    // dB
+    earlyLevel: -22,
+    tailLevel: -28,
+    dryLevel: 0,
+    wetLevel: 6
+});
+
+AudioDevice.setReverbOptions(audioOptions);
+AudioDevice.setReverb(true);
+print("Reverb is now on with the updated options.");
+
+function scriptEnding() {
+    AudioDevice.setReverb(false);
+    print("Reberb is now off.");
+}
+
+Script.scriptEnding.connect(scriptEnding);
@@ -78,6 +78,8 @@ SelectionManager = (function() {
             that.worldDimensions = null;
             that.worldPosition = null;
         } else if (that.selections.length == 1) {
+            SelectionDisplay.setSpaceMode(SPACE_LOCAL);
+
             var properties = Entities.getEntityProperties(that.selections[0]);
             that.localDimensions = properties.dimensions;
             that.localPosition = properties.position;

@@ -622,8 +624,8 @@ SelectionDisplay = (function () {
 
         }
 
-        var diagonal = (Vec3.length(properties.dimensions) / 2) * 1.1;
-        var halfDimensions = Vec3.multiply(properties.dimensions, 0.5);
+        var diagonal = (Vec3.length(selectionManager.worldDimensions) / 2) * 1.1;
+        var halfDimensions = Vec3.multiply(selectionManager.worldDimensions, 0.5);
         innerRadius = diagonal;
         outerRadius = diagonal * 1.15;
         var innerActive = false;

@@ -843,7 +845,7 @@ SelectionDisplay = (function () {
 
         Overlays.editOverlay(grabberMoveUp, { visible: translateHandlesVisible, position: { x: boundsCenter.x, y: top + grabberMoveUpOffset, z: boundsCenter.z } });
 
-        that.updateHandles(entityID);
+        that.updateHandles();
 
 
         Overlays.editOverlay(baseOfEntityProjectionOverlay,

@@ -924,18 +926,17 @@ SelectionDisplay = (function () {
         entitySelected = false;
     };
 
-    that.updateHandles = function(entityID) {
-        if (!entitySelected) {
+    that.updateHandles = function() {
+        // print("Updating handles");
+        if (SelectionManager.selections.length == 0) {
             that.setOverlaysVisible(false);
             return;
         }
 
-        var properties = Entities.getEntityProperties(entityID);
-
         var rotation, dimensions, position;
 
         if (spaceMode == SPACE_LOCAL) {
-            rotation = properties.rotation;
+            rotation = SelectionManager.localRotation;
             dimensions = SelectionManager.localDimensions;
             position = SelectionManager.localPosition;
         } else {

@@ -1095,6 +1096,44 @@ SelectionDisplay = (function () {
         entitySelected = false;
     };
 
+    function applyEntityProperties(data) {
+        for (var i = 0; i < data.length; i++) {
+            var entityID = data[i].entityID;
+            var properties = data[i].properties;
+            Entities.editEntity(entityID, properties);
+        }
+        selectionManager._update();
+    };
+
+    // For currently selected entities, push a command to the UndoStack that uses the current entity properties for the
+    // redo command, and the saved properties for the undo command.
+    function pushCommandForSelections() {
+        var undoData = [];
+        var redoData = [];
+        for (var i = 0; i < SelectionManager.selections.length; i++) {
+            var entityID = SelectionManager.selections[i];
+            var initialProperties = SelectionManager.savedProperties[entityID.id];
+            var currentProperties = Entities.getEntityProperties(entityID);
+            undoData.push({
+                entityID: entityID,
+                properties: {
+                    position: initialProperties.position,
+                    rotation: initialProperties.rotation,
+                    dimensions: initialProperties.dimensions,
+                },
+            });
+            redoData.push({
+                entityID: entityID,
+                properties: {
+                    position: currentProperties.position,
+                    rotation: currentProperties.rotation,
+                    dimensions: currentProperties.dimensions,
+                },
+            });
+        }
+        UndoStack.pushCommand(applyEntityProperties, undoData, applyEntityProperties, redoData);
+    }
+
     var lastXZPick = null;
     var translateXZTool = {
         mode: 'TRANSLATE_XZ',

@@ -1114,6 +1153,8 @@ SelectionDisplay = (function () {
                     var initialProperties = SelectionManager.savedProperties[entityID.id];
                     Entities.editEntity(entityID, initialProperties);
                 }
+            } else {
+                pushCommandForSelections();
             }
         },
         onMove: function(event) {

@@ -1172,6 +1213,8 @@ SelectionDisplay = (function () {
                     var initialProperties = SelectionManager.savedProperties[entityID.id];
                     Entities.editEntity(entityID, initialProperties);
                 }
+            } else {
+                pushCommandForSelections();
             }
         },
         onMove: function(event) {

@@ -1334,6 +1377,8 @@ SelectionDisplay = (function () {
                     var initialProperties = SelectionManager.savedProperties[entityID.id];
                     Entities.editEntity(entityID, initialProperties);
                 }
+            } else {
+                pushCommandForSelections();
             }
         };
 

@@ -1496,6 +1541,8 @@ SelectionDisplay = (function () {
                     var initialProperties = SelectionManager.savedProperties[entityID.id];
                     Entities.editEntity(entityID, initialProperties);
                 }
+            } else {
+                pushCommandForSelections();
             }
         },
         onMove: function(event) {

@@ -1602,6 +1649,8 @@ SelectionDisplay = (function () {
                     var initialProperties = SelectionManager.savedProperties[entityID.id];
                     Entities.editEntity(entityID, initialProperties);
                 }
+            } else {
+                pushCommandForSelections();
            }
         },
         onMove: function(event) {

@@ -1706,6 +1755,8 @@ SelectionDisplay = (function () {
                     var initialProperties = SelectionManager.savedProperties[entityID.id];
                     Entities.editEntity(entityID, initialProperties);
                 }
+            } else {
+                pushCommandForSelections();
             }
         },
         onMove: function(event) {
@@ -35,7 +35,7 @@ var entityPropertyDialogBox = EntityPropertyDialogBox;
 Script.include("libraries/entityCameraTool.js");
 var entityCameraTool = new EntityCameraTool();
 
-selectionManager.setEventListener(selectionDisplay.updateHandles());
+selectionManager.setEventListener(selectionDisplay.updateHandles);
 
 var windowDimensions = Controller.getViewportDimensions();
 var toolIconUrl = HIFI_PUBLIC_BUCKET + "images/tools/";

@@ -608,9 +608,12 @@ function handeMenuEvent(menuItem) {
     } else if (menuItem == "Delete") {
         if (entitySelected) {
             print(" Delete Entity.... selectedEntityID="+ selectedEntityID);
-            Entities.deleteEntity(selectedEntityID);
+            for (var i = 0; i < selectionManager.selections.length; i++) {
+                Entities.deleteEntity(selectionManager.selections[i]);
+            }
             selectionDisplay.unselect(selectedEntityID);
             entitySelected = false;
+            selectionManager.clearSelections();
         } else {
             print(" Delete Entity.... not holding...");
         }

@@ -618,7 +621,7 @@ function handeMenuEvent(menuItem) {
         // good place to put the properties dialog
 
         editModelID = -1;
-        if (entitySelected) {
+        if (selectionManager.selections.length == 1) {
             print(" Edit Properties.... selectedEntityID="+ selectedEntityID);
             editModelID = selectedEntityID;
         } else {

@@ -653,7 +656,7 @@ Controller.keyReleaseEvent.connect(function (event) {
     if (event.text == "`") {
         handeMenuEvent("Edit Properties...");
     }
-    if (event.text == "BACKSPACE") {
+    if (event.text == "BACKSPACE" || event.text == "DELETE") {
         handeMenuEvent("Delete");
     } else if (event.text == "TAB") {
         selectionDisplay.toggleSpaceMode();
@@ -2,7 +2,7 @@ set(TARGET_NAME interface)
 project(${TARGET_NAME})
 
 # set a default root dir for each of our optional externals if it was not passed
-set(OPTIONAL_EXTERNALS "Faceshift" "LibOVR" "PrioVR" "Sixense" "Visage" "LeapMotion" "RtMidi" "Qxmpp" "SDL2")
+set(OPTIONAL_EXTERNALS "Faceshift" "LibOVR" "PrioVR" "Sixense" "Visage" "LeapMotion" "RtMidi" "Qxmpp" "SDL2" "Gverb")
 foreach(EXTERNAL ${OPTIONAL_EXTERNALS})
   string(TOUPPER ${EXTERNAL} ${EXTERNAL}_UPPERCASE)
   if (NOT ${${EXTERNAL}_UPPERCASE}_ROOT_DIR)

@@ -14,6 +14,10 @@ endforeach()
 find_package(Qt5LinguistTools REQUIRED)
 find_package(Qt5LinguistToolsMacros)
 
+
+# As Gverb is currently the only reverb library, it's required.
+find_package(Gverb REQUIRED)
+
 if (DEFINED ENV{JOB_ID})
   set(BUILD_SEQ $ENV{JOB_ID})
 else ()

@@ -166,6 +170,13 @@ if (QXMPP_FOUND AND NOT DISABLE_QXMPP AND WIN32)
   add_definitions(-DQXMPP_STATIC)
 endif ()
 
+if (GVERB_FOUND)
+  file(GLOB GVERB_SRCS ${GVERB_SRC_DIRS}/*.c)
+  include_directories(${GVERB_INCLUDE_DIRS})
+  add_library(gverb STATIC ${GVERB_SRCS})
+  target_link_libraries(${TARGET_NAME} gverb)
+endif (GVERB_FOUND)
+
 # include headers for interface and InterfaceConfig.
 include_directories("${PROJECT_SOURCE_DIR}/src" "${PROJECT_BINARY_DIR}/includes")
 
interface/external/gverb/readme.txt (new file, vendored, 15 lines)
@@ -0,0 +1,15 @@
+
+Instructions for adding the Gverb library to Interface
+(This is a required library)
+Clément Brisset, Octobre 22nd, 2014
+
+1. Go to https://github.com/highfidelity/gverb
+   Or download the sources directly via this link:
+   https://github.com/highfidelity/gverb/archive/master.zip
+
+2. Extract the archive
+
+3. Place the directories “include” and “src” in interface/external/gverb
+   (Normally next to this readme)
+
+4. Clear your build directory, run cmake, build and you should be all set.
@@ -172,6 +172,8 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
         _nodeBoundsDisplay(this),
         _previousScriptLocation(),
         _applicationOverlay(),
+        _undoStack(),
+        _undoStackScriptingInterface(&_undoStack),
         _runningScriptsWidget(NULL),
         _runningScriptsWidgetWasVisible(false),
         _trayIcon(new QSystemTrayIcon(_window)),

@@ -3811,6 +3813,8 @@ ScriptEngine* Application::loadScript(const QString& scriptFilename, bool isUser
     scriptEngine->registerGlobalObject("Joysticks", &JoystickScriptingInterface::getInstance());
     qScriptRegisterMetaType(scriptEngine, joystickToScriptValue, joystickFromScriptValue);
 
+    scriptEngine->registerGlobalObject("UndoStack", &_undoStackScriptingInterface);
+
 #ifdef HAVE_RTMIDI
     scriptEngine->registerGlobalObject("MIDI", &MIDIManager::getInstance());
 #endif

@@ -91,6 +91,9 @@
 #include "voxels/VoxelSystem.h"
 
+
+#include "UndoStackScriptingInterface.h"
+
 
 class QAction;
 class QActionGroup;
 class QGLWidget;

@@ -450,6 +453,7 @@ private:
     int _numChangedSettings;
 
     QUndoStack _undoStack;
+    UndoStackScriptingInterface _undoStackScriptingInterface;
 
     glm::vec3 _gravity;
 
@@ -92,6 +92,8 @@ Audio::Audio(QObject* parent) :
     _collisionSoundDuration(0.0f),
     _proceduralEffectSample(0),
     _muted(false),
+    _reverb(false),
+    _reverbOptions(&_scriptReverbOptions),
     _processSpatialAudio(false),
     _spatialAudioStart(0),
     _spatialAudioFinish(0),

@@ -128,6 +130,9 @@ Audio::Audio(QObject* parent) :
     connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedLastFrameRepeatedWithFade, this, &Audio::addLastFrameRepeatedWithFadeToScope, Qt::DirectConnection);
     connect(&_receivedAudioStream, &MixedProcessedAudioStream::addedStereoSamples, this, &Audio::addStereoSamplesToScope, Qt::DirectConnection);
     connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples, this, &Audio::processReceivedSamples, Qt::DirectConnection);
+
+    // Initialize GVerb
+    initGverb();
 }
 
 void Audio::init(QGLWidget *parent) {

@@ -489,6 +494,69 @@ bool Audio::switchOutputToAudioDevice(const QString& outputDeviceName) {
     return switchOutputToAudioDevice(getNamedAudioDeviceForMode(QAudio::AudioOutput, outputDeviceName));
 }
 
+void Audio::initGverb() {
+    // Initialize a new gverb instance
+    _gverb = gverb_new(_outputFormat.sampleRate(), _reverbOptions->getMaxRoomSize(), _reverbOptions->getRoomSize(),
+                       _reverbOptions->getReverbTime(), _reverbOptions->getDamping(), _reverbOptions->getSpread(),
+                       _reverbOptions->getInputBandwidth(), _reverbOptions->getEarlyLevel(),
+                       _reverbOptions->getTailLevel());
+
+    // Configure the instance (these functions are not super well named - they actually set several internal variables)
+    gverb_set_roomsize(_gverb, _reverbOptions->getRoomSize());
+    gverb_set_revtime(_gverb, _reverbOptions->getReverbTime());
+    gverb_set_damping(_gverb, _reverbOptions->getDamping());
+    gverb_set_inputbandwidth(_gverb, _reverbOptions->getInputBandwidth());
+    gverb_set_earlylevel(_gverb, DB_CO(_reverbOptions->getEarlyLevel()));
+    gverb_set_taillevel(_gverb, DB_CO(_reverbOptions->getTailLevel()));
+}
+
+void Audio::setReverbOptions(const AudioEffectOptions* options) {
+    // Save the new options
+    _scriptReverbOptions.setMaxRoomSize(options->getMaxRoomSize());
+    _scriptReverbOptions.setRoomSize(options->getRoomSize());
+    _scriptReverbOptions.setReverbTime(options->getReverbTime());
+    _scriptReverbOptions.setDamping(options->getDamping());
+    _scriptReverbOptions.setSpread(options->getSpread());
+    _scriptReverbOptions.setInputBandwidth(options->getInputBandwidth());
+    _scriptReverbOptions.setEarlyLevel(options->getEarlyLevel());
+    _scriptReverbOptions.setTailLevel(options->getTailLevel());
+
+    _scriptReverbOptions.setDryLevel(options->getDryLevel());
+    _scriptReverbOptions.setWetLevel(options->getWetLevel());
+
+    if (_reverbOptions == &_scriptReverbOptions) {
+        // Apply them to the reverb instance(s)
+        initGverb();
+    }
+}
+
+void Audio::addReverb(int16_t* samplesData, int numSamples, QAudioFormat& audioFormat) {
+    float dryFraction = DB_CO(_reverbOptions->getDryLevel());
+    float wetFraction = DB_CO(_reverbOptions->getWetLevel());
+
+    float lValue,rValue;
+    for (int sample = 0; sample < numSamples; sample += audioFormat.channelCount()) {
+        // Run GVerb
+        float value = (float)samplesData[sample];
+        gverb_do(_gverb, value, &lValue, &rValue);
+
+        // Mix, accounting for clipping, the left and right channels. Ignore the rest.
+        for (unsigned int j = sample; j < sample + audioFormat.channelCount(); j++) {
+            if (j == sample) {
+                // left channel
+                int lResult = glm::clamp((int)(samplesData[j] * dryFraction + lValue * wetFraction), -32768, 32767);
+                samplesData[j] = (int16_t)lResult;
+            } else if (j == (sample + 1)) {
+                // right channel
+                int rResult = glm::clamp((int)(samplesData[j] * dryFraction + rValue * wetFraction), -32768, 32767);
+                samplesData[j] = (int16_t)rResult;
+            } else {
+                // ignore channels above 2
+            }
+        }
+    }
+}
+
 void Audio::handleAudioInput() {
     static char audioDataPacket[MAX_PACKET_SIZE];
 
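Audio::addReverb() above mixes each dry sample with GVerb's stereo output, converting the configured dry and wet levels from dB to linear gain with gverb's DB_CO macro and clamping the result to the int16 range. A minimal sketch of that per-sample mix, under the assumption that DB_CO is the usual 10^(dB/20) conversion shipped with gverb:

    // Minimal sketch of the dry/wet mix in Audio::addReverb(); dbToGain stands in for
    // gverb's DB_CO macro under the stated assumption.
    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    static float dbToGain(float db) {
        return db > -90.0f ? powf(10.0f, db * 0.05f) : 0.0f;   // assumed DB_CO behaviour
    }

    static int16_t mixWetDry(int16_t drySample, float wetSample, float dryLevelDb, float wetLevelDb) {
        float mixed = drySample * dbToGain(dryLevelDb) + wetSample * dbToGain(wetLevelDb);
        // Clamp exactly as the diff does with glm::clamp(..., -32768, 32767).
        return (int16_t)std::min(32767.0f, std::max(-32768.0f, mixed));
    }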
@@ -720,7 +788,6 @@ void Audio::handleAudioInput() {
         NodeList* nodeList = NodeList::getInstance();
         SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
 
-
         if (_recorder && _recorder.data()->isRecording()) {
             _recorder.data()->record(reinterpret_cast<char*>(networkAudioSamples), numNetworkBytes);
         }

@@ -840,12 +907,10 @@ void Audio::addLastFrameRepeatedWithFadeToScope(int samplesPerChannel) {
 }
 
 void Audio::processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
-
     const int numNetworkOutputSamples = inputBuffer.size() / sizeof(int16_t);
     const int numDeviceOutputSamples = numNetworkOutputSamples * (_outputFormat.sampleRate() * _outputFormat.channelCount())
         / (_desiredOutputFormat.sampleRate() * _desiredOutputFormat.channelCount());
 
-
     outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
 
     const int16_t* receivedSamples;

@@ -884,10 +949,37 @@ void Audio::processReceivedSamples(const QByteArray& inputBuffer, QByteArray& ou
                                                     numNetworkOutputSamples,
                                                     numDeviceOutputSamples,
                                                     _desiredOutputFormat, _outputFormat);
+
+    if(_reverb || _receivedAudioStream.hasReverb()) {
+        bool reverbChanged = false;
+        if (_receivedAudioStream.hasReverb()) {
+
+            if (_zoneReverbOptions.getReverbTime() != _receivedAudioStream.getRevebTime()) {
+                _zoneReverbOptions.setReverbTime(_receivedAudioStream.getRevebTime());
+                reverbChanged = true;
+            }
+            if (_zoneReverbOptions.getWetLevel() != _receivedAudioStream.getWetLevel()) {
+                _zoneReverbOptions.setWetLevel(_receivedAudioStream.getWetLevel());
+                reverbChanged = true;
+            }
+
+            if (_reverbOptions != &_zoneReverbOptions) {
+                _reverbOptions = &_zoneReverbOptions;
+                reverbChanged = true;
+            }
+        } else if (_reverbOptions != &_scriptReverbOptions) {
+            _reverbOptions = &_scriptReverbOptions;
+            reverbChanged = true;
+        }
+
+        if (reverbChanged) {
+            initGverb();
+        }
+        addReverb((int16_t*)outputBuffer.data(), numDeviceOutputSamples, _outputFormat);
+    }
 }
 
 void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
 
     if (_audioOutput) {
         // Audio output must exist and be correctly set up if we're going to process received audio
         _receivedAudioStream.parseData(audioByteArray);
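The hunk above decides which reverb options drive GVerb: zone reverb carried in the mixer packet takes precedence over script-set reverb, the active options pointer is switched accordingly, and GVerb is re-initialized whenever the active source or its parameters change. A condensed, self-contained sketch of that precedence rule (names are illustrative, not the literal members):

    // Condensed sketch of the source-selection rule in Audio::processReceivedSamples().
    enum class ReverbSource { None, Script, Zone };

    ReverbSource pickReverbSource(bool scriptReverbEnabled, bool streamHasReverb) {
        if (streamHasReverb) {
            return ReverbSource::Zone;   // the mixer says this listener is inside a reverb zone
        }
        return scriptReverbEnabled ? ReverbSource::Script : ReverbSource::None;
    }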
@@ -43,6 +43,14 @@
 #include <StdDev.h>
 
 #include "MixedProcessedAudioStream.h"
+#include "AudioEffectOptions.h"
+#include <AudioRingBuffer.h>
+#include <StdDev.h>
+
+extern "C" {
+#include <gverb.h>
+#include <gverbdsp.h>
+}
 
 static const int NUM_AUDIO_CHANNELS = 2;
 

@@ -159,6 +167,8 @@ public slots:
 
     float getInputVolume() const { return (_audioInput) ? _audioInput->volume() : 0.0f; }
    void setInputVolume(float volume) { if (_audioInput) _audioInput->setVolume(volume); }
+    void setReverb(bool reverb) { _reverb = reverb; }
+    void setReverbOptions(const AudioEffectOptions* options);
 
     const AudioStreamStats& getAudioMixerAvatarStreamAudioStats() const { return _audioMixerAvatarStreamAudioStats; }
     const QHash<QUuid, AudioStreamStats>& getAudioMixerInjectedStreamAudioStatsMap() const { return _audioMixerInjectedStreamAudioStatsMap; }

@@ -230,6 +240,11 @@ private:
     int _proceduralEffectSample;
     bool _muted;
     bool _localEcho;
+    bool _reverb;
+    AudioEffectOptions _scriptReverbOptions;
+    AudioEffectOptions _zoneReverbOptions;
+    AudioEffectOptions* _reverbOptions;
+    ty_gverb *_gverb;
     GLuint _micTextureId;
     GLuint _muteTextureId;
     GLuint _boxTextureId;

@@ -249,6 +264,10 @@ private:
     // 2. Mix with the audio input
     void processProceduralAudio(int16_t* monoInput, int numSamples);
 
+    // Adds Reverb
+    void initGverb();
+    void addReverb(int16_t* samples, int numSamples, QAudioFormat& format);
+
     // Add sounds that we want the user to not hear themselves, by adding on top of mic input signal
     void addProceduralSounds(int16_t* monoInput, int numSamples);
 
@@ -66,8 +66,6 @@ glm::vec3 OculusManager::_calibrationPosition;
 glm::quat OculusManager::_calibrationOrientation;
 quint64 OculusManager::_calibrationStartTime;
 int OculusManager::_calibrationMessage = NULL;
-QString OculusManager::CALIBRATION_BILLBOARD_URL = "http://hifi-public.s3.amazonaws.com/images/hold-to-calibrate.svg";
-float OculusManager::CALIBRATION_BILLBOARD_SCALE = 2.f;
 
 #endif
 

@@ -191,7 +189,7 @@ void OculusManager::disconnect() {
 }
 
 #ifdef HAVE_LIBOVR
-void OculusManager::positionCalibrationBillboard(BillboardOverlay* billboard) {
+void OculusManager::positionCalibrationBillboard(Text3DOverlay* billboard) {
     glm::quat headOrientation = Application::getInstance()->getAvatar()->getHeadOrientation();
     headOrientation.x = 0;
     headOrientation.z = 0;

@@ -204,8 +202,9 @@ void OculusManager::positionCalibrationBillboard(BillboardOverlay* billboard) {
 
 #ifdef HAVE_LIBOVR
 void OculusManager::calibrate(glm::vec3 position, glm::quat orientation) {
+    static QString instructionMessage = "Hold still to calibrate";
     static QString progressMessage;
-    static BillboardOverlay* billboard;
+    static Text3DOverlay* billboard;
 
     switch (_calibrationState) {
 

@@ -235,9 +234,13 @@ void OculusManager::calibrate(glm::vec3 position, glm::quat orientation) {
             if (!_calibrationMessage) {
                 qDebug() << "Hold still to calibrate HMD";
 
-                billboard = new BillboardOverlay();
-                billboard->setURL(CALIBRATION_BILLBOARD_URL);
-                billboard->setScale(CALIBRATION_BILLBOARD_SCALE);
+                billboard = new Text3DOverlay();
+                billboard->setDimensions(glm::vec2(2.0f, 1.25f));
+                billboard->setTopMargin(0.35f);
+                billboard->setLeftMargin(0.28f);
+                billboard->setText(instructionMessage);
+                billboard->setAlpha(0.5f);
+                billboard->setLineHeight(0.1f);
                 billboard->setIsFacingAvatar(false);
                 positionCalibrationBillboard(billboard);
 

@@ -275,7 +278,7 @@ void OculusManager::calibrate(glm::vec3 position, glm::quat orientation) {
                     } else {
                         progressMessage += ".";
                     }
-                    //qDebug() << progressMessage; // Progress message ready for 3D text overlays.
+                    billboard->setText(instructionMessage + "\n\n" + progressMessage);
                 }
             }
         } else {

@@ -18,7 +18,7 @@
 #endif
 
 #include "renderer/ProgramObject.h"
-#include "ui/overlays/BillboardOverlay.h"
+#include "ui/overlays/Text3DOverlay.h"
 
 const float DEFAULT_OCULUS_UI_ANGULAR_SIZE = 72.0f;
 

@@ -111,7 +111,7 @@ private:
         WAITING_FOR_ZERO_HELD,
         CALIBRATED
     };
-    static void positionCalibrationBillboard(BillboardOverlay* billboard);
+    static void positionCalibrationBillboard(Text3DOverlay* message);
     static float CALIBRATION_DELTA_MINIMUM_LENGTH;
     static float CALIBRATION_DELTA_MINIMUM_ANGLE;
     static float CALIBRATION_ZERO_MAXIMUM_LENGTH;

@@ -123,8 +123,6 @@ private:
     static glm::quat _calibrationOrientation;
     static quint64 _calibrationStartTime;
     static int _calibrationMessage;
-    static QString CALIBRATION_BILLBOARD_URL;
-    static float CALIBRATION_BILLBOARD_SCALE;
 
 #endif
 
@@ -70,3 +70,11 @@ float AudioDeviceScriptingInterface::getInputVolume() {
 void AudioDeviceScriptingInterface::setInputVolume(float volume) {
     Application::getInstance()->getAudio()->setInputVolume(volume);
 }
+
+void AudioDeviceScriptingInterface::setReverb(bool reverb) {
+    Application::getInstance()->getAudio()->setReverb(reverb);
+}
+
+void AudioDeviceScriptingInterface::setReverbOptions(const AudioEffectOptions* options) {
+    Application::getInstance()->getAudio()->setReverbOptions(options);
+}

@@ -39,6 +39,8 @@ public slots:
 
     float getInputVolume();
     void setInputVolume(float volume);
+    void setReverb(bool reverb);
+    void setReverbOptions(const AudioEffectOptions* options);
 };
 
 #endif // hifi_AudioDeviceScriptingInterface_h

@@ -32,6 +32,7 @@ public:
     float getTopMargin() const { return _topMargin; }
     float getRightMargin() const { return _rightMargin; }
     float getBottomMargin() const { return _bottomMargin; }
+    bool getIsFacingAvatar() const { return _isFacingAvatar; }
     xColor getBackgroundColor();
 
     // setters

@@ -41,6 +42,7 @@ public:
     void setTopMargin(float margin) { _topMargin = margin; }
     void setRightMargin(float margin) { _rightMargin = margin; }
     void setBottomMargin(float margin) { _bottomMargin = margin; }
+    void setIsFacingAvatar(bool isFacingAvatar) { _isFacingAvatar = isFacingAvatar; }
 
     virtual void setProperties(const QScriptValue& properties);
 
libraries/audio/src/AudioEffectOptions.cpp (new file, 88 lines)
@@ -0,0 +1,88 @@
+//
+//  AudioEffectOptions.cpp
+//  libraries/audio/src
+//
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#include "AudioEffectOptions.h"
+
+static const QString MAX_ROOM_SIZE_HANDLE = "maxRoomSize";
+static const QString ROOM_SIZE_HANDLE = "roomSize";
+static const QString REVERB_TIME_HANDLE = "reverbTime";
+static const QString DAMPIMG_HANDLE = "damping";
+static const QString SPREAD_HANDLE = "spread";
+static const QString INPUT_BANDWIDTH_HANDLE = "inputBandwidth";
+static const QString EARLY_LEVEL_HANDLE = "earlyLevel";
+static const QString TAIL_LEVEL_HANDLE = "tailLevel";
+static const QString DRY_LEVEL_HANDLE = "dryLevel";
+static const QString WET_LEVEL_HANDLE = "wetLevel";
+
+AudioEffectOptions::AudioEffectOptions(QScriptValue arguments) :
+    _maxRoomSize(50.0f),
+    _roomSize(50.0f),
+    _reverbTime(4.0f),
+    _damping(0.5f),
+    _spread(15.0f),
+    _inputBandwidth(0.75f),
+    _earlyLevel(-22.0f),
+    _tailLevel(-28.0f),
+    _dryLevel(0.0f),
+    _wetLevel(6.0f) {
+    if (arguments.property(MAX_ROOM_SIZE_HANDLE).isNumber()) {
+        _maxRoomSize = arguments.property(MAX_ROOM_SIZE_HANDLE).toNumber();
+    }
+    if (arguments.property(ROOM_SIZE_HANDLE).isNumber()) {
+        _roomSize = arguments.property(ROOM_SIZE_HANDLE).toNumber();
+    }
+    if (arguments.property(REVERB_TIME_HANDLE).isNumber()) {
+        _reverbTime = arguments.property(REVERB_TIME_HANDLE).toNumber();
+    }
+    if (arguments.property(DAMPIMG_HANDLE).isNumber()) {
+        _damping = arguments.property(DAMPIMG_HANDLE).toNumber();
+    }
+    if (arguments.property(SPREAD_HANDLE).isNumber()) {
+        _spread = arguments.property(SPREAD_HANDLE).toNumber();
+    }
+    if (arguments.property(INPUT_BANDWIDTH_HANDLE).isNumber()) {
+        _inputBandwidth = arguments.property(INPUT_BANDWIDTH_HANDLE).toNumber();
+    }
+    if (arguments.property(EARLY_LEVEL_HANDLE).isNumber()) {
+        _earlyLevel = arguments.property(EARLY_LEVEL_HANDLE).toNumber();
+    }
+    if (arguments.property(TAIL_LEVEL_HANDLE).isNumber()) {
+        _tailLevel = arguments.property(TAIL_LEVEL_HANDLE).toNumber();
+    }
+    if (arguments.property(DRY_LEVEL_HANDLE).isNumber()) {
+        _dryLevel = arguments.property(DRY_LEVEL_HANDLE).toNumber();
+    }
+    if (arguments.property(WET_LEVEL_HANDLE).isNumber()) {
+        _wetLevel = arguments.property(WET_LEVEL_HANDLE).toNumber();
+    }
+}
+
+AudioEffectOptions::AudioEffectOptions(const AudioEffectOptions &other) {
+    *this = other;
+}
+
+AudioEffectOptions& AudioEffectOptions::operator=(const AudioEffectOptions &other) {
+    _maxRoomSize = other._maxRoomSize;
+    _roomSize = other._roomSize;
+    _reverbTime = other._reverbTime;
+    _damping = other._damping;
+    _spread = other._spread;
+    _inputBandwidth = other._inputBandwidth;
+    _earlyLevel = other._earlyLevel;
+    _tailLevel = other._tailLevel;
+    _dryLevel = other._dryLevel;
+    _wetLevel = other._wetLevel;
+
+    return *this;
+}
+
+QScriptValue AudioEffectOptions::constructor(QScriptContext* context, QScriptEngine* engine) {
+    return engine->newQObject(new AudioEffectOptions(context->argument(0)));
+}
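AudioEffectOptions::constructor above is the static factory a QScriptEngine can call to build instances from script, which is what new AudioEffectOptions({...}) in examples/audioReverbOn.js relies on. The registration itself is not shown in this diff; a hedged sketch of how such a constructor is typically exposed through Qt Script:

    // Hedged sketch (not part of this diff): expose the constructor so a script can call
    // new AudioEffectOptions({ reverbTime: 4, wetLevel: 6 }).
    #include <QtScript/QScriptEngine>
    #include "AudioEffectOptions.h"

    void registerAudioEffectOptions(QScriptEngine* engine) {
        QScriptValue constructorValue = engine->newFunction(AudioEffectOptions::constructor);
        engine->globalObject().setProperty("AudioEffectOptions", constructorValue);
    }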
libraries/audio/src/AudioEffectOptions.h (new file, 106 lines)
@@ -0,0 +1,106 @@
+//
+//  AudioEffectOptions.h
+//  libraries/audio/src
+//
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef hifi_AudioEffectOptions_h
+#define hifi_AudioEffectOptions_h
+
+#include <QObject>
+#include <QtScript/QScriptContext>
+#include <QtScript/QScriptEngine>
+
+class AudioEffectOptions : public QObject {
+    Q_OBJECT
+
+    // Meters Square
+    Q_PROPERTY(float maxRoomSize READ getMaxRoomSize WRITE setMaxRoomSize)
+    Q_PROPERTY(float roomSize READ getRoomSize WRITE setRoomSize)
+
+    // Seconds
+    Q_PROPERTY(float reverbTime READ getReverbTime WRITE setReverbTime)
+
+    // Ratio between 0 and 1
+    Q_PROPERTY(float damping READ getDamping WRITE setDamping)
+
+    // (?) Does not appear to be set externally very often
+    Q_PROPERTY(float spread READ getSpread WRITE setSpread)
+
+    // Ratio between 0 and 1
+    Q_PROPERTY(float inputBandwidth READ getInputBandwidth WRITE setInputBandwidth)
+
+    // in dB
+    Q_PROPERTY(float earlyLevel READ getEarlyLevel WRITE setEarlyLevel)
+    Q_PROPERTY(float tailLevel READ getTailLevel WRITE setTailLevel)
+    Q_PROPERTY(float dryLevel READ getDryLevel WRITE setDryLevel)
+    Q_PROPERTY(float wetLevel READ getWetLevel WRITE setWetLevel)
+
+public:
+    AudioEffectOptions(QScriptValue arguments = QScriptValue());
+    AudioEffectOptions(const AudioEffectOptions &other);
+    AudioEffectOptions& operator=(const AudioEffectOptions &other);
+
+    static QScriptValue constructor(QScriptContext* context, QScriptEngine* engine);
+
+    float getRoomSize() const { return _roomSize; }
+    void setRoomSize(float roomSize ) { _roomSize = roomSize; }
+
+    float getMaxRoomSize() const { return _maxRoomSize; }
+    void setMaxRoomSize(float maxRoomSize ) { _maxRoomSize = maxRoomSize; }
+
+    float getReverbTime() const { return _reverbTime; }
+    void setReverbTime(float reverbTime ) { _reverbTime = reverbTime; }
+
+    float getDamping() const { return _damping; }
+    void setDamping(float damping ) { _damping = damping; }
+
+    float getSpread() const { return _spread; }
+    void setSpread(float spread ) { _spread = spread; }
+
+    float getInputBandwidth() const { return _inputBandwidth; }
+    void setInputBandwidth(float inputBandwidth ) { _inputBandwidth = inputBandwidth; }
+
+    float getEarlyLevel() const { return _earlyLevel; }
+    void setEarlyLevel(float earlyLevel ) { _earlyLevel = earlyLevel; }
+
+    float getTailLevel() const { return _tailLevel; }
+    void setTailLevel(float tailLevel ) { _tailLevel = tailLevel; }
+
+    float getDryLevel() const { return _dryLevel; }
+    void setDryLevel(float dryLevel) { _dryLevel = dryLevel; }
+
+    float getWetLevel() const { return _wetLevel; }
+    void setWetLevel(float wetLevel) { _wetLevel = wetLevel; }
+
+private:
+    // http://wiki.audacityteam.org/wiki/GVerb#Instant_Reverberb_settings
+
+    // Meters Square
+    float _maxRoomSize;
+    float _roomSize;
+
+    // Seconds
+    float _reverbTime;
+
+    // Ratio between 0 and 1
+    float _damping;
+
+    // ? (Does not appear to be set externally very often)
+    float _spread;
+
+    // Ratio between 0 and 1
+    float _inputBandwidth;
+
+    // dB
+    float _earlyLevel;
+    float _tailLevel;
+    float _dryLevel;
+    float _wetLevel;
+};
+
+#endif // hifi_AudioEffectOptions_h
@ -44,7 +44,8 @@ InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacit
|
||||||
_framesAvailableStat(),
|
_framesAvailableStat(),
|
||||||
_currentJitterBufferFrames(0),
|
_currentJitterBufferFrames(0),
|
||||||
_timeGapStatsForStatsPacket(0, STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
|
_timeGapStatsForStatsPacket(0, STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
|
||||||
_repetitionWithFade(settings._repetitionWithFade)
|
_repetitionWithFade(settings._repetitionWithFade),
|
||||||
|
_hasReverb(false)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -162,9 +163,22 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
|
||||||
}
|
}
|
||||||
|
|
||||||
int InboundAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
|
int InboundAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
|
||||||
|
int read = 0;
|
||||||
|
if (type == PacketTypeMixedAudio) {
|
||||||
|
memcpy(&_hasReverb, packetAfterSeqNum.data() + read, sizeof(bool));
|
||||||
|
read += sizeof(bool);
|
||||||
|
|
||||||
|
if (_hasReverb) {
|
||||||
|
memcpy(&_reverbTime, packetAfterSeqNum.data() + read, sizeof(float));
|
||||||
|
read += sizeof(float);
|
||||||
|
memcpy(&_wetLevel, packetAfterSeqNum.data() + read, sizeof(float));
|
||||||
|
read += sizeof(float);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// mixed audio packets do not have any info between the seq num and the audio data.
|
// mixed audio packets do not have any info between the seq num and the audio data.
|
||||||
numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
|
numAudioSamples = (packetAfterSeqNum.size() - read) / sizeof(int16_t);
|
||||||
return 0;
|
return read;
|
||||||
}
|
}
|
||||||
|
|
||||||
int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
|
int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
|
||||||
|
|
|
@ -155,6 +155,10 @@ public:
 
     int getPacketsReceived() const { return _incomingSequenceNumberStats.getReceived(); }
 
+    bool hasReverb() const { return _hasReverb; }
+    float getRevebTime() const { return _reverbTime; }
+    float getWetLevel() const { return _wetLevel; }
+
 public slots:
     /// This function should be called every second for all the stats to function properly. If dynamic jitter buffers
     /// is enabled, those stats are used to calculate _desiredJitterBufferFrames.
@ -243,6 +247,11 @@ protected:
     MovingMinMaxAvg<quint64> _timeGapStatsForStatsPacket;
 
     bool _repetitionWithFade;
+
+    // Reverb properties
+    bool _hasReverb;
+    float _reverbTime;
+    float _wetLevel;
 };
 
 float calculateRepeatedFrameFadeFactor(int indexOfRepeat);
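A hypothetical consumer of the new accessors might copy the stream's reverb settings into a local effect configuration. The sketch below uses made-up names (EffectConfig, applyReverb) and relies only on the getters declared above:

// Illustrative only: pull the stream's reverb settings into a local config.
struct EffectConfig {
    bool reverbEnabled = false;
    float reverbTime = 0.0f;   // seconds
    float wetLevel = 0.0f;     // dB
};

template <typename Stream>
void applyReverb(const Stream& stream, EffectConfig& config) {
    config.reverbEnabled = stream.hasReverb();
    if (config.reverbEnabled) {
        config.reverbTime = stream.getRevebTime();  // accessor name as declared above
        config.wetLevel = stream.getWetLevel();
    }
}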
@ -58,27 +58,21 @@ bool AvatarHashMap::containsAvatarWithDisplayName(const QString& displayName) {
 }
 
 AvatarData* AvatarHashMap::avatarWithDisplayName(const QString& displayName) {
-    AvatarHash::iterator avatarIterator = _avatarHash.begin();
-    while (avatarIterator != _avatarHash.end()) {
-        AvatarSharedPointer sharedAvatar = avatarIterator.value();
-        if (avatarIterator.value()->getDisplayName() == displayName) {
+    foreach(const AvatarSharedPointer& sharedAvatar, _avatarHash) {
+        if (sharedAvatar->getDisplayName() == displayName) {
             // this is a match
             // check if this avatar should still be around
             if (!shouldKillAvatar(sharedAvatar)) {
-                // we have a match, return true
+                // we have a match, return the AvatarData
                 return sharedAvatar.data();
             } else {
-                // we should remove this avatar, do that now
-                erase(avatarIterator);
+                // we should remove this avatar, but we might not be on a thread that is allowed
+                // so we just return NULL to the caller
+                return NULL;
             }
-
-            break;
-        } else {
-            ++avatarIterator;
         }
     }
 
-    // return false, no match
     return NULL;
 }
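Note that the rewritten lookup returns NULL both when no avatar matches and when the matching avatar is pending removal, so callers must handle a null result. A minimal sketch of such a caller (the helper name and logging are illustrative; "HashMap" stands in for an AvatarHashMap instance):

// Guard the NULL result described above.
#include <QDebug>
#include <QString>

template <typename HashMap>
bool hasUsableAvatar(HashMap& map, const QString& displayName) {
    if (map.avatarWithDisplayName(displayName)) {
        return true;
    }
    // Either nothing matched, or the match is scheduled for removal on another thread.
    qDebug() << "No usable avatar named" << displayName;
    return false;
}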
@ -301,10 +301,10 @@ void DatagramSequencer::clearReliableChannel(QObject* object) {
 void DatagramSequencer::sendRecordAcknowledged(const SendRecord& record) {
     // stop acknowledging the recorded packets
     while (!_receiveRecords.isEmpty() && _receiveRecords.first().packetNumber <= record.lastReceivedPacketNumber) {
+        emit receiveAcknowledged(0);
         const ReceiveRecord& received = _receiveRecords.first();
         _inputStream.persistReadMappings(received.mappings);
         _receivedHighPriorityMessages -= received.newHighPriorityMessages;
-        emit receiveAcknowledged(0);
         _receiveRecords.removeFirst();
     }
     _outputStream.persistWriteMappings(record.mappings);
@ -108,6 +108,9 @@ public:
     /// Returns the intput channel at the specified index, creating it if necessary.
     ReliableChannel* getReliableInputChannel(int index = 0);
 
+    /// Returns a reference to the stored receive mappings at the specified index.
+    const Bitstream::ReadMappings& getReadMappings(int index) const { return _receiveRecords.at(index).mappings; }
+
     /// Adds stats for all reliable channels to the referenced variables.
     void addReliableChannelStats(int& sendProgress, int& sendTotal, int& receiveProgress, int& receiveTotal) const;
@ -188,7 +188,9 @@ MetavoxelClient::MetavoxelClient(const SharedNodePointer& node, MetavoxelUpdater
     Endpoint(node, new PacketRecord(), new PacketRecord()),
     _updater(updater),
     _reliableDeltaChannel(NULL),
-    _reliableDeltaID(0) {
+    _reliableDeltaID(0),
+    _dummyInputStream(_dummyDataStream),
+    _dummyPacketNumber(0) {
 
     connect(_sequencer.getReliableInputChannel(RELIABLE_DELTA_CHANNEL_INDEX),
         SIGNAL(receivedMessage(const QVariant&, Bitstream&)), SLOT(handleMessage(const QVariant&, Bitstream&)));
@ -234,9 +236,9 @@ PacketRecord* MetavoxelClient::getAcknowledgedReceiveRecord(int packetNumber) co
     if (lastAcknowledged->getPacketNumber() == packetNumber) {
         return lastAcknowledged;
     }
-    foreach (PacketRecord* record, _clearedReceiveRecords) {
-        if (record->getPacketNumber() == packetNumber) {
-            return record;
+    foreach (const ClearedReceiveRecord& record, _clearedReceiveRecords) {
+        if (record.first->getPacketNumber() == packetNumber) {
+            return record.first;
         }
     }
     return NULL;
@ -257,8 +259,8 @@ void MetavoxelClient::recordReceive() {
     }
     _clearedSendRecords.clear();
 
-    foreach (PacketRecord* record, _clearedReceiveRecords) {
-        delete record;
+    foreach (const ClearedReceiveRecord& record, _clearedReceiveRecords) {
+        delete record.first;
     }
     _clearedReceiveRecords.clear();
 }
@ -273,10 +275,16 @@ void MetavoxelClient::clearSendRecordsBefore(int index) {
 }
 
 void MetavoxelClient::clearReceiveRecordsBefore(int index) {
+    // copy the mappings on first call per packet
+    if (_sequencer.getIncomingPacketNumber() > _dummyPacketNumber) {
+        _dummyPacketNumber = _sequencer.getIncomingPacketNumber();
+        _dummyInputStream.copyPersistentMappings(_sequencer.getInputStream());
+    }
+
     // move to cleared list
     QList<PacketRecord*>::iterator end = _receiveRecords.begin() + index + 1;
     for (QList<PacketRecord*>::const_iterator it = _receiveRecords.begin(); it != end; it++) {
-        _clearedReceiveRecords.append(*it);
+        _clearedReceiveRecords.append(ClearedReceiveRecord(*it, _sequencer.getReadMappings(index)));
     }
     _receiveRecords.erase(_receiveRecords.begin(), end);
 }
@ -289,7 +297,6 @@ void MetavoxelClient::writeUpdateMessage(Bitstream& out) {
 void MetavoxelClient::handleMessage(const QVariant& message, Bitstream& in) {
     int userType = message.userType();
     if (userType == MetavoxelDeltaMessage::Type) {
-        PacketRecord* receiveRecord = getLastAcknowledgedReceiveRecord();
         if (_reliableDeltaChannel) {
             MetavoxelData reference = _remoteData;
             MetavoxelLOD referenceLOD = _remoteDataLOD;
@ -299,6 +306,7 @@ void MetavoxelClient::handleMessage(const QVariant& message, Bitstream& in) {
             _reliableDeltaChannel = NULL;
 
         } else {
+            PacketRecord* receiveRecord = getLastAcknowledgedReceiveRecord();
             _remoteData.readDelta(receiveRecord->getData(), receiveRecord->getLOD(), in,
                 _remoteDataLOD = getLastAcknowledgedSendRecord()->getLOD());
             in.reset();
@ -319,8 +327,6 @@ void MetavoxelClient::handleMessage(const QVariant& message, Bitstream& in) {
         MetavoxelDeltaPendingMessage pending = message.value<MetavoxelDeltaPendingMessage>();
         if (pending.id > _reliableDeltaID) {
            _reliableDeltaID = pending.id;
-            _reliableDeltaChannel = _sequencer.getReliableInputChannel(RELIABLE_DELTA_CHANNEL_INDEX);
-            _reliableDeltaChannel->getBitstream().copyPersistentMappings(_sequencer.getInputStream());
             PacketRecord* sendRecord = getAcknowledgedSendRecord(pending.receivedPacketNumber);
             if (!sendRecord) {
                 qWarning() << "Missing send record for delta" << pending.receivedPacketNumber;
@ -334,6 +340,20 @@ void MetavoxelClient::handleMessage(const QVariant& message, Bitstream& in) {
             }
             _remoteDataLOD = receiveRecord->getLOD();
             _remoteData = receiveRecord->getData();
+
+            _reliableDeltaChannel = _sequencer.getReliableInputChannel(RELIABLE_DELTA_CHANNEL_INDEX);
+            if (receiveRecord == getLastAcknowledgedReceiveRecord()) {
+                _reliableDeltaChannel->getBitstream().copyPersistentMappings(_sequencer.getInputStream());
+
+            } else {
+                _reliableDeltaChannel->getBitstream().copyPersistentMappings(_dummyInputStream);
+                foreach (const ClearedReceiveRecord& record, _clearedReceiveRecords) {
+                    _reliableDeltaChannel->getBitstream().persistReadMappings(record.second);
+                    if (record.first == receiveRecord) {
+                        break;
+                    }
+                }
+            }
         }
     } else {
         Endpoint::handleMessage(message, in);
@ -145,8 +145,13 @@ protected:
     MetavoxelData _dataCopy;
     QReadWriteLock _dataCopyLock;
 
+    QDataStream _dummyDataStream;
+    Bitstream _dummyInputStream;
+    int _dummyPacketNumber;
     QList<PacketRecord*> _clearedSendRecords;
-    QList<PacketRecord*> _clearedReceiveRecords;
+
+    typedef QPair<PacketRecord*, Bitstream::ReadMappings> ClearedReceiveRecord;
+    QList<ClearedReceiveRecord> _clearedReceiveRecords;
 };
 
 #endif // hifi_MetavoxelClientManager_h
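The ClearedReceiveRecord typedef pairs each cleared record with the read mappings captured when it was cleared. A self-contained sketch of that pattern with stand-in types (Record and Mappings are placeholders for PacketRecord and Bitstream::ReadMappings; the function name is illustrative):

// Walk the cleared list in order, applying each snapshot until the target
// record is reached (mirrors the persistReadMappings loop in handleMessage()).
#include <QDebug>
#include <QList>
#include <QPair>

struct Record { int packetNumber; };
struct Mappings { int snapshotId; };

typedef QPair<Record*, Mappings> ClearedRecord;

void replayUpTo(const QList<ClearedRecord>& cleared, const Record* target) {
    foreach (const ClearedRecord& record, cleared) {
        qDebug() << "applying snapshot" << record.second.snapshotId;
        if (record.first == target) {
            break;
        }
    }
}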
@ -701,8 +701,7 @@ int VoxelMaterialSpannerEditVisitor::visit(MetavoxelInfo& info) {
     int sizeY = (int)overlap.maximum.y - minY + 1;
     int sizeZ = (int)overlap.maximum.z - minZ + 1;
 
-    QRgb rgb = _color.rgba();
-    bool flipped = (qAlpha(rgb) == 0);
+    bool flipped = false;
     float step = 1.0f / scale;
     glm::vec3 position(0.0f, 0.0f, info.minimum.z + minZ * step);
     if (_spanner->hasOwnColors()) {
@ -720,6 +719,8 @@ int VoxelMaterialSpannerEditVisitor::visit(MetavoxelInfo& info) {
             }
         }
     } else {
+        QRgb rgb = _color.rgba();
+        flipped = (qAlpha(rgb) == 0);
         for (QRgb* destZ = colorContents.data() + minZ * VOXEL_BLOCK_AREA + minY * VOXEL_BLOCK_SAMPLES + minX,
                 *endZ = destZ + sizeZ * VOXEL_BLOCK_AREA; destZ != endZ; destZ += VOXEL_BLOCK_AREA, position.z += step) {
             position.y = info.minimum.y + minY * step;
@ -5,7 +5,7 @@ setup_hifi_library(Gui Network Script Widgets)
 
 include_glm()
 
-link_hifi_libraries(shared octree voxels fbx entities animation)
+link_hifi_libraries(shared octree voxels fbx entities animation audio)
 
 # call macro to link our dependencies and bubble them up via a property on our target
 link_shared_dependencies()
@ -17,6 +17,7 @@
 #include <QtNetwork/QNetworkReply>
 #include <QScriptEngine>
 
+#include <AudioEffectOptions.h>
 #include <AudioInjector.h>
 #include <AudioRingBuffer.h>
 #include <AvatarData.h>
@ -278,6 +279,9 @@ void ScriptEngine::init() {
     QScriptValue localVoxelsValue = scriptValueFromQMetaObject<LocalVoxels>();
     globalObject().setProperty("LocalVoxels", localVoxelsValue);
 
+    QScriptValue audioEffectOptionsConstructorValue = newFunction(AudioEffectOptions::constructor);
+    globalObject().setProperty("AudioEffectOptions", audioEffectOptionsConstructorValue);
+
     qScriptRegisterMetaType(this, injectorToScriptValue, injectorFromScriptValue);
     qScriptRegisterMetaType(this, inputControllerToScriptValue, inputControllerFromScriptValue);
     qScriptRegisterMetaType(this, avatarDataToScriptValue, avatarDataFromScriptValue);
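The body of AudioEffectOptions::constructor is not part of this change; as a hedged sketch only, a script constructor registered this way commonly wraps a new QObject for the engine, roughly like the hypothetical example below (ExampleOptions is a stand-in class, not the real one, and Q_OBJECT is omitted to keep the sketch single-file):

// Illustrative pattern for a QScriptEngine constructor function.
#include <QScriptContext>
#include <QScriptEngine>
#include <QScriptValue>

class ExampleOptions : public QObject {
public:
    ExampleOptions(QScriptValue arguments = QScriptValue()) { Q_UNUSED(arguments); }

    // Called when a script runs "new ExampleOptions(...)".
    static QScriptValue constructor(QScriptContext* context, QScriptEngine* engine) {
        return engine->newQObject(new ExampleOptions(context->argument(0)),
                                  QScriptEngine::ScriptOwnership);
    }
};

// Registration would then mirror the lines above:
//     globalObject().setProperty("ExampleOptions", newFunction(ExampleOptions::constructor));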
libraries/script-engine/src/UndoStackScriptingInterface.cpp (new file, 58 lines)
@ -0,0 +1,58 @@
+//
+//  UndoStackScriptingInterface.cpp
+//  libraries/script-engine/src
+//
+//  Created by Ryan Huffman on 10/22/14.
+//  Copyright 2014 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#include <QDebug>
+#include <QScriptValue>
+#include <QScriptValueList>
+#include <QScriptEngine>
+
+#include "UndoStackScriptingInterface.h"
+
+UndoStackScriptingInterface::UndoStackScriptingInterface(QUndoStack* undoStack) : _undoStack(undoStack) {
+}
+
+void UndoStackScriptingInterface::pushCommand(QScriptValue undoFunction, QScriptValue undoData,
+                                              QScriptValue redoFunction, QScriptValue redoData) {
+    if (undoFunction.engine()) {
+        ScriptUndoCommand* undoCommand = new ScriptUndoCommand(undoFunction, undoData, redoFunction, redoData);
+        undoCommand->moveToThread(undoFunction.engine()->thread());
+        _undoStack->push(undoCommand);
+    }
+}
+
+ScriptUndoCommand::ScriptUndoCommand(QScriptValue undoFunction, QScriptValue undoData,
+                                     QScriptValue redoFunction, QScriptValue redoData) :
+    _undoFunction(undoFunction),
+    _undoData(undoData),
+    _redoFunction(redoFunction),
+    _redoData(redoData) {
+}
+
+void ScriptUndoCommand::undo() {
+    QMetaObject::invokeMethod(this, "doUndo");
+}
+
+void ScriptUndoCommand::redo() {
+    QMetaObject::invokeMethod(this, "doRedo");
+}
+
+void ScriptUndoCommand::doUndo() {
+    QScriptValueList args;
+    args << _undoData;
+    _undoFunction.call(QScriptValue(), args);
+}
+
+void ScriptUndoCommand::doRedo() {
+    QScriptValueList args;
+    args << _redoData;
+    _redoFunction.call(QScriptValue(), args);
+}
libraries/script-engine/src/UndoStackScriptingInterface.h (new file, 52 lines)
@ -0,0 +1,52 @@
+//
+//  UndoStackScriptingInterface.h
+//  libraries/script-engine/src
+//
+//  Created by Ryan Huffman on 10/22/14.
+//  Copyright 2014 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef hifi_UndoStackScriptingInterface_h
+#define hifi_UndoStackScriptingInterface_h
+
+#include <QUndoCommand>
+#include <QUndoStack>
+#include <QScriptValue>
+
+class UndoStackScriptingInterface : public QObject {
+    Q_OBJECT
+public:
+    UndoStackScriptingInterface(QUndoStack* undoStack);
+
+public slots:
+    void pushCommand(QScriptValue undoFunction, QScriptValue undoData, QScriptValue redoFunction, QScriptValue redoData);
+
+private:
+    QUndoStack* _undoStack;
+};
+
+class ScriptUndoCommand : public QObject, public QUndoCommand {
+    Q_OBJECT
+public:
+    ScriptUndoCommand(QScriptValue undoFunction, QScriptValue undoData, QScriptValue redoFunction, QScriptValue redoData);
+
+    virtual void undo();
+    virtual void redo();
+    virtual bool mergeWith(const QUndoCommand* command) { return false; }
+    virtual int id() const { return -1; }
+
+public slots:
+    void doUndo();
+    void doRedo();
+
+private:
+    QScriptValue _undoFunction;
+    QScriptValue _undoData;
+    QScriptValue _redoFunction;
+    QScriptValue _redoData;
+};
+
+#endif // hifi_UndoStackScriptingInterface_h
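How this interface gets wired to a script engine is not shown in this change; a plausible sketch, with the property name "UndoStack" and the helper function chosen purely for illustration:

// Hypothetical wiring: wrap the application's undo stack and hand it to scripts
// as a global object so they can call pushCommand(...).
#include <QScriptEngine>
#include <QUndoStack>

#include "UndoStackScriptingInterface.h"

void exposeUndoStack(QScriptEngine& engine, QUndoStack& undoStack) {
    UndoStackScriptingInterface* undoInterface = new UndoStackScriptingInterface(&undoStack);
    engine.globalObject().setProperty("UndoStack",
                                      engine.newQObject(undoInterface, QScriptEngine::ScriptOwnership));
}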