Mirror of https://github.com/JulianGro/overte.git, synced 2025-04-25 17:14:59 +02:00

Merge pull request #11874 from luiscuenca/audioscope

Improve Audio Scope

Commit: 91b2d8df8a
13 changed files with 966 additions and 133 deletions
interface/resources/qml/AudioScope.qml (new file, 634 lines)
@@ -0,0 +1,634 @@
//
//  AudioScope.qml
//
//  Created by Luis Cuenca on 11/22/2017
//  Copyright 2017 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or https://www.apache.org/licenses/LICENSE-2.0.html
//

import QtQuick 2.5
import QtQuick.Controls 1.4
import "styles-uit"
import "controls-uit" as HifiControlsUit

Item {
    id: root
    width: parent.width
    height: parent.height

    property var _scopeInputData
    property var _scopeOutputLeftData
    property var _scopeOutputRightData

    property var _triggerInputData
    property var _triggerOutputLeftData
    property var _triggerOutputRightData

    property var _triggerValues: QtObject {
        property int x: parent.width/2
        property int y: parent.height/3
    }

    property var _triggered: false
    property var _steps
    property var _refreshMs: 32
    property var _framesPerSecond: AudioScope.getFramesPerSecond()
    property var _isFrameUnits: true

    property var _holdStart: QtObject {
        property int x: 0
        property int y: 0
    }

    property var _holdEnd: QtObject {
        property int x: 0
        property int y: 0
    }

    property var _timeBeforeHold: 300
    property var _pressedTime: 0
    property var _isPressed: false

    property var _recOpacity: 0.0
    property var _recSign: 0.05

    property var _outputLeftState: false
    property var _outputRightState: false

    property var _wavFilePath: ""

    function isHolding() {
        return (_pressedTime > _timeBeforeHold);
    }

    function updateMeasureUnits() {
        timeButton.text = _isFrameUnits ? "Display Frames" : "Milliseconds";
        fiveLabel.text = _isFrameUnits ? "5" : "" + (Math.round(1000 * 5.0/_framesPerSecond));
        twentyLabel.text = _isFrameUnits ? "20" : "" + (Math.round(1000 * 20.0/_framesPerSecond));
        fiftyLabel.text = _isFrameUnits ? "50" : "" + (Math.round(1000 * 50.0/_framesPerSecond));
    }

    function collectScopeData() {
        if (inputCh.checked) {
            _scopeInputData = AudioScope.scopeInput;
        }
        if (outputLeftCh.checked) {
            _scopeOutputLeftData = AudioScope.scopeOutputLeft;
        }
        if (outputRightCh.checked) {
            _scopeOutputRightData = AudioScope.scopeOutputRight;
        }
    }

    function collectTriggerData() {
        if (inputCh.checked) {
            _triggerInputData = AudioScope.triggerInput;
        }
        if (outputLeftCh.checked) {
            _triggerOutputLeftData = AudioScope.triggerOutputLeft;
        }
        if (outputRightCh.checked) {
            _triggerOutputRightData = AudioScope.triggerOutputRight;
        }
    }

    function setRecordingLabelOpacity(opacity) {
        _recOpacity = opacity;
        recCircle.opacity = _recOpacity;
        recText.opacity = _recOpacity;
    }

    function updateRecordingLabel() {
        _recOpacity += _recSign;
        if (_recOpacity > 1.0 || _recOpacity < 0.0) {
            _recOpacity = _recOpacity > 1.0 ? 1.0 : 0.0;
            _recSign *= -1;
        }
        setRecordingLabelOpacity(_recOpacity);
    }

    function pullFreshValues() {
        if (Audio.getRecording()) {
            updateRecordingLabel();
        }

        if (!AudioScope.getPause()) {
            if (!_triggered) {
                collectScopeData();
            }
        }
        if (inputCh.checked || outputLeftCh.checked || outputRightCh.checked) {
            mycanvas.requestPaint();
        }
    }

    function startRecording() {
        _wavFilePath = (new Date()).toISOString(); // yyyy-mm-ddThh:mm:ss.sssZ
        _wavFilePath = _wavFilePath.replace(/[\-:]|\.\d*Z$/g, "").replace("T", "-") + ".wav";
        // Using controller recording default directory
        _wavFilePath = Recording.getDefaultRecordingSaveDirectory() + _wavFilePath;
        if (!Audio.startRecording(_wavFilePath)) {
            Messages.sendMessage("Hifi-Notifications", JSON.stringify({message:"Error creating: "+_wavFilePath}));
            updateRecordingUI(false);
        }
    }

    function stopRecording() {
        Audio.stopRecording();
        setRecordingLabelOpacity(0.0);
        Messages.sendMessage("Hifi-Notifications", JSON.stringify({message:"Saved: "+_wavFilePath}));
    }

    function updateRecordingUI(isRecording) {
        if (!isRecording) {
            recordButton.text = "Record";
            recordButton.color = hifi.buttons.black;
            outputLeftCh.checked = _outputLeftState;
            outputRightCh.checked = _outputRightState;
        } else {
            recordButton.text = "Stop";
            recordButton.color = hifi.buttons.red;
            _outputLeftState = outputLeftCh.checked;
            _outputRightState = outputRightCh.checked;
            outputLeftCh.checked = true;
            outputRightCh.checked = true;
        }
    }

    function toggleRecording() {
        if (Audio.getRecording()) {
            updateRecordingUI(false);
            stopRecording();
        } else {
            updateRecordingUI(true);
            startRecording();
        }
    }

    Timer {
        interval: _refreshMs; running: true; repeat: true
        onTriggered: pullFreshValues()
    }

    Canvas {
        id: mycanvas
        anchors.fill: parent

        onPaint: {

            function displayMeasureArea(ctx) {
                ctx.fillStyle = Qt.rgba(0.1, 0.1, 0.1, 1);
                ctx.fillRect(_holdStart.x, 0, _holdEnd.x - _holdStart.x, height);

                ctx.lineWidth = "2";
                ctx.strokeStyle = "#555555";

                ctx.beginPath();
                ctx.moveTo(_holdStart.x, 0);
                ctx.lineTo(_holdStart.x, height);
                ctx.moveTo(_holdEnd.x, 0);
                ctx.lineTo(_holdEnd.x, height);

                ctx.moveTo(_holdStart.x, _holdStart.y);
                ctx.lineTo(_holdEnd.x, _holdStart.y);
                ctx.moveTo(_holdEnd.x, _holdEnd.y);
                ctx.lineTo(_holdStart.x, _holdEnd.y);

                ctx.stroke();
            }

            function displayTrigger(ctx, lineWidth, color) {
                var crossSize = 3;
                var holeSize = 2;

                ctx.lineWidth = lineWidth;
                ctx.strokeStyle = color;

                ctx.beginPath();
                ctx.moveTo(_triggerValues.x - (crossSize + holeSize), _triggerValues.y);
                ctx.lineTo(_triggerValues.x - holeSize, _triggerValues.y);
                ctx.moveTo(_triggerValues.x + holeSize, _triggerValues.y);
                ctx.lineTo(_triggerValues.x + (crossSize + holeSize), _triggerValues.y);

                ctx.moveTo(_triggerValues.x, _triggerValues.y - (crossSize + holeSize));
                ctx.lineTo(_triggerValues.x, _triggerValues.y - holeSize);
                ctx.moveTo(_triggerValues.x, _triggerValues.y + holeSize);
                ctx.lineTo(_triggerValues.x, _triggerValues.y + (crossSize + holeSize));

                ctx.stroke();
            }

            function displayBackground(ctx, datawidth, steps, lineWidth, color) {
                var verticalPadding = 100;

                ctx.strokeStyle = color;
                ctx.lineWidth = lineWidth;

                ctx.moveTo(0, height/2);
                ctx.lineTo(datawidth, height/2);

                var gap = datawidth/steps;
                for (var i = 0; i < steps; i++) {
                    ctx.moveTo(i*gap + 1, verticalPadding);
                    ctx.lineTo(i*gap + 1, height-verticalPadding);
                }
                ctx.moveTo(datawidth-1, verticalPadding);
                ctx.lineTo(datawidth-1, height-verticalPadding);

                ctx.stroke();
            }

            function drawScope(ctx, data, width, color) {
                ctx.beginPath();
                ctx.strokeStyle = color;
                ctx.lineWidth = width;
                var x = 0;
                for (var i = 0; i < data.length-1; i++) {
                    ctx.moveTo(x, data[i] + height/2);
                    ctx.lineTo(++x, data[i+1] + height/2);
                }
                ctx.stroke();
            }

            function getMeasurementText(dist) {
                var datasize = _scopeInputData.length;
                var value = 0;
                if (fiveFrames.checked) {
                    value = (_isFrameUnits) ? 5.0*dist/datasize : (Math.round(1000 * 5.0/_framesPerSecond))*dist/datasize;
                } else if (twentyFrames.checked) {
                    value = (_isFrameUnits) ? 20.0*dist/datasize : (Math.round(1000 * 20.0/_framesPerSecond))*dist/datasize;
                } else if (fiftyFrames.checked) {
                    value = (_isFrameUnits) ? 50.0*dist/datasize : (Math.round(1000 * 50.0/_framesPerSecond))*dist/datasize;
                }
                value = Math.abs(Math.round(value*100)/100);
                var measureText = "" + value + (_isFrameUnits ? " frames" : " milliseconds");
                return measureText;
            }

            function drawMeasurements(ctx, color) {
                ctx.fillStyle = color;
                ctx.font = "normal 16px sans-serif";
                var fontwidth = 8;
                var measureText = getMeasurementText(_holdEnd.x - _holdStart.x);
                if (_holdStart.x < _holdEnd.x) {
                    ctx.fillText("" + height/2 - _holdStart.y, _holdStart.x-40, _holdStart.y);
                    ctx.fillText("" + height/2 - _holdEnd.y, _holdStart.x-40, _holdEnd.y);
                    ctx.fillText(measureText, _holdEnd.x+10, _holdEnd.y);
                } else {
                    ctx.fillText("" + height/2 - _holdStart.y, _holdStart.x+10, _holdStart.y);
                    ctx.fillText("" + height/2 - _holdEnd.y, _holdStart.x+10, _holdEnd.y);
                    ctx.fillText(measureText, _holdEnd.x-fontwidth*measureText.length, _holdEnd.y);
                }
            }

            var ctx = getContext("2d");

            ctx.fillStyle = Qt.rgba(0, 0, 0, 1);
            ctx.fillRect(0, 0, width, height);

            if (isHolding()) {
                displayMeasureArea(ctx);
            }

            var guideLinesColor = "#555555"
            var guideLinesWidth = "1"

            displayBackground(ctx, _scopeInputData.length, _steps, guideLinesWidth, guideLinesColor);

            var triggerWidth = "3"
            var triggerColor = "#EFB400"

            if (AudioScope.getAutoTrigger()) {
                displayTrigger(ctx, triggerWidth, triggerColor);
            }

            var scopeWidth = "2"
            var scopeInputColor = "#00B4EF"
            var scopeOutputLeftColor = "#BB0000"
            var scopeOutputRightColor = "#00BB00"

            if (!_triggered) {
                if (inputCh.checked) {
                    drawScope(ctx, _scopeInputData, scopeWidth, scopeInputColor);
                }
                if (outputLeftCh.checked) {
                    drawScope(ctx, _scopeOutputLeftData, scopeWidth, scopeOutputLeftColor);
                }
                if (outputRightCh.checked) {
                    drawScope(ctx, _scopeOutputRightData, scopeWidth, scopeOutputRightColor);
                }
            } else {
                if (inputCh.checked) {
                    drawScope(ctx, _triggerInputData, scopeWidth, scopeInputColor);
                }
                if (outputLeftCh.checked) {
                    drawScope(ctx, _triggerOutputLeftData, scopeWidth, scopeOutputLeftColor);
                }
                if (outputRightCh.checked) {
                    drawScope(ctx, _triggerOutputRightData, scopeWidth, scopeOutputRightColor);
                }
            }

            if (isHolding()) {
                drawMeasurements(ctx, "#eeeeee");
            }

            if (_isPressed) {
                _pressedTime += _refreshMs;
            }
        }
    }

    MouseArea {
        id: hitbox
        anchors.fill: mycanvas
        hoverEnabled: true
        onPressed: {
            _isPressed = true;
            _pressedTime = 0;
            _holdStart.x = mouseX;
            _holdStart.y = mouseY;
        }
        onPositionChanged: {
            _holdEnd.x = mouseX;
            _holdEnd.y = mouseY;
        }
        onReleased: {
            if (!isHolding() && AudioScope.getAutoTrigger()) {
                _triggerValues.x = mouseX
                _triggerValues.y = mouseY
                AudioScope.setTriggerValues(mouseX, mouseY-height/2);
            }
            _isPressed = false;
            _pressedTime = 0;
        }
    }

    HifiControlsUit.CheckBox {
        id: activated
        boxSize: 20
        anchors.top: parent.top;
        anchors.left: parent.left;
        anchors.topMargin: 8;
        anchors.leftMargin: 20;
        checked: AudioScope.getVisible();
        onCheckedChanged: {
            AudioScope.setVisible(checked);
            activelabel.text = AudioScope.getVisible() ? "On" : "Off"
        }
    }

    HifiControlsUit.Label {
        id: activelabel
        text: AudioScope.getVisible() ? "On" : "Off"
        anchors.top: activated.top;
        anchors.left: activated.right;
    }

    HifiControlsUit.CheckBox {
        id: outputLeftCh
        boxSize: 20
        text: "Output L"
        anchors.horizontalCenter: parent.horizontalCenter;
        anchors.top: parent.top;
        anchors.topMargin: 8;
        onCheckedChanged: {
            AudioScope.setServerEcho(outputLeftCh.checked || outputRightCh.checked);
        }
    }

    HifiControlsUit.Label {
        text: "Channels";
        anchors.horizontalCenter: outputLeftCh.horizontalCenter;
        anchors.bottom: outputLeftCh.top;
        anchors.bottomMargin: 8;
    }

    HifiControlsUit.CheckBox {
        id: inputCh
        boxSize: 20
        text: "Input Mono"
        anchors.bottom: outputLeftCh.bottom;
        anchors.right: outputLeftCh.left;
        anchors.rightMargin: 40;
        onCheckedChanged: {
            AudioScope.setLocalEcho(checked);
        }
    }

    HifiControlsUit.CheckBox {
        id: outputRightCh
        boxSize: 20
        text: "Output R"
        anchors.bottom: outputLeftCh.bottom;
        anchors.left: outputLeftCh.right;
        anchors.leftMargin: 40;
        onCheckedChanged: {
            AudioScope.setServerEcho(outputLeftCh.checked || outputRightCh.checked);
        }
    }

    HifiControlsUit.Button {
        id: recordButton;
        text: "Record";
        color: hifi.buttons.black;
        colorScheme: hifi.colorSchemes.dark;
        anchors.right: parent.right;
        anchors.bottom: parent.bottom;
        anchors.rightMargin: 30;
        anchors.bottomMargin: 8;
        width: 95;
        height: 55;
        onClicked: {
            toggleRecording();
        }
    }

    HifiControlsUit.Button {
        id: pauseButton;
        color: hifi.buttons.black;
        colorScheme: hifi.colorSchemes.dark;
        anchors.right: recordButton.left;
        anchors.bottom: parent.bottom;
        anchors.rightMargin: 30;
        anchors.bottomMargin: 8;
        height: 55;
        width: 95;
        text: " Pause ";
        onClicked: {
            AudioScope.togglePause();
        }
    }

    HifiControlsUit.CheckBox {
        id: twentyFrames
        boxSize: 20
        anchors.left: parent.horizontalCenter;
        anchors.bottom: parent.bottom;
        anchors.bottomMargin: 8;
        onCheckedChanged: {
            if (checked) {
                fiftyFrames.checked = false;
                fiveFrames.checked = false;
                AudioScope.selectAudioScopeTwentyFrames();
                _steps = 20;
                AudioScope.setPause(false);
            }
        }
    }

    HifiControlsUit.Label {
        id: twentyLabel
        anchors.left: twentyFrames.right;
        anchors.verticalCenter: twentyFrames.verticalCenter;
    }

    HifiControlsUit.Button {
        id: timeButton;
        color: hifi.buttons.black;
        colorScheme: hifi.colorSchemes.dark;
        text: "Display Frames";
        anchors.horizontalCenter: twentyFrames.horizontalCenter;
        anchors.bottom: twentyFrames.top;
        anchors.bottomMargin: 8;
        height: 26;
        onClicked: {
            _isFrameUnits = !_isFrameUnits;
            updateMeasureUnits();
        }
    }

    HifiControlsUit.CheckBox {
        id: fiveFrames
        boxSize: 20
        anchors.horizontalCenter: parent.horizontalCenter;
        anchors.bottom: parent.bottom;
        anchors.bottomMargin: 8;
        anchors.horizontalCenterOffset: -50;
        checked: true;
        onCheckedChanged: {
            if (checked) {
                fiftyFrames.checked = false;
                twentyFrames.checked = false;
                AudioScope.selectAudioScopeFiveFrames();
                _steps = 5;
                AudioScope.setPause(false);
            }
        }
    }

    HifiControlsUit.Label {
        id: fiveLabel
        anchors.left: fiveFrames.right;
        anchors.verticalCenter: fiveFrames.verticalCenter;
    }

    HifiControlsUit.CheckBox {
        id: fiftyFrames
        boxSize: 20
        anchors.horizontalCenter: parent.horizontalCenter;
        anchors.bottom: parent.bottom;
        anchors.bottomMargin: 8;
        anchors.horizontalCenterOffset: 70;
        onCheckedChanged: {
            if (checked) {
                twentyFrames.checked = false;
                fiveFrames.checked = false;
                AudioScope.selectAudioScopeFiftyFrames();
                _steps = 50;
                AudioScope.setPause(false);
            }
        }
    }

    HifiControlsUit.Label {
        id: fiftyLabel
        anchors.left: fiftyFrames.right;
        anchors.verticalCenter: fiftyFrames.verticalCenter;
    }

    HifiControlsUit.Switch {
        id: triggerSwitch;
        height: 26;
        anchors.left: parent.left;
        anchors.bottom: parent.bottom;
        anchors.leftMargin: 75;
        anchors.bottomMargin: 8;
        labelTextOff: "Off";
        labelTextOn: "On";
        onCheckedChanged: {
            if (!checked) AudioScope.setPause(false);
            AudioScope.setPause(false);
            AudioScope.setAutoTrigger(checked);
            AudioScope.setTriggerValues(_triggerValues.x, _triggerValues.y-root.height/2);
        }
    }

    HifiControlsUit.Label {
        text: "Trigger";
        anchors.left: triggerSwitch.left;
        anchors.leftMargin: -15;
        anchors.bottom: triggerSwitch.top;
    }

    Rectangle {
        id: recordIcon;
        width: 110;
        height: 40;
        anchors.right: parent.right;
        anchors.top: parent.top;
        anchors.topMargin: 8;
        color: "transparent"

        Text {
            id: recText
            text: "REC"
            color: "red"
            font.pixelSize: 30;
            anchors.left: recCircle.right;
            anchors.leftMargin: 10;
            opacity: _recOpacity;
            y: -8;
        }

        Rectangle {
            id: recCircle;
            width: 25;
            height: 25;
            radius: width*0.5
            opacity: _recOpacity;
            color: "red";
        }
    }

    Component.onCompleted: {
        _steps = AudioScope.getFramesPerScope();
        AudioScope.setTriggerValues(_triggerValues.x, _triggerValues.y-root.height/2);
        activated.checked = true;
        inputCh.checked = true;
        updateMeasureUnits();
    }

    Connections {
        target: AudioScope
        onPauseChanged: {
            if (!AudioScope.getPause()) {
                pauseButton.text = "Pause";
                pauseButton.color = hifi.buttons.black;
                AudioScope.setTriggered(false);
                _triggered = false;
            } else {
                pauseButton.text = "Continue";
                pauseButton.color = hifi.buttons.blue;
            }
        }
        onTriggered: {
            _triggered = true;
            collectTriggerData();
            AudioScope.setPause(true);
        }
    }
}
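The QML's "Display Frames" / "Milliseconds" button relabels the 5/20/50-frame windows with Math.round(1000 * N / _framesPerSecond). A minimal sketch of that conversion, assuming AudioScope.getFramesPerSecond() returns the stock network rate of 100 frames per second (10 ms frames); that constant is an assumption here, not something this diff states:

    #include <cmath>
    #include <cstdio>

    int main() {
        // Assumed value: AudioScope::getFramesPerSecond() returns
        // AudioConstants::NETWORK_FRAMES_PER_SEC (100 fps, i.e. 10 ms frames).
        const float framesPerSecond = 100.0f;
        for (int frames : {5, 20, 50}) {
            // Same rounding the QML labels use: Math.round(1000 * N / fps)
            long ms = std::lround(1000.0f * frames / framesPerSecond);
            std::printf("%d frames ~ %ld ms\n", frames, ms); // 50, 200, 500 ms
        }
        return 0;
    }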
interface/src/Menu.cpp

@@ -679,36 +679,16 @@ Menu::Menu() {
    });

    auto audioIO = DependencyManager::get<AudioClient>();
    addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoServerAudio, 0, false,
        audioIO.data(), SLOT(toggleServerEcho()));
    addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::EchoLocalAudio, 0, false,
        audioIO.data(), SLOT(toggleLocalEcho()));
    addActionToQMenuAndActionHash(audioDebugMenu, MenuOption::MuteEnvironment, 0,
        audioIO.data(), SLOT(sendMuteEnvironmentPacket()));

    auto scope = DependencyManager::get<AudioScope>();
    MenuWrapper* audioScopeMenu = audioDebugMenu->addMenu("Audio Scope");
    addCheckableActionToQMenuAndActionHash(audioScopeMenu, MenuOption::AudioScope, Qt::CTRL | Qt::Key_F2, false,
        scope.data(), SLOT(toggle()));
    addCheckableActionToQMenuAndActionHash(audioScopeMenu, MenuOption::AudioScopePause, Qt::CTRL | Qt::SHIFT | Qt::Key_F2, false,
        scope.data(), SLOT(togglePause()));

    addDisabledActionAndSeparator(audioScopeMenu, "Display Frames");
    {
        QAction* fiveFrames = addCheckableActionToQMenuAndActionHash(audioScopeMenu, MenuOption::AudioScopeFiveFrames,
            0, true, scope.data(), SLOT(selectAudioScopeFiveFrames()));

        QAction* twentyFrames = addCheckableActionToQMenuAndActionHash(audioScopeMenu, MenuOption::AudioScopeTwentyFrames,
            0, false, scope.data(), SLOT(selectAudioScopeTwentyFrames()));

        QAction* fiftyFrames = addCheckableActionToQMenuAndActionHash(audioScopeMenu, MenuOption::AudioScopeFiftyFrames,
            0, false, scope.data(), SLOT(selectAudioScopeFiftyFrames()));

        QActionGroup* audioScopeFramesGroup = new QActionGroup(audioScopeMenu);
        audioScopeFramesGroup->addAction(fiveFrames);
        audioScopeFramesGroup->addAction(twentyFrames);
        audioScopeFramesGroup->addAction(fiftyFrames);
    }
    action = addActionToQMenuAndActionHash(audioDebugMenu, MenuOption::AudioScope);
    connect(action, &QAction::triggered, [] {
        auto scriptEngines = DependencyManager::get<ScriptEngines>();
        QUrl defaultScriptsLoc = PathUtils::defaultScriptsLocation();
        defaultScriptsLoc.setPath(defaultScriptsLoc.path() + "developer/utilities/audio/audioScope.js");
        scriptEngines->loadScript(defaultScriptsLoc.toString());
    });

    // Developer > Physics >>>
    MenuWrapper* physicsOptionsMenu = developerMenu->addMenu("Physics");
interface/src/audio/AudioScope.cpp

@@ -9,6 +9,7 @@
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <qvector2d.h>
#include <limits>

#include <AudioClient.h>

@@ -21,13 +22,14 @@
#include "AudioScope.h"

static const unsigned int DEFAULT_FRAMES_PER_SCOPE = 5;
static const unsigned int SCOPE_WIDTH = AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL * DEFAULT_FRAMES_PER_SCOPE;
static const unsigned int MULTIPLIER_SCOPE_HEIGHT = 20;
static const unsigned int SCOPE_HEIGHT = 2 * 15 * MULTIPLIER_SCOPE_HEIGHT;

AudioScope::AudioScope() :
    _isEnabled(false),
    _isPaused(false),
    _isTriggered(false),
    _autoTrigger(false),
    _scopeInputOffset(0),
    _scopeOutputOffset(0),
    _framesPerScope(DEFAULT_FRAMES_PER_SCOPE),

@@ -43,6 +45,7 @@ AudioScope::AudioScope() :
    _outputRightD(DependencyManager::get<GeometryCache>()->allocateID())
{
    auto audioIO = DependencyManager::get<AudioClient>();

    connect(&audioIO->getReceivedAudioStream(), &MixedProcessedAudioStream::addedSilence,
        this, &AudioScope::addStereoSilenceToScope);
    connect(&audioIO->getReceivedAudioStream(), &MixedProcessedAudioStream::addedLastFrameRepeatedWithFade,

@@ -75,6 +78,18 @@ void AudioScope::selectAudioScopeFiftyFrames() {
    reallocateScope(50);
}

void AudioScope::setLocalEcho(bool localEcho) {
    DependencyManager::get<AudioClient>()->setLocalEcho(localEcho);
}

void AudioScope::setServerEcho(bool serverEcho) {
    DependencyManager::get<AudioClient>()->setServerEcho(serverEcho);
}

float AudioScope::getFramesPerSecond(){
    return AudioConstants::NETWORK_FRAMES_PER_SEC;
}

void AudioScope::allocateScope() {
    _scopeInputOffset = 0;
    _scopeOutputOffset = 0;

@@ -108,63 +123,14 @@ void AudioScope::freeScope() {
    }
}

void AudioScope::render(RenderArgs* renderArgs, int width, int height) {

    if (!_isEnabled) {
        return;
    }

    static const glm::vec4 backgroundColor = { 0.4f, 0.4f, 0.4f, 0.6f };
    static const glm::vec4 gridColor = { 0.7f, 0.7f, 0.7f, 1.0f };
    static const glm::vec4 inputColor = { 0.3f, 1.0f, 0.3f, 1.0f };
    static const glm::vec4 outputLeftColor = { 1.0f, 0.3f, 0.3f, 1.0f };
    static const glm::vec4 outputRightColor = { 0.3f, 0.3f, 1.0f, 1.0f };
    static const int gridCols = 2;
    int gridRows = _framesPerScope;

    int x = (width - (int)SCOPE_WIDTH) / 2;
    int y = (height - (int)SCOPE_HEIGHT) / 2;
    int w = (int)SCOPE_WIDTH;
    int h = (int)SCOPE_HEIGHT;

    gpu::Batch& batch = *renderArgs->_batch;
    auto geometryCache = DependencyManager::get<GeometryCache>();

    // Grid uses its own pipeline, so draw it before setting another
    const float GRID_EDGE = 0.005f;
    geometryCache->renderGrid(batch, glm::vec2(x, y), glm::vec2(x + w, y + h),
        gridRows, gridCols, GRID_EDGE, gridColor, true, _audioScopeGrid);

    geometryCache->useSimpleDrawPipeline(batch);
    auto textureCache = DependencyManager::get<TextureCache>();
    batch.setResourceTexture(0, textureCache->getWhiteTexture());

    // FIXME - do we really need to reset this here? we know that we're called inside of ApplicationOverlay::renderOverlays
    // which already set up our batch for us to have these settings
    mat4 legacyProjection = glm::ortho<float>(0, width, height, 0, -1000, 1000);
    batch.setProjectionTransform(legacyProjection);
    batch.setModelTransform(Transform());
    batch.resetViewTransform();

    geometryCache->renderQuad(batch, x, y, w, h, backgroundColor, _audioScopeBackground);
    renderLineStrip(batch, _inputID, inputColor, x, y, _samplesPerScope, _scopeInputOffset, _scopeInput);
    renderLineStrip(batch, _outputLeftID, outputLeftColor, x, y, _samplesPerScope, _scopeOutputOffset, _scopeOutputLeft);
    renderLineStrip(batch, _outputRightD, outputRightColor, x, y, _samplesPerScope, _scopeOutputOffset, _scopeOutputRight);
}

void AudioScope::renderLineStrip(gpu::Batch& batch, int id, const glm::vec4& color, int x, int y, int n, int offset, const QByteArray* byteArray) {
QVector<int> AudioScope::getScopeVector(const QByteArray* byteArray, int offset) {
    int16_t sample;
    int16_t* samples = ((int16_t*) byteArray->data()) + offset;
    QVector<int> points;
    if (!_isEnabled || byteArray == NULL) return points;
    int16_t* samples = ((int16_t*)byteArray->data()) + offset;
    int numSamplesToAverage = _framesPerScope / DEFAULT_FRAMES_PER_SCOPE;
    int count = (n - offset) / numSamplesToAverage;
    int remainder = (n - offset) % numSamplesToAverage;
    y += SCOPE_HEIGHT / 2;

    auto geometryCache = DependencyManager::get<GeometryCache>();

    QVector<glm::vec2> points;

    int count = (_samplesPerScope - offset) / numSamplesToAverage;
    int remainder = (_samplesPerScope - offset) % numSamplesToAverage;

    // Compute and draw the sample averages from the offset position
    for (int i = count; --i >= 0; ) {

@@ -173,7 +139,7 @@ void AudioScope::renderLineStrip(gpu::Batch& batch, int id, const glm::vec4& col
        sample += *samples++;
    }
    sample /= numSamplesToAverage;
    points << glm::vec2(x++, y - sample);
    points << -sample;
}

// Compute and draw the sample average across the wrap boundary

@@ -182,16 +148,17 @@ void AudioScope::renderLineStrip(gpu::Batch& batch, int id, const glm::vec4& col
    for (int j = remainder; --j >= 0; ) {
        sample += *samples++;
    }

    samples = (int16_t*) byteArray->data();

    samples = (int16_t*)byteArray->data();

    for (int j = numSamplesToAverage - remainder; --j >= 0; ) {
        sample += *samples++;
    }
    sample /= numSamplesToAverage;
    points << glm::vec2(x++, y - sample);
} else {
    samples = (int16_t*) byteArray->data();
    points << -sample;
}
else {
    samples = (int16_t*)byteArray->data();
}

// Compute and draw the sample average from the beginning to the offset

@@ -202,12 +169,51 @@ void AudioScope::renderLineStrip(gpu::Batch& batch, int id, const glm::vec4& col
        sample += *samples++;
    }
    sample /= numSamplesToAverage;
    points << glm::vec2(x++, y - sample);

    points << -sample;
}
return points;
}

bool AudioScope::shouldTrigger(const QVector<int>& scope) {
    int threshold = 4;
    if (_autoTrigger && _triggerValues.x < scope.size()) {
        for (int i = -4*threshold; i < +4*threshold; i++) {
            int idx = _triggerValues.x + i;
            idx = (idx < 0) ? 0 : (idx < scope.size() ? idx : scope.size() - 1);
            int dif = abs(_triggerValues.y - scope[idx]);
            if (dif < threshold) {
                return true;
            }
        }
    }
    return false;
}

void AudioScope::storeTriggerValues() {
    _triggerInputData = _scopeInputData;
    _triggerOutputLeftData = _scopeOutputLeftData;
    _triggerOutputRightData = _scopeOutputRightData;
    _isTriggered = true;
    emit triggered();
}

void AudioScope::computeInputData() {
    _scopeInputData = getScopeVector(_scopeInput, _scopeInputOffset);
    if (shouldTrigger(_scopeInputData)) {
        storeTriggerValues();
    }
}

void AudioScope::computeOutputData() {
    _scopeOutputLeftData = getScopeVector(_scopeOutputLeft, _scopeOutputOffset);
    if (shouldTrigger(_scopeOutputLeftData)) {
        storeTriggerValues();
    }
    _scopeOutputRightData = getScopeVector(_scopeOutputRight, _scopeOutputOffset);
    if (shouldTrigger(_scopeOutputRightData)) {
        storeTriggerValues();
    }

    geometryCache->updateVertices(id, points, color);
    geometryCache->renderVertices(batch, gpu::LINE_STRIP, id);
}

int AudioScope::addBufferToScope(QByteArray* byteArray, int frameOffset, const int16_t* source, int sourceSamplesPerChannel,

@@ -231,7 +237,7 @@ int AudioScope::addBufferToScope(QByteArray* byteArray, int frameOffset, const i
}

int AudioScope::addSilenceToScope(QByteArray* byteArray, int frameOffset, int silentSamples) {

    // Short int pointer to mapped samples in byte array
    int16_t* destination = (int16_t*)byteArray->data();

@@ -271,6 +277,7 @@ void AudioScope::addStereoSamplesToScope(const QByteArray& samples) {
    _scopeOutputOffset = addBufferToScope(_scopeOutputRight, _scopeOutputOffset, samplesData, samplesPerChannel, 1, AudioConstants::STEREO);

    _scopeLastFrame = samples.right(AudioConstants::NETWORK_FRAME_BYTES_STEREO);
    computeOutputData();
}

void AudioScope::addLastFrameRepeatedWithFadeToScope(int samplesPerChannel) {

@@ -302,4 +309,5 @@ void AudioScope::addInputToScope(const QByteArray& inputSamples) {
    _scopeInputOffset = addBufferToScope(_scopeInput, _scopeInputOffset,
        reinterpret_cast<const int16_t*>(inputSamples.data()),
        inputSamples.size() / sizeof(int16_t), INPUT_AUDIO_CHANNEL, NUM_INPUT_CHANNELS);
    computeInputData();
}
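Two things worth noting in the AudioScope.cpp changes above: getScopeVector() averages groups of numSamplesToAverage = _framesPerScope / DEFAULT_FRAMES_PER_SCOPE consecutive samples, so the 20- and 50-frame views are downsampled to the same point count as the 5-frame view; and shouldTrigger() fires when any sample in a small window around the crosshair comes within a fixed threshold of the crosshair's y value. A standalone sketch of that trigger test (the _autoTrigger flag becomes the caller's concern, _triggerValues becomes parameters, and std::vector stands in for QVector):

    #include <cstdlib>
    #include <vector>

    // Mirrors AudioScope::shouldTrigger() above: probe +/- 16 samples around the
    // trigger x and return true if any sample is within `threshold` of trigger y.
    bool shouldTrigger(const std::vector<int>& scope, int triggerX, int triggerY) {
        const int threshold = 4;
        if (triggerX >= static_cast<int>(scope.size())) {
            return false;
        }
        for (int i = -4 * threshold; i < 4 * threshold; i++) {
            int idx = triggerX + i;
            // Clamp the probe index to the valid range, as the original does.
            int last = static_cast<int>(scope.size()) - 1;
            idx = idx < 0 ? 0 : (idx > last ? last : idx);
            if (std::abs(triggerY - scope[idx]) < threshold) {
                return true;
            }
        }
        return false;
    }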
interface/src/audio/AudioScope.h

@@ -24,27 +24,60 @@
class AudioScope : public QObject, public Dependency {
    Q_OBJECT
    SINGLETON_DEPENDENCY

    Q_PROPERTY(QVector<int> scopeInput READ getScopeInput)
    Q_PROPERTY(QVector<int> scopeOutputLeft READ getScopeOutputLeft)
    Q_PROPERTY(QVector<int> scopeOutputRight READ getScopeOutputRight)

    Q_PROPERTY(QVector<int> triggerInput READ getTriggerInput)
    Q_PROPERTY(QVector<int> triggerOutputLeft READ getTriggerOutputLeft)
    Q_PROPERTY(QVector<int> triggerOutputRight READ getTriggerOutputRight)

public:
    // Audio scope methods for allocation/deallocation
    void allocateScope();
    void freeScope();
    void reallocateScope(int frames);

    void render(RenderArgs* renderArgs, int width, int height);

public slots:
    void toggle() { setVisible(!_isEnabled); }
    void setVisible(bool visible);
    bool getVisible() const { return _isEnabled; }

    void togglePause() { _isPaused = !_isPaused; }
    void setPause(bool paused) { _isPaused = paused; }
    void togglePause() { setPause(!_isPaused); }
    void setPause(bool paused) { _isPaused = paused; emit pauseChanged(); }
    bool getPause() { return _isPaused; }

    void toggleTrigger() { _autoTrigger = !_autoTrigger; }
    bool getAutoTrigger() { return _autoTrigger; }
    void setAutoTrigger(bool autoTrigger) { _isTriggered = false; _autoTrigger = autoTrigger; }

    void setTriggerValues(int x, int y) { _triggerValues.x = x; _triggerValues.y = y; }
    void setTriggered(bool triggered) { _isTriggered = triggered; }
    bool getTriggered() { return _isTriggered; }

    float getFramesPerSecond();
    int getFramesPerScope() { return _framesPerScope; }

    void selectAudioScopeFiveFrames();
    void selectAudioScopeTwentyFrames();
    void selectAudioScopeFiftyFrames();

    QVector<int> getScopeInput() { return _scopeInputData; };
    QVector<int> getScopeOutputLeft() { return _scopeOutputLeftData; };
    QVector<int> getScopeOutputRight() { return _scopeOutputRightData; };

    QVector<int> getTriggerInput() { return _triggerInputData; };
    QVector<int> getTriggerOutputLeft() { return _triggerOutputLeftData; };
    QVector<int> getTriggerOutputRight() { return _triggerOutputRightData; };

    void setLocalEcho(bool serverEcho);
    void setServerEcho(bool serverEcho);

signals:
    void pauseChanged();
    void triggered();

protected:
    AudioScope();

@@ -55,24 +88,44 @@ private slots:
    void addInputToScope(const QByteArray& inputSamples);

private:
    // Audio scope methods for rendering
    void renderLineStrip(gpu::Batch& batch, int id, const glm::vec4& color, int x, int y, int n, int offset, const QByteArray* byteArray);

    // Audio scope methods for data acquisition
    int addBufferToScope(QByteArray* byteArray, int frameOffset, const int16_t* source, int sourceSamples,
        unsigned int sourceChannel, unsigned int sourceNumberOfChannels, float fade = 1.0f);
    int addSilenceToScope(QByteArray* byteArray, int frameOffset, int silentSamples);

    QVector<int> getScopeVector(const QByteArray* scope, int offset);

    bool shouldTrigger(const QVector<int>& scope);
    void computeInputData();
    void computeOutputData();

    void storeTriggerValues();

    bool _isEnabled;
    bool _isPaused;
    bool _isTriggered;
    bool _autoTrigger;
    int _scopeInputOffset;
    int _scopeOutputOffset;
    int _framesPerScope;
    int _samplesPerScope;

    QByteArray* _scopeInput;
    QByteArray* _scopeOutputLeft;
    QByteArray* _scopeOutputRight;
    QByteArray _scopeLastFrame;

    QVector<int> _scopeInputData;
    QVector<int> _scopeOutputLeftData;
    QVector<int> _scopeOutputRightData;

    QVector<int> _triggerInputData;
    QVector<int> _triggerOutputLeftData;
    QVector<int> _triggerOutputRightData;

    glm::ivec2 _triggerValues;

    int _audioScopeBackground;
    int _audioScopeGrid;
interface/src/scripting/Audio.cpp

@@ -58,6 +58,21 @@ Audio::Audio() : _devices(_contextIsHMD) {
    enableNoiseReduction(enableNoiseReductionSetting.get());
}

bool Audio::startRecording(const QString& filepath) {
    auto client = DependencyManager::get<AudioClient>().data();
    return client->startRecording(filepath);
}

bool Audio::getRecording() {
    auto client = DependencyManager::get<AudioClient>().data();
    return client->getRecording();
}

void Audio::stopRecording() {
    auto client = DependencyManager::get<AudioClient>().data();
    client->stopRecording();
}

void Audio::setMuted(bool isMuted) {
    if (_isMuted != isMuted) {
        auto client = DependencyManager::get<AudioClient>().data();
interface/src/scripting/Audio.h

@@ -16,6 +16,7 @@
#include "AudioDevices.h"
#include "AudioEffectOptions.h"
#include "SettingHandle.h"
#include "AudioFileWav.h"

namespace scripting {

@@ -55,6 +56,10 @@ public:
    Q_INVOKABLE void setReverb(bool enable);
    Q_INVOKABLE void setReverbOptions(const AudioEffectOptions* options);

    Q_INVOKABLE bool startRecording(const QString& filename);
    Q_INVOKABLE void stopRecording();
    Q_INVOKABLE bool getRecording();

signals:
    void nop();
    void mutedChanged(bool isMuted);

@@ -83,7 +88,6 @@ private:
    bool _isMuted { false };
    bool _enableNoiseReduction { true }; // Match default value of AudioClient::_isNoiseGateEnabled.
    bool _contextIsHMD { false };

    AudioDevices* getDevices() { return &_devices; }
    AudioDevices _devices;
};
interface/src/ui/ApplicationOverlay.cpp

@@ -82,7 +82,6 @@ void ApplicationOverlay::renderOverlay(RenderArgs* renderArgs) {

        // Now render the overlay components together into a single texture
        renderDomainConnectionStatusBorder(renderArgs); // renders the connected domain line
        renderAudioScope(renderArgs); // audio scope in the very back - NOTE: this is the debug audio scope, not the VU meter
        renderOverlays(renderArgs); // renders Scripts Overlay and AudioScope
        renderQmlUi(renderArgs); // renders a unit quad with the QML UI texture, and the text overlays from scripts
    });

@@ -118,25 +117,6 @@ void ApplicationOverlay::renderQmlUi(RenderArgs* renderArgs) {
    geometryCache->renderUnitQuad(batch, glm::vec4(1), _qmlGeometryId);
}

void ApplicationOverlay::renderAudioScope(RenderArgs* renderArgs) {
    PROFILE_RANGE(app, __FUNCTION__);

    gpu::Batch& batch = *renderArgs->_batch;
    auto geometryCache = DependencyManager::get<GeometryCache>();
    geometryCache->useSimpleDrawPipeline(batch);
    auto textureCache = DependencyManager::get<TextureCache>();
    batch.setResourceTexture(0, textureCache->getWhiteTexture());
    int width = renderArgs->_viewport.z;
    int height = renderArgs->_viewport.w;
    mat4 legacyProjection = glm::ortho<float>(0, width, height, 0, ORTHO_NEAR_CLIP, ORTHO_FAR_CLIP);
    batch.setProjectionTransform(legacyProjection);
    batch.setModelTransform(Transform());
    batch.resetViewTransform();

    // Render the audio scope
    DependencyManager::get<AudioScope>()->render(renderArgs, width, height);
}

void ApplicationOverlay::renderOverlays(RenderArgs* renderArgs) {
    PROFILE_RANGE(app, __FUNCTION__);
interface/src/ui/ApplicationOverlay.h

@@ -32,7 +32,6 @@ private:
    void renderStatsAndLogs(RenderArgs* renderArgs);
    void renderDomainConnectionStatusBorder(RenderArgs* renderArgs);
    void renderQmlUi(RenderArgs* renderArgs);
    void renderAudioScope(RenderArgs* renderArgs);
    void renderOverlays(RenderArgs* renderArgs);
    void buildFramebufferObject();
libraries/audio-client/src/AudioClient.cpp

@@ -79,6 +79,7 @@ Setting::Handle<int> staticJitterBufferFrames("staticJitterBufferFrames",
using Mutex = std::mutex;
using Lock = std::unique_lock<Mutex>;
Mutex _deviceMutex;
Mutex _recordMutex;

// thread-safe
QList<QAudioDeviceInfo> getAvailableDevices(QAudio::Mode mode) {

@@ -222,8 +223,7 @@ AudioClient::AudioClient() :
    // initialize wasapi; if getAvailableDevices is called from the CheckDevicesThread before this, it will crash
    getAvailableDevices(QAudio::AudioInput);
    getAvailableDevices(QAudio::AudioOutput);

    // start a thread to detect any device changes
    _checkDevicesTimer = new QTimer(this);
    connect(_checkDevicesTimer, &QTimer::timeout, [this] {

@@ -1845,11 +1845,9 @@ qint64 AudioClient::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
    qCDebug(audiostream, "Read %d samples from buffer (%d available, %d requested)", networkSamplesPopped, _receivedAudioStream.getSamplesAvailable(), samplesRequested);
    AudioRingBuffer::ConstIterator lastPopOutput = _receivedAudioStream.getLastPopOutput();
    lastPopOutput.readSamples(scratchBuffer, networkSamplesPopped);

    for (int i = 0; i < networkSamplesPopped; i++) {
        mixBuffer[i] = convertToFloat(scratchBuffer[i]);
    }

    samplesRequested = networkSamplesPopped;
}

@@ -1911,6 +1909,13 @@ qint64 AudioClient::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
    bytesWritten = maxSize;
}

// send output buffer for recording
if (_audio->_isRecording) {
    Lock lock(_recordMutex);
    _audio->_audioFileWav.addRawAudioChunk(reinterpret_cast<char*>(scratchBuffer), bytesWritten);
}

int bytesAudioOutputUnplayed = _audio->_audioOutput->bufferSize() - _audio->_audioOutput->bytesFree();
float msecsAudioOutputUnplayed = bytesAudioOutputUnplayed / (float)_audio->_outputFormat.bytesForDuration(USECS_PER_MSEC);
_audio->_stats.updateOutputMsUnplayed(msecsAudioOutputUnplayed);

@@ -1922,6 +1927,22 @@ qint64 AudioClient::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
    return bytesWritten;
}

bool AudioClient::startRecording(const QString& filepath) {
    if (!_audioFileWav.create(_outputFormat, filepath)) {
        qDebug() << "Error creating audio file: " + filepath;
        return false;
    }
    _isRecording = true;
    return true;
}

void AudioClient::stopRecording() {
    if (_isRecording) {
        _isRecording = false;
        _audioFileWav.close();
    }
}

void AudioClient::loadSettings() {
    _receivedAudioStream.setDynamicJitterBufferEnabled(dynamicJitterBufferEnabled.get());
    _receivedAudioStream.setStaticJitterBufferFrames(staticJitterBufferFrames.get());
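The recording hook added to readData() above is deliberately small: once the mixed output samples are in scratchBuffer, they are appended to the open WAV file while _recordMutex is held. A minimal sketch of that pattern, with hypothetical stand-ins for the AudioClient members:

    #include <cstdint>
    #include <mutex>

    // Hypothetical stand-ins for the AudioClient members used by the recording path.
    struct WavRecorder {
        std::mutex recordMutex;   // stands in for _recordMutex
        bool isRecording = false; // stands in for _isRecording
        void addRawAudioChunk(char* chunk, int size) {
            // Would stream `size` bytes from `chunk` into the open .wav file.
            (void)chunk; (void)size;
        }
    };

    // Called from the audio output callback after the mix has been written out.
    void appendToRecording(WavRecorder& rec, int16_t* scratchBuffer, int bytesWritten) {
        if (rec.isRecording) {
            std::lock_guard<std::mutex> lock(rec.recordMutex);
            rec.addRawAudioChunk(reinterpret_cast<char*>(scratchBuffer), bytesWritten);
        }
    }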
libraries/audio-client/src/AudioClient.h

@@ -47,11 +47,13 @@
#include <AudioConstants.h>
#include <AudioGate.h>

#include <shared/RateCounter.h>

#include <plugins/CodecPlugin.h>

#include "AudioIOStats.h"
#include "AudioFileWav.h"

#ifdef _WIN32
#pragma warning( push )

@@ -67,7 +69,6 @@ class QAudioInput;
class QAudioOutput;
class QIODevice;

class Transform;
class NLPacket;

@@ -118,6 +119,8 @@ public:
    const MixedProcessedAudioStream& getReceivedAudioStream() const { return _receivedAudioStream; }
    MixedProcessedAudioStream& getReceivedAudioStream() { return _receivedAudioStream; }

    const QAudioFormat& getOutputFormat() const { return _outputFormat; }

    float getLastInputLoudness() const { return _lastInputLoudness; } // TODO: relative to noise floor?

    float getTimeSinceLastClip() const { return _timeSinceLastClip; }

@@ -142,7 +145,7 @@ public:
    void setIsPlayingBackRecording(bool isPlayingBackRecording) { _isPlayingBackRecording = isPlayingBackRecording; }

    Q_INVOKABLE void setAvatarBoundingBoxParameters(glm::vec3 corner, glm::vec3 scale);

    bool outputLocalInjector(const AudioInjectorPointer& injector) override;

    QAudioDeviceInfo getActiveAudioDevice(QAudio::Mode mode) const;

@@ -155,6 +158,13 @@ public:

    bool getNamedAudioDeviceForModeExists(QAudio::Mode mode, const QString& deviceName);

    void setRecording(bool isRecording) { _isRecording = isRecording; };
    bool getRecording() { return _isRecording; };

    bool startRecording(const QString& filename);
    void stopRecording();

#ifdef Q_OS_WIN
    static QString getWinDeviceName(wchar_t* guid);
#endif

@@ -184,13 +194,17 @@ public slots:
    void toggleMute();
    bool isMuted() { return _muted; }

    virtual void setIsStereoInput(bool stereo) override;

    void setNoiseReduction(bool isNoiseGateEnabled);
    bool isNoiseReductionEnabled() const { return _isNoiseGateEnabled; }

    bool getLocalEcho() { return _shouldEchoLocally; }
    void setLocalEcho(bool localEcho) { _shouldEchoLocally = localEcho; }
    void toggleLocalEcho() { _shouldEchoLocally = !_shouldEchoLocally; }

    bool getServerEcho() { return _shouldEchoToServer; }
    void setServerEcho(bool serverEcho) { _shouldEchoToServer = serverEcho; }
    void toggleServerEcho() { _shouldEchoToServer = !_shouldEchoToServer; }

    void processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);

@@ -239,6 +253,8 @@ signals:

    void muteEnvironmentRequested(glm::vec3 position, float radius);

    void outputBufferReceived(const QByteArray _outputBuffer);

protected:
    AudioClient();
    ~AudioClient();

@@ -354,9 +370,8 @@ private:
    int16_t _localScratchBuffer[AudioConstants::NETWORK_FRAME_SAMPLES_AMBISONIC];
    float* _localOutputMixBuffer { NULL };
    Mutex _localAudioMutex;

    AudioLimiter _audioLimiter;

    // Adds Reverb
    void configureReverb();
    void updateReverbOptions();

@@ -391,6 +406,8 @@ private:
    QList<QAudioDeviceInfo> _inputDevices;
    QList<QAudioDeviceInfo> _outputDevices;

    AudioFileWav _audioFileWav;

    bool _hasReceivedFirstPacket { false };

    QVector<AudioInjectorPointer> _activeLocalAudioInjectors;

@@ -412,6 +429,8 @@ private:

    QTimer* _checkDevicesTimer { nullptr };
    QTimer* _checkPeakValuesTimer { nullptr };

    bool _isRecording { false };
};
libraries/audio-client/src/AudioFileWav.cpp (new file, 69 lines)

@@ -0,0 +1,69 @@
//
//  AudioFileWav.cpp
//  libraries/audio-client/src
//
//  Created by Luis Cuenca on 12/1/2017.
//  Copyright 2017 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "AudioFileWav.h"

bool AudioFileWav::create(const QAudioFormat& audioFormat, const QString& filepath) {
    if (_file.isOpen()) {
        _file.close();
    }
    _file.setFileName(filepath);
    if (!_file.open(QIODevice::WriteOnly)) {
        return false;
    }
    addHeader(audioFormat);
    return true;
}

bool AudioFileWav::addRawAudioChunk(char* chunk, int size) {
    if (_file.isOpen()) {
        QDataStream stream(&_file);
        stream.writeRawData(chunk, size);
        return true;
    }
    return false;
}

void AudioFileWav::close() {
    QDataStream stream(&_file);
    stream.setByteOrder(QDataStream::LittleEndian);

    // fill RIFF and size data on header
    _file.seek(4);
    stream << quint32(_file.size() - 8);
    _file.seek(40);
    stream << quint32(_file.size() - 44);
    _file.close();
}

void AudioFileWav::addHeader(const QAudioFormat& audioFormat) {
    QDataStream stream(&_file);

    stream.setByteOrder(QDataStream::LittleEndian);

    // RIFF
    stream.writeRawData("RIFF", 4);
    stream << quint32(0);
    stream.writeRawData("WAVE", 4);

    // Format description, PCM = 16
    stream.writeRawData("fmt ", 4);
    stream << quint32(16);
    stream << quint16(1);
    stream << quint16(audioFormat.channelCount());
    stream << quint32(audioFormat.sampleRate());
    stream << quint32(audioFormat.sampleRate() * audioFormat.channelCount() * audioFormat.sampleSize() / 8); // bytes per second
    stream << quint16(audioFormat.channelCount() * audioFormat.sampleSize() / 8); // block align
    stream << quint16(audioFormat.sampleSize()); // bits per sample
    // Init data chunk
    stream.writeRawData("data", 4);
    stream << quint32(0);
}
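addHeader() above streams out the canonical 44-byte PCM WAV header with both size fields left as zero; close() then back-patches them once the final file size is known, which is why it seeks to byte 4 (RIFF chunk size = file size - 8) and byte 40 (data chunk size = file size - 44). The same layout as a packed struct, purely illustrative and not part of the diff:

    #include <cstdint>

    #pragma pack(push, 1)
    struct WavHeader {            // canonical 44-byte PCM header, little-endian
        char     riff[4];         // offset 0:  "RIFF"
        uint32_t riffSize;        // offset 4:  fileSize - 8, patched in close()
        char     wave[4];         // offset 8:  "WAVE"
        char     fmt[4];          // offset 12: "fmt "
        uint32_t fmtSize;         // offset 16: 16 for PCM
        uint16_t audioFormat;     // offset 20: 1 = PCM
        uint16_t numChannels;     // offset 22: audioFormat.channelCount()
        uint32_t sampleRate;      // offset 24: audioFormat.sampleRate()
        uint32_t byteRate;        // offset 28: sampleRate * channels * bits / 8
        uint16_t blockAlign;      // offset 32: channels * bits / 8
        uint16_t bitsPerSample;   // offset 34: audioFormat.sampleSize()
        char     data[4];         // offset 36: "data"
        uint32_t dataSize;        // offset 40: fileSize - 44, patched in close()
    };
    #pragma pack(pop)
    static_assert(sizeof(WavHeader) == 44, "canonical PCM WAV header is 44 bytes");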
libraries/audio-client/src/AudioFileWav.h (new file, 34 lines)

@@ -0,0 +1,34 @@
//
//  AudioFileWav.h
//  libraries/audio-client/src
//
//  Created by Luis Cuenca on 12/1/2017.
//  Copyright 2017 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_AudioFileWav_h
#define hifi_AudioFileWav_h

#include <QObject>
#include <QFile>
#include <QDataStream>
#include <QVector>
#include <QAudioFormat>

class AudioFileWav : public QObject {
    Q_OBJECT
public:
    AudioFileWav() {}
    bool create(const QAudioFormat& audioFormat, const QString& filepath);
    bool addRawAudioChunk(char* chunk, int size);
    void close();

private:
    void addHeader(const QAudioFormat& audioFormat);
    QFile _file;
};

#endif // hifi_AudioFileWav_h
scripts/developer/utilities/audio/audioScope.js (new file, 17 lines)

@@ -0,0 +1,17 @@
var qml = Script.resourcesPath() + '/qml/AudioScope.qml';
var window = new OverlayWindow({
    title: 'Audio Scope',
    source: qml,
    width: 1200,
    height: 500
});
window.closed.connect(function () {
    if (Audio.getRecording()) {
        Audio.stopRecording();
    }
    AudioScope.setVisible(false);
    AudioScope.setLocalEcho(false);
    AudioScope.setServerEcho(false);
    AudioScope.selectAudioScopeFiveFrames();
    Script.stop();
});