Merge pull request #6322 from Atlante45/record

Recording fixes from Clement.

Commit: 63106f6991
10 changed files with 162 additions and 164 deletions
@@ -16,11 +16,11 @@ var NUM_AC = 3; // This is the number of AC. Their ID need to be unique and betw
 var NAMES = new Array("Craig", "Clement", "Jeff"); // ACs names ordered by IDs (Default name is "ACx", x = ID + 1))
 
 // Those variables MUST be common to every scripts
-var controlVoxelSize = 0.25;
-var controlVoxelPosition = { x: 2000 , y: 0, z: 0 };
+var controlEntitySize = 0.25;
+var controlEntityPosition = { x: 2000 , y: 0, z: 0 };
 
 // Script. DO NOT MODIFY BEYOND THIS LINE.
-Script.include("libraries/toolBars.js");
+Script.include("../libraries/toolBars.js");
 
 var DO_NOTHING = 0;
 var PLAY = 1;
@@ -138,16 +138,22 @@ function sendCommand(id, action) {
         return;
     }
 
-    if (id === toolBars.length - 1) {
+    if (id === (toolBars.length - 1)) {
         for (i = 0; i < NUM_AC; i++) {
             sendCommand(i, action);
         }
         return;
    }
 
-    // TODO: Fix this to use some mechanism other than voxels
-    //Voxels.setVoxel(controlVoxelPosition.x + id * controlVoxelSize, controlVoxelPosition.y, controlVoxelPosition.z,
-    //                controlVoxelSize, COLORS[action].red, COLORS[action].green, COLORS[action].blue);
+    var position = { x: controlEntityPosition.x + id * controlEntitySize,
+                     y: controlEntityPosition.y, z: controlEntityPosition.z };
+    Entities.addEntity({
+        type: "Box",
+        position: position,
+        dimensions: { x: controlEntitySize, y: controlEntitySize, z: controlEntitySize },
+        color: COLORS[action],
+        lifetime: 5
+    });
 }
 
 function mousePressEvent(event) {
@@ -12,27 +12,25 @@
 HIFI_PUBLIC_BUCKET = "http://s3.amazonaws.com/hifi-public/";
 
 // Set the following variables to the values needed
-var filename = HIFI_PUBLIC_BUCKET + "ozan/bartender.rec";
+var filename = "/Users/clement/Desktop/recording.hfr";
 var playFromCurrentLocation = true;
 var useDisplayName = true;
 var useAttachments = true;
-var useHeadModel = true;
-var useSkeletonModel = true;
+var useAvatarModel = true;
 
 // ID of the agent. Two agents can't have the same ID.
 var id = 0;
 
-// Set head and skeleton models
-Avatar.faceModelURL = "http://public.highfidelity.io/models/heads/EvilPhilip_v7.fst";
-Avatar.skeletonModelURL = "http://public.highfidelity.io/models/skeletons/Philip_Carl_Body_A-Pose.fst";
+// Set avatar model URL
+Avatar.skeletonModelURL = "https://hifi-public.s3.amazonaws.com/marketplace/contents/e21c0b95-e502-4d15-8c41-ea2fc40f1125/3585ddf674869a67d31d5964f7b52de1.fst?1427169998";
 // Set position/orientation/scale here if playFromCurrentLocation is true
 Avatar.position = { x:1, y: 1, z: 1 };
 Avatar.orientation = Quat.fromPitchYawRollDegrees(0, 0, 0);
 Avatar.scale = 1.0;
 
 // Those variables MUST be common to every scripts
-var controlVoxelSize = 0.25;
-var controlVoxelPosition = { x: 2000 , y: 0, z: 0 };
+var controlEntitySize = 0.25;
+var controlEntityPosition = { x: 2000, y: 0, z: 0 };
 
 // Script. DO NOT MODIFY BEYOND THIS LINE.
 var DO_NOTHING = 0;
@@ -49,113 +47,111 @@ COLORS[STOP] = { red: STOP, green: 0, blue: 0 };
 COLORS[SHOW] = { red: SHOW, green: 0, blue: 0 };
 COLORS[HIDE] = { red: HIDE, green: 0, blue: 0 };
 
-controlVoxelPosition.x += id * controlVoxelSize;
+controlEntityPosition.x += id * controlEntitySize;
 
 Avatar.loadRecording(filename);
 
 Avatar.setPlayFromCurrentLocation(playFromCurrentLocation);
 Avatar.setPlayerUseDisplayName(useDisplayName);
 Avatar.setPlayerUseAttachments(useAttachments);
-Avatar.setPlayerUseHeadModel(useHeadModel);
-Avatar.setPlayerUseSkeletonModel(useSkeletonModel);
+Avatar.setPlayerUseHeadModel(false);
+Avatar.setPlayerUseSkeletonModel(useAvatarModel);
 
-function setupVoxelViewer() {
-    var voxelViewerOffset = 10;
-    var voxelViewerPosition = JSON.parse(JSON.stringify(controlVoxelPosition));
-    voxelViewerPosition.x -= voxelViewerOffset;
-    var voxelViewerOrientation = Quat.fromPitchYawRollDegrees(0, -90, 0);
-
-    VoxelViewer.setPosition(voxelViewerPosition);
-    VoxelViewer.setOrientation(voxelViewerOrientation);
-    VoxelViewer.queryOctree();
+function setupEntityViewer() {
+    var entityViewerOffset = 10;
+    var entityViewerPosition = { x: controlEntityPosition.x - entityViewerOffset,
+                                 y: controlEntityPosition.y, z: controlEntityPosition.z };
+    var entityViewerOrientation = Quat.fromPitchYawRollDegrees(0, -90, 0);
+
+    EntityViewer.setPosition(entityViewerPosition);
+    EntityViewer.setOrientation(entityViewerOrientation);
+    EntityViewer.queryOctree();
 }
 
-function getAction(controlVoxel) {
-    if (controlVoxel.x != controlVoxelPosition.x ||
-        controlVoxel.y != controlVoxelPosition.y ||
-        controlVoxel.z != controlVoxelPosition.z ||
-        controlVoxel.s != controlVoxelSize) {
-        return DO_NOTHING;
-    }
-
-    for (i in COLORS) {
-        if (controlVoxel.red === COLORS[i].red &&
-            controlVoxel.green === COLORS[i].green &&
-            controlVoxel.blue === COLORS[i].blue) {
-
-            // TODO: Fix this to use some mechanism other than voxels
-            //Voxels.eraseVoxel(controlVoxelPosition.x, controlVoxelPosition.y, controlVoxelPosition.z, controlVoxelSize);
-            return parseInt(i);
-        }
-    }
-
-    return DO_NOTHING;
-}
+function getAction(controlEntity) {
+    if (controlEntity === null ||
+        controlEntity.position.x !== controlEntityPosition.x ||
+        controlEntity.position.y !== controlEntityPosition.y ||
+        controlEntity.position.z !== controlEntityPosition.z ||
+        controlEntity.dimensions.x !== controlEntitySize) {
+        return DO_NOTHING;
+    }
+
+    for (i in COLORS) {
+        if (controlEntity.color.red === COLORS[i].red &&
+            controlEntity.color.green === COLORS[i].green &&
+            controlEntity.color.blue === COLORS[i].blue) {
+            Entities.deleteEntity(controlEntity.id);
+            return parseInt(i);
+        }
+    }
+
+    return DO_NOTHING;
+}
 
-count = 300; // This is necessary to wait for the audio mixer to connect
+count = 100; // This is necessary to wait for the audio mixer to connect
 function update(event) {
-    VoxelViewer.queryOctree();
+    EntityViewer.queryOctree();
     if (count > 0) {
         count--;
         return;
     }
 
-    // TODO: Fix this to use some mechanism other than voxels
-    // Voxels.getVoxelAt(controlVoxelPosition.x, controlVoxelPosition.y, controlVoxelPosition.z, controlVoxelSize);
-    var controlVoxel = false;
-    var action = getAction(controlVoxel);
+
+    var controlEntity = Entities.findClosestEntity(controlEntityPosition, controlEntitySize);
+    var action = getAction(Entities.getEntityProperties(controlEntity));
 
     switch(action) {
         case PLAY:
             print("Play");
             if (!Agent.isAvatar) {
                 Agent.isAvatar = true;
             }
             if (!Avatar.isPlaying()) {
                 Avatar.startPlaying();
             }
             Avatar.setPlayerLoop(false);
             break;
         case PLAY_LOOP:
             print("Play loop");
             if (!Agent.isAvatar) {
                 Agent.isAvatar = true;
             }
             if (!Avatar.isPlaying()) {
                 Avatar.startPlaying();
             }
             Avatar.setPlayerLoop(true);
             break;
         case STOP:
             print("Stop");
             if (Avatar.isPlaying()) {
                 Avatar.stopPlaying();
             }
             break;
         case SHOW:
             print("Show");
             if (!Agent.isAvatar) {
                 Agent.isAvatar = true;
             }
             break;
         case HIDE:
             print("Hide");
             if (Avatar.isPlaying()) {
                 Avatar.stopPlaying();
             }
             Agent.isAvatar = false;
             break;
         case DO_NOTHING:
             break;
         default:
             print("Unknown action: " + action);
             break;
     }
 
     if (Avatar.isPlaying()) {
         Avatar.play();
     }
 }
 
 Script.update.connect(update);
-setupVoxelViewer();
+setupEntityViewer();
@@ -14,8 +14,7 @@ var filename = "http://your.recording.url";
 var playFromCurrentLocation = true;
 var loop = true;
 
-Avatar.faceModelURL = "http://public.highfidelity.io/models/heads/EvilPhilip_v7.fst";
-Avatar.skeletonModelURL = "http://public.highfidelity.io/models/skeletons/Philip_Carl_Body_A-Pose.fst";
+Avatar.skeletonModelURL = "https://hifi-public.s3.amazonaws.com/marketplace/contents/e21c0b95-e502-4d15-8c41-ea2fc40f1125/3585ddf674869a67d31d5964f7b52de1.fst?1427169998";
 
 // Set position here if playFromCurrentLocation is true
 Avatar.position = { x:1, y: 1, z: 1 };
@@ -23,30 +22,34 @@ Avatar.orientation = Quat.fromPitchYawRollDegrees(0, 0, 0);
 Avatar.scale = 1.0;
 
 Agent.isAvatar = true;
 
 
 Avatar.loadRecording(filename);
 
 count = 300; // This is necessary to wait for the audio mixer to connect
 function update(event) {
     if (count > 0) {
         count--;
         return;
     }
     if (count == 0) {
         Avatar.setPlayFromCurrentLocation(playFromCurrentLocation);
         Avatar.setPlayerLoop(loop);
+        Avatar.setPlayerUseDisplayName(true);
+        Avatar.setPlayerUseAttachments(true);
+        Avatar.setPlayerUseHeadModel(false);
+        Avatar.setPlayerUseSkeletonModel(true);
         Avatar.startPlaying();
         Avatar.play();
         Vec3.print("Playing from ", Avatar.position);
 
         count--;
     }
 
     if (Avatar.isPlaying()) {
         Avatar.play();
     } else {
         Script.update.disconnect(update);
     }
 }
 
 Script.update.connect(update);
@@ -112,10 +112,9 @@ function setupTimer() {
         text: (0.00).toFixed(3),
         backgroundColor: COLOR_OFF,
         x: 0, y: 0,
-        width: 0,
-        height: 0,
-        alpha: 1.0,
-        backgroundAlpha: 1.0,
+        width: 0, height: 0,
+        leftMargin: 10, topMargin: 10,
+        alpha: 1.0, backgroundAlpha: 1.0,
         visible: true
     });
 
@@ -604,7 +604,7 @@ void MyAvatar::startRecording() {
     // connect to AudioClient's signal so we get input audio
     auto audioClient = DependencyManager::get<AudioClient>();
     connect(audioClient.data(), &AudioClient::inputReceived, _recorder.data(),
-            &Recorder::recordAudio, Qt::BlockingQueuedConnection);
+            &Recorder::recordAudio, Qt::QueuedConnection);
 
     _recorder->startRecording();
 }
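Note on this hunk: with Qt::QueuedConnection the handler runs later in the receiver's thread and the emitting audio thread does not wait for it, whereas Qt::BlockingQueuedConnection stalls the emitter until the slot returns; the QByteArray argument is copied into the queued event, so the receiver still sees a stable buffer. A minimal standalone sketch of that difference using ordinary Qt classes follows (the timer and context objects are illustrative, not part of this codebase):

// Standalone Qt sketch (not project code): a queued connection runs the handler
// in the receiver's thread without blocking the emitter, whereas
// Qt::BlockingQueuedConnection would stall the emitting thread until the
// handler returned.
#include <QByteArray>
#include <QCoreApplication>
#include <QDebug>
#include <QThread>
#include <QTimer>

int main(int argc, char** argv) {
    QCoreApplication app(argc, argv);

    QThread receiverThread;          // stands in for the thread the recorder lives in
    QObject receiverContext;         // context object owning the queued handler
    receiverContext.moveToThread(&receiverThread);
    receiverThread.start();

    QTimer input;                    // stands in for the audio-input signal source
    input.setInterval(10);

    // Queued connection: the lambda runs on receiverThread; the emitting
    // (main) thread continues immediately after the signal fires.
    QObject::connect(&input, &QTimer::timeout, &receiverContext, []() {
        QByteArray audio(480, '\0'); // pretend captured frame
        qDebug() << "handled on" << QThread::currentThread() << audio.size() << "bytes";
    }, Qt::QueuedConnection);

    input.start();
    QTimer::singleShot(100, &app, &QCoreApplication::quit);
    const int rc = app.exec();

    receiverThread.quit();
    receiverThread.wait();
    return rc;
}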
@@ -748,12 +748,13 @@ void AudioClient::handleAudioInput() {
            _audioPacket = NLPacket::create(PacketType::MicrophoneAudioNoEcho);
        }
 
-        float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio();
+        const float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio();
 
-        int inputSamplesRequired = (int)((float)AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL * inputToNetworkInputRatio);
+        const int inputSamplesRequired = (int)((float)AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL * inputToNetworkInputRatio);
+        const auto inputAudioSamples = std::unique_ptr<int16_t[]>(new int16_t[inputSamplesRequired]);
 
-        static int leadingBytes = sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
-        int16_t* networkAudioSamples = (int16_t*)(_audioPacket->getPayload() + leadingBytes);
+        static const int leadingBytes = sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
+        int16_t* const networkAudioSamples = (int16_t*)(_audioPacket->getPayload() + leadingBytes);
 
        QByteArray inputByteArray = _inputDevice->readAll();
@@ -802,16 +803,12 @@ void AudioClient::handleAudioInput() {
            _timeSinceLastClip += (float) numNetworkSamples / (float) AudioConstants::SAMPLE_RATE;
        }
 
-        int16_t* inputAudioSamples = new int16_t[inputSamplesRequired];
-        _inputRingBuffer.readSamples(inputAudioSamples, inputSamplesRequired);
-
+        _inputRingBuffer.readSamples(inputAudioSamples.get(), inputSamplesRequired);
        possibleResampling(_inputToNetworkResampler,
-                           inputAudioSamples, networkAudioSamples,
+                           inputAudioSamples.get(), networkAudioSamples,
                           inputSamplesRequired, numNetworkSamples,
                           _inputFormat, _desiredInputFormat);
 
-        delete[] inputAudioSamples;
-
        // Remove DC offset
        if (!_isStereoInput && !_audioSourceInjectEnabled) {
            _inputGate.removeDCOffset(networkAudioSamples, numNetworkSamples);
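Note on this hunk: the scratch buffer now comes from the std::unique_ptr<int16_t[]> declared earlier in the function, so the explicit delete[] disappears and the memory is released on every exit path. A standalone sketch of the idiom (the fillSamples helper and the 240-sample size are illustrative, not from this code):

// Standalone illustration of the RAII idiom used above: an array owned by
// std::unique_ptr<int16_t[]> is released automatically, even on early returns,
// while .get() still yields the raw pointer that C-style APIs expect.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <memory>

// Stand-in for a C-style consumer such as readSamples()/possibleResampling().
static void fillSamples(int16_t* dest, std::size_t count) {
    for (std::size_t i = 0; i < count; ++i) {
        dest[i] = static_cast<int16_t>(i);
    }
}

int main() {
    const std::size_t inputSamplesRequired = 240;   // example size only
    const auto inputAudioSamples = std::unique_ptr<int16_t[]>(new int16_t[inputSamplesRequired]);

    fillSamples(inputAudioSamples.get(), inputSamplesRequired);
    std::printf("last sample: %d\n", inputAudioSamples[inputSamplesRequired - 1]);

    return 0;  // no delete[] needed; the unique_ptr releases the array here
}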
@@ -842,8 +839,7 @@ void AudioClient::handleAudioInput() {
                _lastInputLoudness = fabs(loudness / numNetworkSamples);
            }
 
-            emit inputReceived(QByteArray(reinterpret_cast<const char*>(networkAudioSamples),
-                                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL * sizeof(AudioConstants::AudioSample)));
+            emit inputReceived({reinterpret_cast<char*>(networkAudioSamples), numNetworkBytes});
 
        } else {
            // our input loudness is 0, since we're muted
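Note on this hunk: the signal now carries numNetworkBytes, the number of bytes actually written for this frame, instead of a hard-coded mono frame size, presumably so a stereo capture is forwarded to the recorder in full. A rough arithmetic sketch of why the fixed size falls short (the 240 samples-per-channel figure is an assumed example value, not taken from this diff):

// Rough arithmetic sketch (assumed example values, not project constants):
// a stereo network frame carries twice the bytes of a mono frame, so emitting
// a fixed mono-sized QByteArray would drop half of a stereo frame.
#include <cstdint>
#include <cstdio>

int main() {
    const int samplesPerChannel = 240;            // assumed example frame size
    const int bytesPerSample = sizeof(int16_t);   // 2 bytes

    for (int channels = 1; channels <= 2; ++channels) {
        const int numNetworkBytes = samplesPerChannel * channels * bytesPerSample;
        std::printf("%d channel(s): %d bytes per network frame\n", channels, numNetworkBytes);
    }
    return 0;   // prints 480 bytes for mono, 960 bytes for stereo
}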
@@ -205,7 +205,7 @@ void AudioInjector::injectToMixer() {
 
    while (_currentSendOffset < _audioData.size() && !_shouldStop) {
 
-        int bytesToCopy = std::min(((_options.stereo) ? 2 : 1) * AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL,
+        int bytesToCopy = std::min((_options.stereo ? 2 : 1) * AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL,
                                   _audioData.size() - _currentSendOffset);
 
        // Measure the loudness of this frame
@@ -261,7 +261,7 @@ void AudioInjector::injectToMixer() {
 
        // not the first packet and not done
        // sleep for the appropriate time
-        int usecToSleep = (++nextFrame * AudioConstants::NETWORK_FRAME_USECS) - timer.nsecsElapsed() / 1000;
+        int usecToSleep = (++nextFrame * (_options.stereo ? 2 : 1) * AudioConstants::NETWORK_FRAME_USECS) - timer.nsecsElapsed() / 1000;
 
        if (usecToSleep > 0) {
            usleep(usecToSleep);
@@ -397,16 +397,15 @@ bool Player::computeCurrentFrame() {
    }
 
    qint64 elapsed = glm::clamp(Player::elapsed() - _audioOffset, (qint64)0, (qint64)_recording->getLength());
-    while(_currentFrame >= 0 &&
-          _recording->getFrameTimestamp(_currentFrame) > elapsed) {
-        --_currentFrame;
-    }
-
    while (_currentFrame < _recording->getFrameNumber() &&
           _recording->getFrameTimestamp(_currentFrame) < elapsed) {
        ++_currentFrame;
    }
-    --_currentFrame;
 
+    while(_currentFrame > 0 &&
+          _recording->getFrameTimestamp(_currentFrame) > elapsed) {
+        --_currentFrame;
+    }
+
    if (_currentFrame == _recording->getFrameNumber() - 1) {
        --_currentFrame;
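Note on this hunk: the reworked search first walks forward to the first frame whose timestamp has caught up with the elapsed playback time, then steps back while the frame is still ahead of it, which keeps _currentFrame inside the recording instead of under-running below zero. A standalone sketch of the same style of scan over a plain timestamp array (names and bounds handling are illustrative, not a transcription of Player):

// Standalone sketch of the frame search above: move forward to the first frame
// whose timestamp has reached the elapsed time, then step back while the frame
// is still ahead of it, staying within the recording's bounds.
#include <cstdint>
#include <cstdio>
#include <vector>

static int findCurrentFrame(const std::vector<int64_t>& frameTimestamps, int currentFrame, int64_t elapsed) {
    const int frameCount = static_cast<int>(frameTimestamps.size());

    while (currentFrame < frameCount && frameTimestamps[currentFrame] < elapsed) {
        ++currentFrame;
    }
    while (currentFrame > 0 && (currentFrame >= frameCount || frameTimestamps[currentFrame] > elapsed)) {
        --currentFrame;
    }
    return currentFrame;
}

int main() {
    const std::vector<int64_t> timestamps = { 0, 100, 200, 300, 400 };   // milliseconds
    std::printf("elapsed 250 -> frame %d\n", findCurrentFrame(timestamps, 0, 250));   // frame 2
    std::printf("elapsed 999 -> frame %d\n", findCurrentFrame(timestamps, 0, 999));   // clamped to the last frame
    return 0;
}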
@@ -43,7 +43,6 @@ public slots:
    void record();
    void recordAudio(const QByteArray& audioArray);
 
-
 private:
    QElapsedTimer _timer;
    RecordingPointer _recording;
@@ -69,10 +69,10 @@ const RecordingFrame& Recording::getFrame(int i) const {
 
 int Recording::numberAudioChannel() const {
    // Check for stereo audio
-    int MSEC_PER_SEC = 1000;
-    int channelLength = (getLength() / MSEC_PER_SEC) *
-                        AudioConstants::SAMPLE_RATE * sizeof(AudioConstants::AudioSample);
-    return glm::round((float)channelLength / (float)getAudioData().size());
+    float MSEC_PER_SEC = 1000.0f;
+    float channelLength = ((float)getLength() / MSEC_PER_SEC) * AudioConstants::SAMPLE_RATE *
+                          sizeof(AudioConstants::AudioSample);
+    return glm::round((float)getAudioData().size() / channelLength);
 }
 
 void Recording::addFrame(int timestamp, RecordingFrame &frame) {
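Note on this hunk: numberAudioChannel() now divides the stored audio byte count by the byte length one channel of that duration would occupy, which yields 1 for mono and 2 for stereo; the old expression divided the other way around and also truncated getLength() / MSEC_PER_SEC in integer math. A worked example of the corrected formula (the 24 kHz sample rate and the byte counts are assumed example values):

// Worked example of the corrected channel-count formula: bytes actually stored
// divided by the bytes one channel of that duration would occupy.
// SAMPLE_RATE and the recording sizes below are assumed example values only.
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    const float MSEC_PER_SEC = 1000.0f;
    const float SAMPLE_RATE = 24000.0f;               // assumed rate, samples per second
    const float bytesPerSample = sizeof(int16_t);     // 2 bytes

    const float lengthMsec = 2000.0f;                 // a 2 second recording
    const float channelLength = (lengthMsec / MSEC_PER_SEC) * SAMPLE_RATE * bytesPerSample;   // 96000 bytes

    const float monoBytes = 96000.0f;                 // bytes stored for a mono capture
    const float stereoBytes = 192000.0f;              // bytes stored for a stereo capture

    std::printf("mono:   %d channel(s)\n", (int)std::lround(monoBytes / channelLength));     // prints 1
    std::printf("stereo: %d channel(s)\n", (int)std::lround(stereoBytes / channelLength));   // prints 2
    return 0;
}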