Rework audio data memory ownership model
parent 76d2519205 · commit 7192aed131
15 changed files with 355 additions and 268 deletions
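In short: decoded audio used to be copied around as QByteArray (one copy held by Sound, more copies handed to each AudioInjector and to the local playback buffer). After this commit the decoded samples live in a single immutable AudioData object that every consumer shares through AudioDataPointer (std::shared_ptr<const AudioData>). The sketch below is a toy model of that ownership shape using only standard-library types; it is not the overte API.

```cpp
// Toy model of the ownership change in this commit (not the overte classes):
// one immutable sample buffer, allocated once, shared by every consumer
// through shared_ptr<const ...> instead of being copied into each one.
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

using SamplePtr = std::shared_ptr<const std::vector<int16_t>>;

struct Consumer {
    SamplePtr samples;  // holds a reference, never a copy
};

int main() {
    auto samples = std::make_shared<const std::vector<int16_t>>(
        std::vector<int16_t>{ 100, -200, 300, -400 });

    Consumer injector { samples };     // e.g. the network injection path
    Consumer localBuffer { samples };  // e.g. the local playback path

    // Both consumers read the same bytes; use_count() shows the sharing.
    std::cout << "refs: " << samples.use_count() << "\n";            // 3
    std::cout << "first sample: " << injector.samples->at(0) << "\n"; // 100
    return 0;
}
```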
@@ -754,13 +754,13 @@ void Agent::processAgentAvatarAudio() {
         const int16_t* nextSoundOutput = NULL;

         if (_avatarSound) {
-            const QByteArray& soundByteArray = _avatarSound->getByteArray();
-            nextSoundOutput = reinterpret_cast<const int16_t*>(soundByteArray.data()
+            auto audioData = _avatarSound->getAudioData();
+            nextSoundOutput = reinterpret_cast<const int16_t*>(audioData->rawData()
                 + _numAvatarSoundSentBytes);

-            int numAvailableBytes = (soundByteArray.size() - _numAvatarSoundSentBytes) > AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL
+            int numAvailableBytes = (audioData->getNumBytes() - _numAvatarSoundSentBytes) > AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL
                 ? AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL
-                : soundByteArray.size() - _numAvatarSoundSentBytes;
+                : audioData->getNumBytes() - _numAvatarSoundSentBytes;
             numAvailableSamples = (int16_t)numAvailableBytes / sizeof(int16_t);
@@ -773,7 +773,7 @@ void Agent::processAgentAvatarAudio() {
             }

             _numAvatarSoundSentBytes += numAvailableBytes;
-            if (_numAvatarSoundSentBytes == soundByteArray.size()) {
+            if (_numAvatarSoundSentBytes == (int)audioData->getNumBytes()) {
                 // we're done with this sound object - so set our pointer back to NULL
                 // and our sent bytes back to zero
                 _avatarSound.clear();
@@ -4050,8 +4050,7 @@ void Application::keyPressEvent(QKeyEvent* event) {
                 _snapshotSoundInjector->setOptions(options);
                 _snapshotSoundInjector->restart();
             } else {
-                QByteArray samples = _snapshotSound->getByteArray();
-                _snapshotSoundInjector = AudioInjector::playSound(samples, options);
+                _snapshotSoundInjector = AudioInjector::playSound(_snapshotSound, options);
             }
         }
         takeSnapshot(true);
@@ -562,8 +562,13 @@ void AvatarManager::handleCollisionEvents(const CollisionEvents& collisionEvents
             static const int MAX_INJECTOR_COUNT = 3;
             if (_collisionInjectors.size() < MAX_INJECTOR_COUNT) {
-                auto injector = AudioInjector::playSound(collisionSound, energyFactorOfFull, AVATAR_STRETCH_FACTOR,
-                    myAvatar->getWorldPosition());
+                AudioInjectorOptions options;
+                options.stereo = collisionSound->isStereo();
+                options.position = myAvatar->getWorldPosition();
+                options.volume = energyFactorOfFull;
+                options.pitch = 1.0f / AVATAR_STRETCH_FACTOR;
+
+                auto injector = AudioInjector::playSoundAndDelete(collisionSound, options);
                 _collisionInjectors.emplace_back(injector);
             }
             myAvatar->collisionWithEntity(collision);
@@ -22,7 +22,7 @@ namespace AudioConstants {
     const int STEREO = 2;
     const int AMBISONIC = 4;

-    typedef int16_t AudioSample;
+    using AudioSample = int16_t;
+    const int SAMPLE_SIZE = sizeof(AudioSample);

     inline const char* getAudioFrameName() { return "com.highfidelity.recording.Audio"; }
@@ -38,12 +38,14 @@ AudioInjectorState& operator|= (AudioInjectorState& lhs, AudioInjectorState rhs)
     return lhs;
 };

-AudioInjector::AudioInjector(const Sound& sound, const AudioInjectorOptions& injectorOptions) :
-    AudioInjector(sound.getByteArray(), injectorOptions)
+AudioInjector::AudioInjector(SharedSoundPointer sound, const AudioInjectorOptions& injectorOptions) :
+    _sound(sound),
+    _audioData(sound->getAudioData()),
+    _options(injectorOptions)
 {
 }

-AudioInjector::AudioInjector(const QByteArray& audioData, const AudioInjectorOptions& injectorOptions) :
+AudioInjector::AudioInjector(AudioDataPointer audioData, const AudioInjectorOptions& injectorOptions) :
     _audioData(audioData),
     _options(injectorOptions)
 {
@@ -154,7 +156,7 @@ bool AudioInjector::inject(bool(AudioInjectorManager::*injection)(const AudioInj
 bool AudioInjector::injectLocally() {
     bool success = false;
     if (_localAudioInterface) {
-        if (_audioData.size() > 0) {
+        if (_audioData->getNumBytes() > 0) {

             _localBuffer = new AudioInjectorLocalBuffer(_audioData);
@@ -220,22 +222,12 @@ int64_t AudioInjector::injectNextFrame() {
     if (!_currentPacket) {
         if (_currentSendOffset < 0 ||
-            _currentSendOffset >= _audioData.size()) {
+            _currentSendOffset >= (int)_audioData->getNumBytes()) {
             _currentSendOffset = 0;
         }

         // make sure we actually have samples downloaded to inject
-        if (_audioData.size()) {
-
-            int sampleSize = (_options.stereo ? 2 : 1) * sizeof(AudioConstants::AudioSample);
-            auto numSamples = static_cast<int>(_audioData.size() / sampleSize);
-            auto targetSize = numSamples * sampleSize;
-            if (targetSize != _audioData.size()) {
-                qCDebug(audio) << "Resizing audio that doesn't end at multiple of sample size, resizing from "
-                    << _audioData.size() << " to " << targetSize;
-                _audioData.resize(targetSize);
-            }
-
+        if (_audioData && _audioData->getNumSamples() > 0) {
             _outgoingSequenceNumber = 0;
             _nextFrame = 0;
@@ -307,19 +299,10 @@ int64_t AudioInjector::injectNextFrame() {
         _frameTimer->restart();
     }

-    int totalBytesLeftToCopy = (_options.stereo ? 2 : 1) * AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
-    if (!_options.loop) {
-        // If we aren't looping, let's make sure we don't read past the end
-        totalBytesLeftToCopy = std::min(totalBytesLeftToCopy, _audioData.size() - _currentSendOffset);
-    }
-
-    // Measure the loudness of this frame
-    _loudness = 0.0f;
-    for (int i = 0; i < totalBytesLeftToCopy; i += sizeof(int16_t)) {
-        _loudness += abs(*reinterpret_cast<int16_t*>(_audioData.data() + ((_currentSendOffset + i) % _audioData.size()))) /
-            (AudioConstants::MAX_SAMPLE_VALUE / 2.0f);
-    }
-    _loudness /= (float)(totalBytesLeftToCopy/ sizeof(int16_t));
     assert(loopbackOptionOffset != -1);
     assert(positionOptionOffset != -1);
     assert(volumeOptionOffset != -1);
     assert(audioDataOffset != -1);

     _currentPacket->seek(0);
@@ -339,19 +322,37 @@ int64_t AudioInjector::injectNextFrame() {
     _currentPacket->seek(audioDataOffset);

-    // This code is copying bytes from the _audioData directly into the packet, handling looping appropriately.
+    // This code is copying bytes from the _sound directly into the packet, handling looping appropriately.
     // Might be a reasonable place to do the encode step here.
     QByteArray decodedAudio;
-    while (totalBytesLeftToCopy > 0) {
-        int bytesToCopy = std::min(totalBytesLeftToCopy, _audioData.size() - _currentSendOffset);
-
-        decodedAudio.append(_audioData.data() + _currentSendOffset, bytesToCopy);
-        _currentSendOffset += bytesToCopy;
-        totalBytesLeftToCopy -= bytesToCopy;
-        if (_options.loop && _currentSendOffset >= _audioData.size()) {
-            _currentSendOffset = 0;
-        }
+    int totalBytesLeftToCopy = (_options.stereo ? 2 : 1) * AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
+    if (!_options.loop) {
+        // If we aren't looping, let's make sure we don't read past the end
+        int bytesLeftToRead = _audioData->getNumBytes() - _currentSendOffset;
+        totalBytesLeftToCopy = std::min(totalBytesLeftToCopy, bytesLeftToRead);
+    }
+
+    auto samples = _audioData->data();
+    auto currentSample = _currentSendOffset / AudioConstants::SAMPLE_SIZE;
+    auto samplesLeftToCopy = totalBytesLeftToCopy / AudioConstants::SAMPLE_SIZE;
+
+    using AudioConstants::AudioSample;
+    decodedAudio.resize(totalBytesLeftToCopy);
+    auto samplesOut = reinterpret_cast<AudioSample*>(decodedAudio.data());
+
+    // Copy and Measure the loudness of this frame
+    _loudness = 0.0f;
+    for (int i = 0; i < samplesLeftToCopy; ++i) {
+        auto index = (currentSample + i) % _audioData->getNumSamples();
+        auto sample = samples[index];
+        samplesOut[i] = sample;
+        _loudness += abs(sample) / (AudioConstants::MAX_SAMPLE_VALUE / 2.0f);
+    }
+    _loudness /= (float)samplesLeftToCopy;
+    _currentSendOffset = (_currentSendOffset + totalBytesLeftToCopy) %
+        _audioData->getNumBytes();

     // FIXME -- good place to call codec encode here. We need to figure out how to tell the AudioInjector which
     // codec to use... possible through AbstractAudioInterface.
     QByteArray encodedAudio = decodedAudio;
@@ -370,7 +371,7 @@ int64_t AudioInjector::injectNextFrame() {
         _outgoingSequenceNumber++;
     }

-    if (_currentSendOffset >= _audioData.size() && !_options.loop) {
+    if (_currentSendOffset == 0 && !_options.loop) {
         finishNetworkInjection();
         return NEXT_FRAME_DELTA_ERROR_OR_FINISHED;
     }
@@ -390,7 +391,7 @@ int64_t AudioInjector::injectNextFrame() {
         // If we are falling behind by more frames than our threshold, let's skip the frames ahead
         qCDebug(audio) << this << "injectNextFrame() skipping ahead, fell behind by " << (currentFrameBasedOnElapsedTime - _nextFrame) << " frames";
         _nextFrame = currentFrameBasedOnElapsedTime;
-        _currentSendOffset = _nextFrame * AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL * (_options.stereo ? 2 : 1) % _audioData.size();
+        _currentSendOffset = _nextFrame * AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL * (_options.stereo ? 2 : 1) % _audioData->getNumBytes();
     }

     int64_t playNextFrameAt = ++_nextFrame * AudioConstants::NETWORK_FRAME_USECS;
@@ -417,38 +418,25 @@ void AudioInjector::triggerDeleteAfterFinish() {
     }
 }

-AudioInjectorPointer AudioInjector::playSound(SharedSoundPointer sound, const float volume,
-                                              const float stretchFactor, const glm::vec3 position) {
+AudioInjectorPointer AudioInjector::playSoundAndDelete(SharedSoundPointer sound, const AudioInjectorOptions& options) {
+    AudioInjectorPointer injector = playSound(sound, options);
+
+    if (injector) {
+        injector->_state |= AudioInjectorState::PendingDelete;
+    }
+
+    return injector;
+}
+
+AudioInjectorPointer AudioInjector::playSound(SharedSoundPointer sound, const AudioInjectorOptions& options) {
     if (!sound || !sound->isReady()) {
         return AudioInjectorPointer();
     }

-    AudioInjectorOptions options;
-    options.stereo = sound->isStereo();
-    options.position = position;
-    options.volume = volume;
-    options.pitch = 1.0f / stretchFactor;
-
-    QByteArray samples = sound->getByteArray();
-
-    return playSoundAndDelete(samples, options);
-}
-
-AudioInjectorPointer AudioInjector::playSoundAndDelete(const QByteArray& buffer, const AudioInjectorOptions options) {
-    AudioInjectorPointer sound = playSound(buffer, options);
-
-    if (sound) {
-        sound->_state |= AudioInjectorState::PendingDelete;
-    }
-
-    return sound;
-}
-
-AudioInjectorPointer AudioInjector::playSound(const QByteArray& buffer, const AudioInjectorOptions options) {

     if (options.pitch == 1.0f) {

-        AudioInjectorPointer injector = AudioInjectorPointer::create(buffer, options);
+        AudioInjectorPointer injector = AudioInjectorPointer::create(sound, options);

         if (!injector->inject(&AudioInjectorManager::threadInjector)) {
             qWarning() << "AudioInjector::playSound failed to thread injector";
@@ -456,24 +444,31 @@ AudioInjectorPointer AudioInjector::playSound(const QByteArray& buffer, const Au
         return injector;

     } else {
+        using AudioConstants::AudioSample;
+        using AudioConstants::SAMPLE_RATE;
+        const int standardRate = SAMPLE_RATE;
+        // limit to 4 octaves
+        const int pitch = glm::clamp(options.pitch, 1 / 16.0f, 16.0f);
+        const int resampledRate = SAMPLE_RATE / pitch;

-        const int standardRate = AudioConstants::SAMPLE_RATE;
-        const int resampledRate = AudioConstants::SAMPLE_RATE / glm::clamp(options.pitch, 1/16.0f, 16.0f); // limit to 4 octaves
-        const int numChannels = options.ambisonic ? AudioConstants::AMBISONIC :
-            (options.stereo ? AudioConstants::STEREO : AudioConstants::MONO);
+        auto audioData = sound->getAudioData();
+        auto numChannels = audioData->getNumChannels();
+        auto numFrames = audioData->getNumFrames();

         AudioSRC resampler(standardRate, resampledRate, numChannels);

         // create a resampled buffer that is guaranteed to be large enough
-        const int nInputFrames = buffer.size() / (numChannels * sizeof(int16_t));
-        const int maxOutputFrames = resampler.getMaxOutput(nInputFrames);
-        QByteArray resampledBuffer(maxOutputFrames * numChannels * sizeof(int16_t), '\0');
+        const int maxOutputFrames = resampler.getMaxOutput(numFrames);
+        const int maxOutputSize = maxOutputFrames * numChannels * sizeof(AudioSample);
+        QByteArray resampledBuffer(maxOutputSize, '\0');
+        auto bufferPtr = reinterpret_cast<AudioSample*>(resampledBuffer.data());

-        resampler.render(reinterpret_cast<const int16_t*>(buffer.data()),
-                         reinterpret_cast<int16_t*>(resampledBuffer.data()),
-                         nInputFrames);
+        resampler.render(audioData->data(), bufferPtr, numFrames);

-        AudioInjectorPointer injector = AudioInjectorPointer::create(resampledBuffer, options);
+        int numSamples = maxOutputFrames * numChannels;
+        auto newAudioData = AudioData::make(numSamples, numChannels, bufferPtr);
+
+        AudioInjectorPointer injector = AudioInjectorPointer::create(newAudioData, options);

         if (!injector->inject(&AudioInjectorManager::threadInjector)) {
             qWarning() << "AudioInjector::playSound failed to thread pitch-shifted injector";
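The injectNextFrame() change above folds the old append-based byte copy and the separate loudness pass into a single index-based loop over the shared, read-only sample buffer, with the modulo on getNumSamples() handling looping. A standalone sketch of that copy-and-measure pattern follows; the constants and buffer sizes are illustrative, not the library's.

```cpp
// Standalone sketch of the combined copy-and-loudness loop: samples are read
// out of an immutable buffer by index (wrapping for looping) while their
// average absolute level is accumulated, as injectNextFrame() now does.
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <vector>

int main() {
    const float MAX_SAMPLE_VALUE = 32767.0f;
    std::vector<int16_t> audio = { 1000, -2000, 3000, -4000, 5000, -6000 };

    size_t currentSample = 4;   // start near the end to exercise wraparound
    size_t samplesToCopy = 4;   // one "frame" worth of samples
    std::vector<int16_t> frame(samplesToCopy);

    float loudness = 0.0f;
    for (size_t i = 0; i < samplesToCopy; ++i) {
        size_t index = (currentSample + i) % audio.size();  // loop back to 0
        int16_t sample = audio[index];
        frame[i] = sample;                                   // copy into the packet buffer
        loudness += std::abs(sample) / (MAX_SAMPLE_VALUE / 2.0f);
    }
    loudness /= static_cast<float>(samplesToCopy);

    std::cout << "frame loudness: " << loudness << "\n";
    return 0;
}
```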
@@ -52,8 +52,8 @@ AudioInjectorState& operator|= (AudioInjectorState& lhs, AudioInjectorState rhs)
 class AudioInjector : public QObject, public QEnableSharedFromThis<AudioInjector> {
     Q_OBJECT
 public:
-    AudioInjector(const Sound& sound, const AudioInjectorOptions& injectorOptions);
-    AudioInjector(const QByteArray& audioData, const AudioInjectorOptions& injectorOptions);
+    AudioInjector(SharedSoundPointer sound, const AudioInjectorOptions& injectorOptions);
+    AudioInjector(AudioDataPointer audioData, const AudioInjectorOptions& injectorOptions);
     ~AudioInjector();

     bool isFinished() const { return (stateHas(AudioInjectorState::Finished)); }
@@ -74,10 +74,9 @@ public:

     bool stateHas(AudioInjectorState state) const ;
     static void setLocalAudioInterface(AbstractAudioInterface* audioInterface) { _localAudioInterface = audioInterface; }
-    static AudioInjectorPointer playSoundAndDelete(const QByteArray& buffer, const AudioInjectorOptions options);
-    static AudioInjectorPointer playSound(const QByteArray& buffer, const AudioInjectorOptions options);
-    static AudioInjectorPointer playSound(SharedSoundPointer sound, const float volume,
-                                          const float stretchFactor, const glm::vec3 position);
+    static AudioInjectorPointer playSoundAndDelete(SharedSoundPointer sound, const AudioInjectorOptions& options);
+    static AudioInjectorPointer playSound(SharedSoundPointer sound, const AudioInjectorOptions& options);

 public slots:
     void restart();
@@ -106,7 +105,8 @@ private:

     static AbstractAudioInterface* _localAudioInterface;

-    QByteArray _audioData;
+    const SharedSoundPointer _sound;
+    AudioDataPointer _audioData;
     AudioInjectorOptions _options;
     AudioInjectorState _state { AudioInjectorState::NotFinished };
     bool _hasSentFirstFrame { false };
@@ -11,13 +11,9 @@

 #include "AudioInjectorLocalBuffer.h"

-AudioInjectorLocalBuffer::AudioInjectorLocalBuffer(const QByteArray& rawAudioArray) :
-    _rawAudioArray(rawAudioArray),
-    _shouldLoop(false),
-    _isStopped(false),
-    _currentOffset(0)
+AudioInjectorLocalBuffer::AudioInjectorLocalBuffer(AudioDataPointer audioData) :
+    _audioData(audioData)
 {

 }

 void AudioInjectorLocalBuffer::stop() {
@@ -39,7 +35,7 @@ qint64 AudioInjectorLocalBuffer::readData(char* data, qint64 maxSize) {
     if (!_isStopped) {

         // first copy to the end of the raw audio
-        int bytesToEnd = _rawAudioArray.size() - _currentOffset;
+        int bytesToEnd = (int)_audioData->getNumBytes() - _currentOffset;

         int bytesRead = maxSize;
@@ -47,7 +43,7 @@ qint64 AudioInjectorLocalBuffer::readData(char* data, qint64 maxSize) {
             bytesRead = bytesToEnd;
         }

-        memcpy(data, _rawAudioArray.data() + _currentOffset, bytesRead);
+        memcpy(data, _audioData->rawData() + _currentOffset, bytesRead);

         // now check if we are supposed to loop and if we can copy more from the beginning
         if (_shouldLoop && maxSize != bytesRead) {
@@ -56,7 +52,7 @@ qint64 AudioInjectorLocalBuffer::readData(char* data, qint64 maxSize) {
             _currentOffset += bytesRead;
         }

-        if (_shouldLoop && _currentOffset == _rawAudioArray.size()) {
+        if (_shouldLoop && _currentOffset == (int)_audioData->getNumBytes()) {
             _currentOffset = 0;
         }
@@ -70,12 +66,12 @@ qint64 AudioInjectorLocalBuffer::recursiveReadFromFront(char* data, qint64 maxSi
     // see how much we can get in this pass
     int bytesRead = maxSize;

-    if (bytesRead > _rawAudioArray.size()) {
-        bytesRead = _rawAudioArray.size();
+    if (bytesRead > (int)_audioData->getNumBytes()) {
+        bytesRead = _audioData->getNumBytes();
     }

     // copy that amount
-    memcpy(data, _rawAudioArray.data(), bytesRead);
+    memcpy(data, _audioData->rawData(), bytesRead);

     // check if we need to call ourselves again and pull from the front again
     if (bytesRead < maxSize) {
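For reference, the looping read that readData() and recursiveReadFromFront() implement over the shared buffer boils down to: copy up to the end of the data, then wrap to the front when looping is enabled. A self-contained sketch of that logic with made-up data:

```cpp
// Standalone sketch of the looping read over an immutable buffer, mirroring
// the readData() flow above. All names and sizes here are illustrative.
#include <algorithm>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

int main() {
    std::vector<char> audio = { 'a', 'b', 'c', 'd', 'e' };
    size_t currentOffset = 3;
    bool shouldLoop = true;

    std::vector<char> out(4);
    size_t maxSize = out.size();

    // first copy to the end of the raw audio
    size_t bytesToEnd = audio.size() - currentOffset;
    size_t bytesRead = std::min(maxSize, bytesToEnd);
    std::memcpy(out.data(), audio.data() + currentOffset, bytesRead);
    currentOffset += bytesRead;

    // wrap around and keep reading from the front if we are looping
    if (shouldLoop && bytesRead < maxSize) {
        size_t remaining = maxSize - bytesRead;
        std::memcpy(out.data() + bytesRead, audio.data(), remaining);
        currentOffset = remaining;
    }

    std::cout << std::string(out.begin(), out.end()) << "\n";  // prints "deab"
    return 0;
}
```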
@@ -16,10 +16,12 @@

 #include <glm/common.hpp>

+#include "Sound.h"
+
 class AudioInjectorLocalBuffer : public QIODevice {
     Q_OBJECT
 public:
-    AudioInjectorLocalBuffer(const QByteArray& rawAudioArray);
+    AudioInjectorLocalBuffer(AudioDataPointer audioData);

     void stop();
@@ -34,11 +36,10 @@ public:
 private:
     qint64 recursiveReadFromFront(char* data, qint64 maxSize);

-    QByteArray _rawAudioArray;
-    bool _shouldLoop;
-    bool _isStopped;
-    int _currentOffset;
+    AudioDataPointer _audioData;
+    bool _shouldLoop { false };
+    bool _isStopped { false };
+    int _currentOffset { 0 };
 };

 #endif // hifi_AudioInjectorLocalBuffer_h
@@ -33,48 +33,59 @@

 #include "flump3dec.h"

-QScriptValue soundSharedPointerToScriptValue(QScriptEngine* engine, const SharedSoundPointer& in) {
-    return engine->newQObject(new SoundScriptingInterface(in), QScriptEngine::ScriptOwnership);
+int audioDataPointerMetaTypeID = qRegisterMetaType<AudioDataPointer>("AudioDataPointer");
+
+using AudioConstants::AudioSample;
+
+AudioDataPointer AudioData::make(uint32_t numSamples, uint32_t numChannels,
+                                 const AudioSample* samples) {
+    // Compute the amount of memory required for the audio data object
+    const size_t bufferSize = numSamples * sizeof(AudioSample);
+    const size_t memorySize = sizeof(AudioData) + bufferSize;
+
+    // Allocate the memory for the audio data object and the buffer
+    void* memory = ::malloc(memorySize);
+    auto audioData = reinterpret_cast<AudioData*>(memory);
+    auto buffer = reinterpret_cast<AudioSample*>(audioData + 1);
+    assert(((char*)buffer - (char*)audioData) == sizeof(AudioData));
+
+    // Use placement new to construct the audio data object at the memory allocated
+    ::new(audioData) AudioData(numSamples, numChannels, buffer);
+
+    // Copy the samples to the buffer
+    memcpy(buffer, samples, bufferSize);
+
+    // Return shared_ptr that properly destruct the object and release the memory
+    return AudioDataPointer(audioData, [](AudioData* ptr) {
+        ptr->~AudioData();
+        ::free(ptr);
+    });
 }

-void soundSharedPointerFromScriptValue(const QScriptValue& object, SharedSoundPointer& out) {
-    if (auto soundInterface = qobject_cast<SoundScriptingInterface*>(object.toQObject())) {
-        out = soundInterface->getSound();
-    }
-}
-
-SoundScriptingInterface::SoundScriptingInterface(const SharedSoundPointer& sound) : _sound(sound) {
-    // During shutdown we can sometimes get an empty sound pointer back
-    if (_sound) {
-        QObject::connect(_sound.data(), &Sound::ready, this, &SoundScriptingInterface::ready);
-    }
-}
-
-Sound::Sound(const QUrl& url, bool isStereo, bool isAmbisonic) :
-    Resource(url),
-    _isStereo(isStereo),
-    _isAmbisonic(isAmbisonic),
-    _isReady(false)
-{
-}
+AudioData::AudioData(uint32_t numSamples, uint32_t numChannels, const AudioSample* samples)
+    : _numSamples(numSamples),
+      _numChannels(numChannels),
+      _data(samples)
+{}

 void Sound::downloadFinished(const QByteArray& data) {
     if (!_self) {
         soundProcessError(301, "Sound object has gone out of scope");
         return;
     }

     // this is a QRunnable, will delete itself after it has finished running
-    SoundProcessor* soundProcessor = new SoundProcessor(_url, data, _isStereo, _isAmbisonic);
+    auto soundProcessor = new SoundProcessor(_self, data);
     connect(soundProcessor, &SoundProcessor::onSuccess, this, &Sound::soundProcessSuccess);
     connect(soundProcessor, &SoundProcessor::onError, this, &Sound::soundProcessError);
     QThreadPool::globalInstance()->start(soundProcessor);
 }

-void Sound::soundProcessSuccess(QByteArray data, bool stereo, bool ambisonic, float duration) {
+void Sound::soundProcessSuccess(AudioDataPointer audioData) {
-    qCDebug(audio) << "Setting ready state for sound file" << _url.fileName();
+    qCDebug(audio) << "Setting ready state for sound file";

-    _byteArray = data;
-    _isStereo = stereo;
-    _isAmbisonic = ambisonic;
-    _duration = duration;
-    _isReady = true;
+    _audioData = std::move(audioData);
     finishedLoading(true);

     emit ready();
@@ -86,91 +97,102 @@ void Sound::soundProcessError(int error, QString str) {
     finishedLoading(false);
 }

-void SoundProcessor::run() {
-    qCDebug(audio) << "Processing sound file";
+SoundProcessor::SoundProcessor(QWeakPointer<Resource> sound, QByteArray data) :
+    _sound(sound),
+    _data(data)
+{
+}
+
+void SoundProcessor::run() {
+    auto sound = qSharedPointerCast<Sound>(_sound.lock());
+    if (!sound) {
+        emit onError(301, "Sound object has gone out of scope");
+        return;
+    }
+
+    QString fileName = sound->getURL().fileName().toLower();
+    qCDebug(audio) << "Processing sound file" << fileName;

-    // replace our byte array with the downloaded data
-    QByteArray rawAudioByteArray = QByteArray(_data);
-    QString fileName = _url.fileName().toLower();

     static const QString WAV_EXTENSION = ".wav";
     static const QString MP3_EXTENSION = ".mp3";
     static const QString RAW_EXTENSION = ".raw";
+    static const QString STEREO_RAW_EXTENSION = ".stereo.raw";
+    QString fileType;

+    QByteArray outputAudioByteArray;
+    AudioProperties properties;

     if (fileName.endsWith(WAV_EXTENSION)) {
-        QByteArray outputAudioByteArray;
-
-        int sampleRate = interpretAsWav(rawAudioByteArray, outputAudioByteArray);
-        if (sampleRate == 0) {
-            qCWarning(audio) << "Unsupported WAV file type";
-            emit onError(300, "Failed to load sound file, reason: unsupported WAV file type");
-            return;
-        }
-
-        downSample(outputAudioByteArray, sampleRate);
-
+        fileType = "WAV";
+        properties = interpretAsWav(_data, outputAudioByteArray);
     } else if (fileName.endsWith(MP3_EXTENSION)) {
-        QByteArray outputAudioByteArray;
-
-        int sampleRate = interpretAsMP3(rawAudioByteArray, outputAudioByteArray);
-        if (sampleRate == 0) {
-            qCWarning(audio) << "Unsupported MP3 file type";
-            emit onError(300, "Failed to load sound file, reason: unsupported MP3 file type");
-            return;
-        }
-
-        downSample(outputAudioByteArray, sampleRate);
-
-    } else if (fileName.endsWith(RAW_EXTENSION)) {
+        fileType = "MP3";
+        properties = interpretAsMP3(_data, outputAudioByteArray);
+    } else if (fileName.endsWith(STEREO_RAW_EXTENSION)) {
-        // check if this was a stereo raw file
-        // since it's raw the only way for us to know that is if the file was called .stereo.raw
-        if (fileName.toLower().endsWith("stereo.raw")) {
-            _isStereo = true;
-            qCDebug(audio) << "Processing sound of" << rawAudioByteArray.size() << "bytes as stereo audio file.";
-        }
-
+        qCDebug(audio) << "Processing sound of" << _data.size() << "bytes from" << fileName << "as stereo audio file.";
         // Process as 48khz RAW file
-        downSample(rawAudioByteArray, 48000);
-
+        properties.numChannels = 2;
+        properties.sampleRate = 48000;
+        outputAudioByteArray = _data;
+    } else if (fileName.endsWith(RAW_EXTENSION)) {
+        // Process as 48khz RAW file
+        properties.numChannels = 1;
+        properties.sampleRate = 48000;
+        outputAudioByteArray = _data;
     } else {
         qCWarning(audio) << "Unknown sound file type";
         emit onError(300, "Failed to load sound file, reason: unknown sound file type");
         return;
     }

-    emit onSuccess(_data, _isStereo, _isAmbisonic, _duration);
+    if (properties.sampleRate == 0) {
+        qCWarning(audio) << "Unsupported" << fileType << "file type";
+        emit onError(300, "Failed to load sound file, reason: unsupported " + fileType + " file type");
+        return;
+    }
+
+    auto data = downSample(outputAudioByteArray, properties);
+
+    int numSamples = data.size() / AudioConstants::SAMPLE_SIZE;
+    auto audioData = AudioData::make(numSamples, properties.numChannels,
+                                     (const AudioSample*)data.constData());
+    emit onSuccess(audioData);
 }

-void SoundProcessor::downSample(const QByteArray& rawAudioByteArray, int sampleRate) {
+QByteArray SoundProcessor::downSample(const QByteArray& rawAudioByteArray,
+                                      AudioProperties properties) {

     // we want to convert it to the format that the audio-mixer wants
     // which is signed, 16-bit, 24Khz

-    if (sampleRate == AudioConstants::SAMPLE_RATE) {
+    if (properties.sampleRate == AudioConstants::SAMPLE_RATE) {
         // no resampling needed
-        _data = rawAudioByteArray;
-    } else {
-
-        int numChannels = _isAmbisonic ? AudioConstants::AMBISONIC : (_isStereo ? AudioConstants::STEREO : AudioConstants::MONO);
-        AudioSRC resampler(sampleRate, AudioConstants::SAMPLE_RATE, numChannels);
-
-        // resize to max possible output
-        int numSourceFrames = rawAudioByteArray.size() / (numChannels * sizeof(AudioConstants::AudioSample));
-        int maxDestinationFrames = resampler.getMaxOutput(numSourceFrames);
-        int maxDestinationBytes = maxDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
-        _data.resize(maxDestinationBytes);
-
-        int numDestinationFrames = resampler.render((int16_t*)rawAudioByteArray.data(),
-                                                    (int16_t*)_data.data(),
-                                                    numSourceFrames);
-
-        // truncate to actual output
-        int numDestinationBytes = numDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
-        _data.resize(numDestinationBytes);
+        return rawAudioByteArray;
     }

+    AudioSRC resampler(properties.sampleRate, AudioConstants::SAMPLE_RATE,
+                       properties.numChannels);
+
+    // resize to max possible output
+    int numSourceFrames = rawAudioByteArray.size() / (properties.numChannels * AudioConstants::SAMPLE_SIZE);
+    int maxDestinationFrames = resampler.getMaxOutput(numSourceFrames);
+    int maxDestinationBytes = maxDestinationFrames * properties.numChannels * AudioConstants::SAMPLE_SIZE;
+    QByteArray data(maxDestinationBytes, Qt::Uninitialized);
+
+    int numDestinationFrames = resampler.render((int16_t*)rawAudioByteArray.data(),
+                                                (int16_t*)data.data(),
+                                                numSourceFrames);
+
+    // truncate to actual output
+    int numDestinationBytes = numDestinationFrames * properties.numChannels * sizeof(AudioSample);
+    data.resize(numDestinationBytes);
+
+    return data;
 }

 //
@@ -218,7 +240,9 @@ struct WAVEFormat {
 };

 // returns wavfile sample rate, used for resampling
-int SoundProcessor::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray) {
+SoundProcessor::AudioProperties SoundProcessor::interpretAsWav(const QByteArray& inputAudioByteArray,
+                                                               QByteArray& outputAudioByteArray) {
+    AudioProperties properties;

     // Create a data stream to analyze the data
     QDataStream waveStream(const_cast<QByteArray *>(&inputAudioByteArray), QIODevice::ReadOnly);
@@ -227,7 +251,7 @@ int SoundProcessor::interpretAsWav(const QByteArray& inputAudioByteArray, QByteA
     RIFFHeader riff;
     if (waveStream.readRawData((char*)&riff, sizeof(RIFFHeader)) != sizeof(RIFFHeader)) {
         qCWarning(audio) << "Not a valid WAVE file.";
-        return 0;
+        return AudioProperties();
     }

     // Parse the "RIFF" chunk
@@ -235,11 +259,11 @@ int SoundProcessor::interpretAsWav(const QByteArray& inputAudioByteArray, QByteA
         waveStream.setByteOrder(QDataStream::LittleEndian);
     } else {
         qCWarning(audio) << "Currently not supporting big-endian audio files.";
-        return 0;
+        return AudioProperties();
     }
     if (strncmp(riff.type, "WAVE", 4) != 0) {
         qCWarning(audio) << "Not a valid WAVE file.";
-        return 0;
+        return AudioProperties();
     }

     // Read chunks until the "fmt " chunk is found
@@ -247,7 +271,7 @@ int SoundProcessor::interpretAsWav(const QByteArray& inputAudioByteArray, QByteA
     while (true) {
         if (waveStream.readRawData((char*)&fmt, sizeof(chunk)) != sizeof(chunk)) {
             qCWarning(audio) << "Not a valid WAVE file.";
-            return 0;
+            return AudioProperties();
         }
         if (strncmp(fmt.id, "fmt ", 4) == 0) {
             break;
@@ -259,26 +283,26 @@ int SoundProcessor::interpretAsWav(const QByteArray& inputAudioByteArray, QByteA
     WAVEFormat wave;
     if (waveStream.readRawData((char*)&wave, sizeof(WAVEFormat)) != sizeof(WAVEFormat)) {
         qCWarning(audio) << "Not a valid WAVE file.";
-        return 0;
+        return AudioProperties();
     }

     // Parse the "fmt " chunk
     if (qFromLittleEndian<quint16>(wave.audioFormat) != WAVEFORMAT_PCM &&
         qFromLittleEndian<quint16>(wave.audioFormat) != WAVEFORMAT_EXTENSIBLE) {
         qCWarning(audio) << "Currently not supporting non PCM audio files.";
-        return 0;
+        return AudioProperties();
     }
-    if (qFromLittleEndian<quint16>(wave.numChannels) == 2) {
-        _isStereo = true;
-    } else if (qFromLittleEndian<quint16>(wave.numChannels) == 4) {
-        _isAmbisonic = true;
-    } else if (qFromLittleEndian<quint16>(wave.numChannels) != 1) {
+
+    properties.numChannels = qFromLittleEndian<quint16>(wave.numChannels);
+    if (properties.numChannels != 1 &&
+        properties.numChannels != 2 &&
+        properties.numChannels != 4) {
         qCWarning(audio) << "Currently not supporting audio files with other than 1/2/4 channels.";
-        return 0;
+        return AudioProperties();
     }
     if (qFromLittleEndian<quint16>(wave.bitsPerSample) != 16) {
         qCWarning(audio) << "Currently not supporting non 16bit audio files.";
-        return 0;
+        return AudioProperties();
     }

     // Skip any extra data in the "fmt " chunk
@@ -289,7 +313,7 @@ int SoundProcessor::interpretAsWav(const QByteArray& inputAudioByteArray, QByteA
     while (true) {
         if (waveStream.readRawData((char*)&data, sizeof(chunk)) != sizeof(chunk)) {
             qCWarning(audio) << "Not a valid WAVE file.";
-            return 0;
+            return AudioProperties();
         }
         if (strncmp(data.id, "data", 4) == 0) {
             break;
@@ -300,17 +324,21 @@ int SoundProcessor::interpretAsWav(const QByteArray& inputAudioByteArray, QByteA
     // Read the "data" chunk
     quint32 outputAudioByteArraySize = qFromLittleEndian<quint32>(data.size);
     outputAudioByteArray.resize(outputAudioByteArraySize);
-    if (waveStream.readRawData(outputAudioByteArray.data(), outputAudioByteArraySize) != (int)outputAudioByteArraySize) {
+    auto bytesRead = waveStream.readRawData(outputAudioByteArray.data(), outputAudioByteArraySize);
+    if (bytesRead != (int)outputAudioByteArraySize) {
         qCWarning(audio) << "Error reading WAV file";
-        return 0;
+        return AudioProperties();
     }

-    _duration = (float)(outputAudioByteArraySize / (wave.sampleRate * wave.numChannels * wave.bitsPerSample / 8.0f));
-    return wave.sampleRate;
+    properties.sampleRate = wave.sampleRate;
+    return properties;
 }

 // returns MP3 sample rate, used for resampling
-int SoundProcessor::interpretAsMP3(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray) {
+SoundProcessor::AudioProperties SoundProcessor::interpretAsMP3(const QByteArray& inputAudioByteArray,
+                                                               QByteArray& outputAudioByteArray) {
+    AudioProperties properties;

     using namespace flump3dec;

     static const int MP3_SAMPLES_MAX = 1152;
@@ -321,21 +349,19 @@ int SoundProcessor::interpretAsMP3(const QByteArray& inputAudioByteArray, QByteA
     // create bitstream
     Bit_stream_struc *bitstream = bs_new();
     if (bitstream == nullptr) {
-        return 0;
+        return AudioProperties();
     }

     // create decoder
     mp3tl *decoder = mp3tl_new(bitstream, MP3TL_MODE_16BIT);
     if (decoder == nullptr) {
         bs_free(bitstream);
-        return 0;
+        return AudioProperties();
     }

     // initialize
     bs_set_data(bitstream, (uint8_t*)inputAudioByteArray.data(), inputAudioByteArray.size());
     int frameCount = 0;
-    int sampleRate = 0;
-    int numChannels = 0;

     // skip ID3 tag, if present
     Mp3TlRetcode result = mp3tl_skip_id3(decoder);
@@ -357,8 +383,8 @@ int SoundProcessor::interpretAsMP3(const QByteArray& inputAudioByteArray, QByteA
         << "channels =" << header->channels;

     // save header info
-    sampleRate = header->sample_rate;
-    numChannels = header->channels;
+    properties.sampleRate = header->sample_rate;
+    properties.numChannels = header->channels;

     // skip Xing header, if present
     result = mp3tl_skip_xing(decoder, header);
@@ -388,14 +414,32 @@ int SoundProcessor::interpretAsMP3(const QByteArray& inputAudioByteArray, QByteA
     // free bitstream
     bs_free(bitstream);

-    int outputAudioByteArraySize = outputAudioByteArray.size();
-    if (outputAudioByteArraySize == 0) {
+    if (outputAudioByteArray.isEmpty()) {
         qCWarning(audio) << "Error decoding MP3 file";
-        return 0;
+        return AudioProperties();
     }

-    _isStereo = (numChannels == 2);
-    _isAmbisonic = false;
-    _duration = (float)outputAudioByteArraySize / (sampleRate * numChannels * sizeof(int16_t));
-    return sampleRate;
+    return properties;
 }

+
+QScriptValue soundSharedPointerToScriptValue(QScriptEngine* engine, const SharedSoundPointer& in) {
+    return engine->newQObject(new SoundScriptingInterface(in), QScriptEngine::ScriptOwnership);
+}
+
+void soundSharedPointerFromScriptValue(const QScriptValue& object, SharedSoundPointer& out) {
+    if (auto soundInterface = qobject_cast<SoundScriptingInterface*>(object.toQObject())) {
+        out = soundInterface->getSound();
+    }
+}
+
+SoundScriptingInterface::SoundScriptingInterface(const SharedSoundPointer& sound) : _sound(sound) {
+    // During shutdown we can sometimes get an empty sound pointer back
+    if (_sound) {
+        QObject::connect(_sound.data(), &Sound::ready, this, &SoundScriptingInterface::ready);
+    }
+}
+
+Sound::Sound(const QUrl& url, bool isStereo, bool isAmbisonic) : Resource(url) {
+    _numChannels = isAmbisonic ? 4 : (isStereo ? 2 : 1);
+}
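AudioData::make() above packs the AudioData header and its sample buffer into one malloc'd block, constructs the header with placement new, copies the samples in, and hands ownership to a shared_ptr whose custom deleter runs the destructor and frees the whole block. The same idiom in a self-contained form, with simplified names rather than the overte class:

```cpp
// Standalone sketch of the single-allocation + custom-deleter idiom used by
// AudioData::make(): header and buffer live in one contiguous block.
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <memory>
#include <new>

struct Block {
    Block(uint32_t numSamples, const int16_t* samples)
        : _numSamples(numSamples), _data(samples) {}
    const uint32_t _numSamples;
    const int16_t* const _data;
};

using BlockPointer = std::shared_ptr<const Block>;

BlockPointer makeBlock(uint32_t numSamples, const int16_t* samples) {
    const size_t bufferSize = numSamples * sizeof(int16_t);
    void* memory = ::malloc(sizeof(Block) + bufferSize);   // one allocation
    auto block = reinterpret_cast<Block*>(memory);
    auto buffer = reinterpret_cast<int16_t*>(block + 1);    // buffer follows the header
    ::new (block) Block(numSamples, buffer);                // placement new
    std::memcpy(buffer, samples, bufferSize);               // copy the samples in
    return BlockPointer(block, [](const Block* ptr) {
        ptr->~Block();                                      // run the destructor explicitly
        ::free(const_cast<Block*>(ptr));                    // release header + buffer together
    });
}

int main() {
    const int16_t samples[] = { 1, 2, 3 };
    auto block = makeBlock(3, samples);
    std::cout << block->_data[2] << "\n";   // prints 3
    return 0;
}
```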
@@ -19,61 +19,102 @@

 #include <ResourceCache.h>

 #include "AudioConstants.h"

+class AudioData;
+using AudioDataPointer = std::shared_ptr<const AudioData>;
+
+Q_DECLARE_METATYPE(AudioDataPointer);
+
+// AudioData is designed to be immutable
+// All of its members and methods are const
+// This makes it perfectly safe to access from multiple threads at once
+class AudioData {
+public:
+    using AudioSample = AudioConstants::AudioSample;
+
+    // Allocates the buffer memory contiguous with the object
+    static AudioDataPointer make(uint32_t numSamples, uint32_t numChannels,
+                                 const AudioSample* samples);
+
+    AudioData(uint32_t numSamples, uint32_t numChannels, const AudioSample* samples);
+
+    uint32_t getNumSamples() const { return _numSamples; }
+    uint32_t getNumChannels() const { return _numChannels; }
+    const AudioSample* data() const { return _data; }
+    const char* rawData() const { return reinterpret_cast<const char*>(_data); }
+
+    float isStereo() const { return _numChannels == 2; }
+    float isAmbisonic() const { return _numChannels == 4; }
+    float getDuration() const { return (float)_numSamples / (_numChannels * AudioConstants::SAMPLE_RATE); }
+    uint32_t getNumFrames() const { return _numSamples / _numChannels; }
+    uint32_t getNumBytes() const { return _numSamples * sizeof(AudioSample); }
+
+private:
+    const uint32_t _numSamples { 0 };
+    const uint32_t _numChannels { 0 };
+    const AudioSample* const _data { nullptr };
+};
+
 class Sound : public Resource {
     Q_OBJECT

 public:
     Sound(const QUrl& url, bool isStereo = false, bool isAmbisonic = false);

-    bool isStereo() const { return _isStereo; }
-    bool isAmbisonic() const { return _isAmbisonic; }
-    bool isReady() const { return _isReady; }
-    float getDuration() const { return _duration; }
-
-    const QByteArray& getByteArray() const { return _byteArray; }
+    bool isReady() const { return (bool)_audioData; }
+
+    bool isStereo() const { return _audioData ? _audioData->isStereo() : false; }
+    bool isAmbisonic() const { return _audioData ? _audioData->isAmbisonic() : false; }
+    float getDuration() const { return _audioData ? _audioData->getDuration() : 0.0f; }
+
+    AudioDataPointer getAudioData() const { return _audioData; }
+
+    int getNumChannels() const { return _numChannels; }

 signals:
     void ready();

 protected slots:
-    void soundProcessSuccess(QByteArray data, bool stereo, bool ambisonic, float duration);
+    void soundProcessSuccess(AudioDataPointer audioData);
     void soundProcessError(int error, QString str);

 private:
-    QByteArray _byteArray;
-    bool _isStereo;
-    bool _isAmbisonic;
-    bool _isReady;
-    float _duration; // In seconds
-
     virtual void downloadFinished(const QByteArray& data) override;

+    AudioDataPointer _audioData;
+
+    // Only used for caching until the download has finished
+    int _numChannels { 0 };
 };

 class SoundProcessor : public QObject, public QRunnable {
     Q_OBJECT

 public:
-    SoundProcessor(const QUrl& url, const QByteArray& data, bool stereo, bool ambisonic)
-        : _url(url), _data(data), _isStereo(stereo), _isAmbisonic(ambisonic)
-    {
-    }
+    struct AudioProperties {
+        uint8_t numChannels { 0 };
+        uint32_t sampleRate { 0 };
+    };
+
+    SoundProcessor(QWeakPointer<Resource> sound, QByteArray data);

     virtual void run() override;

-    void downSample(const QByteArray& rawAudioByteArray, int sampleRate);
-    int interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray);
-    int interpretAsMP3(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray);
+    QByteArray downSample(const QByteArray& rawAudioByteArray,
+                          AudioProperties properties);
+    AudioProperties interpretAsWav(const QByteArray& inputAudioByteArray,
+                                   QByteArray& outputAudioByteArray);
+    AudioProperties interpretAsMP3(const QByteArray& inputAudioByteArray,
+                                   QByteArray& outputAudioByteArray);

 signals:
-    void onSuccess(QByteArray data, bool stereo, bool ambisonic, float duration);
+    void onSuccess(AudioDataPointer audioData);
     void onError(int error, QString str);

 private:
-    QUrl _url;
-    QByteArray _data;
-    bool _isStereo;
-    bool _isAmbisonic;
-    float _duration;
+    const QWeakPointer<Resource> _sound;
+    const QByteArray _data;
 };

 typedef QSharedPointer<Sound> SharedSoundPointer;
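The new AudioData accessors declared above make the bookkeeping explicit: getNumSamples() counts samples across all channels, getNumFrames() divides that by the channel count, getNumBytes() multiplies by the sample size, and getDuration() divides by channels times the mixer sample rate. A small worked example, assuming the 24 kHz mixer rate implied by the "signed, 16-bit, 24Khz" comment in downSample():

```cpp
// Worked example of the frame/byte/duration arithmetic used by the new
// AudioData accessors. SAMPLE_RATE = 24000 is an assumption taken from the
// downSample() comment in the diff, not a value defined in this sketch's scope.
#include <cstdint>
#include <iostream>

int main() {
    const uint32_t SAMPLE_RATE = 24000;   // assumed mixer rate
    const uint32_t numSamples = 96000;    // total samples across all channels
    const uint32_t numChannels = 2;       // stereo

    uint32_t numFrames = numSamples / numChannels;                     // 48000
    uint32_t numBytes = numSamples * sizeof(int16_t);                  // 192000
    float duration = (float)numSamples / (numChannels * SAMPLE_RATE);  // 2.0 s

    std::cout << numFrames << " frames, " << numBytes << " bytes, "
              << duration << " s\n";
    return 0;
}
```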
@@ -1031,7 +1031,14 @@ void EntityTreeRenderer::playEntityCollisionSound(const EntityItemPointer& entit
     // Shift the pitch down by ln(1 + (size / COLLISION_SIZE_FOR_STANDARD_PITCH)) / ln(2)
     const float COLLISION_SIZE_FOR_STANDARD_PITCH = 0.2f;
     const float stretchFactor = logf(1.0f + (minAACube.getLargestDimension() / COLLISION_SIZE_FOR_STANDARD_PITCH)) / logf(2.0f);
-    AudioInjector::playSound(collisionSound, volume, stretchFactor, collision.contactPoint);
+
+    AudioInjectorOptions options;
+    options.stereo = collisionSound->isStereo();
+    options.position = collision.contactPoint;
+    options.volume = volume;
+    options.pitch = 1.0f / stretchFactor;
+
+    AudioInjector::playSoundAndDelete(collisionSound, options);
 }

 void EntityTreeRenderer::entityCollisionWithEntity(const EntityItemID& idA, const EntityItemID& idB,
@@ -63,15 +63,15 @@ ScriptAudioInjector* AudioScriptingInterface::playSound(SharedSoundPointer sound
         optionsCopy.ambisonic = sound->isAmbisonic();
         optionsCopy.localOnly = optionsCopy.localOnly || sound->isAmbisonic(); // force localOnly when Ambisonic

-        auto injector = AudioInjector::playSound(sound->getByteArray(), optionsCopy);
+        auto injector = AudioInjector::playSound(sound, optionsCopy);
         if (!injector) {
-            return NULL;
+            return nullptr;
         }
         return new ScriptAudioInjector(injector);

     } else {
         qCDebug(scriptengine) << "AudioScriptingInterface::playSound called with null Sound object.";
-        return NULL;
+        return nullptr;
     }
 }
@@ -766,8 +766,8 @@ protected:
     */
     Q_INVOKABLE void entityScriptContentAvailable(const EntityItemID& entityID, const QString& scriptOrURL, const QString& contents, bool isURL, bool success, const QString& status);

-    EntityItemID currentEntityIdentifier {}; // Contains the defining entity script entity id during execution, if any. Empty for interface script execution.
-    QUrl currentSandboxURL {}; // The toplevel url string for the entity script that loaded the code being executed, else empty.
+    EntityItemID currentEntityIdentifier; // Contains the defining entity script entity id during execution, if any. Empty for interface script execution.
+    QUrl currentSandboxURL; // The toplevel url string for the entity script that loaded the code being executed, else empty.
     void doWithEnvironment(const EntityItemID& entityID, const QUrl& sandboxURL, std::function<void()> operation);
     void callWithEnvironment(const EntityItemID& entityID, const QUrl& sandboxURL, QScriptValue function, QScriptValue thisObject, QScriptValueList args);
@@ -211,7 +211,7 @@ void TabletScriptingInterface::playSound(TabletAudioEvents aEvent) {
         options.ambisonic = sound->isAmbisonic();
         options.localOnly = true;

-        AudioInjectorPointer injector = AudioInjector::playSoundAndDelete(sound->getByteArray(), options);
+        AudioInjectorPointer injector = AudioInjector::playSoundAndDelete(sound, options);
     }
 }
@@ -37,7 +37,6 @@ void SoundEffect::play(QVariant position) {
         _injector->setOptions(options);
         _injector->restart();
     } else {
-        QByteArray samples = _sound->getByteArray();
-        _injector = AudioInjector::playSound(samples, options);
+        _injector = AudioInjector::playSound(_sound, options);
     }
 }