Mirror of https://github.com/JulianGro/overte.git, synced 2025-04-13 12:12:36 +02:00
Removed SoundProcessor's dependence on Sound objects
This commit is contained in: commit 90d5b2a6d4 (parent 74f0ac5b78)
2 changed files with 54 additions and 34 deletions
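For context: before this change, SoundProcessor held a Sound* and called back into it (setStereo, downSample, interpretAsWav, setReady); afterwards it receives the stereo/ambisonic flags by value in its constructor and reports results through its onSuccess/onError signals, which Sound::downloadFinished connects to the new soundProcessSuccess/soundProcessError slots. The sketch below shows that decoupled QRunnable-plus-signals pattern in isolation. It is only an illustration, not code from this commit: the Worker/Consumer class names, the toUpper() stand-in for audio processing, and the main.cpp/moc setup are assumptions.

// main.cpp — a minimal sketch, assuming a Qt 5/6 build with moc enabled (qmake or CMake AUTOMOC).
#include <QCoreApplication>
#include <QObject>
#include <QRunnable>
#include <QThreadPool>
#include <QByteArray>
#include <QDebug>

// Worker mirrors the reworked SoundProcessor: it copies its inputs, keeps no
// pointer back to the object that created it, and reports through signals.
class Worker : public QObject, public QRunnable {
    Q_OBJECT
public:
    explicit Worker(const QByteArray& data) : _data(data) { setAutoDelete(true); }

    void run() override {                       // executed on a QThreadPool thread
        if (_data.isEmpty()) {
            emit onError(300, "empty input");   // error path: emit a signal instead of touching the owner
            return;
        }
        emit onSuccess(_data.toUpper());        // toUpper() is a stand-in for real processing
    }

signals:
    void onSuccess(QByteArray result);
    void onError(int code, QString reason);

private:
    QByteArray _data;
};

// Consumer plays the role of Sound: it reacts to the worker's signals in slots
// that run on its own (main) thread via queued connections.
class Consumer : public QObject {
    Q_OBJECT
public slots:
    void handleSuccess(QByteArray result) { qDebug() << "done:" << result; QCoreApplication::quit(); }
    void handleError(int code, QString reason) { qDebug() << "failed:" << code << reason; QCoreApplication::quit(); }
};

int main(int argc, char** argv) {
    QCoreApplication app(argc, argv);
    Consumer consumer;

    auto* worker = new Worker("hello");         // QRunnable deletes itself after run() (setAutoDelete)
    QObject::connect(worker, &Worker::onSuccess, &consumer, &Consumer::handleSuccess);
    QObject::connect(worker, &Worker::onError, &consumer, &Consumer::handleError);
    QThreadPool::globalInstance()->start(worker);

    return app.exec();
}

#include "main.moc"

The same shape appears in the diff below: Sound::downloadFinished constructs the processor with plain inputs, connects the two signals, and hands the runnable to QThreadPool::globalInstance(), so the worker never touches the Sound object directly and results cross threads as by-value signal arguments.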
@@ -51,67 +51,81 @@ Sound::Sound(const QUrl& url, bool isStereo, bool isAmbisonic) :
     _isAmbisonic(isAmbisonic),
     _isReady(false)
 {
 
 }
 
 void Sound::downloadFinished(const QByteArray& data) {
-    SoundProcessor* soundProcessor = new SoundProcessor(_url, data, this);
     // this is a QRunnable, will delete itself after it has finished running
+    SoundProcessor* soundProcessor = new SoundProcessor(_url, data, _isStereo, _isAmbisonic);
+    connect(soundProcessor, SIGNAL(onSuccess(QByteArray, bool, bool, float)), SLOT(soundProcessSuccess(QByteArray, bool, bool, float)));
+    connect(soundProcessor, SIGNAL(onError(int, QString)), SLOT(soundProcessError(int, QString)));
     QThreadPool::globalInstance()->start(soundProcessor);
 }
 
-void Sound::setReady(bool isReady) {
-    if (isReady) {
-        finishedLoading(true);
-        _isReady = true;
-        emit ready();
-        qCDebug(audio) << "Setting ready state for audio asset" << _url;
-    }
+void Sound::soundProcessSuccess(QByteArray data, bool stereo, bool ambisonic, float duration) {
+
+    qCDebug(audio) << "Setting ready state for sound file" << _url.toDisplayString();
+
+    _byteArray = data;
+    _isStereo = stereo;
+    _isAmbisonic = ambisonic;
+    _duration = duration;
+    _isReady = true;
+    finishedLoading(true);
+
+    emit ready();
 }
 
+void Sound::soundProcessError(int error, QString str) {
+    qCCritical(audio) << "Failed to process sound file" << _url.toDisplayString() << "code =" << error << str;
+    emit failed(QNetworkReply::UnknownContentError);
+    finishedLoading(false);
+}
+
 void SoundProcessor::run() {
 
     qCDebug(audio) << "Processing sound file" << _url.toDisplayString();
 
     // replace our byte array with the downloaded data
     QByteArray rawAudioByteArray = QByteArray(_data);
     QString fileName = _url.fileName().toLower();
 
-    //Sound* sound = dynamic_cast<Sound*>(_sound);
-
     static const QString WAV_EXTENSION = ".wav";
     static const QString RAW_EXTENSION = ".raw";
     if (fileName.endsWith(WAV_EXTENSION)) {
 
         QByteArray outputAudioByteArray;
 
-        int sampleRate = _sound->interpretAsWav(rawAudioByteArray, outputAudioByteArray);
+        int sampleRate = interpretAsWav(rawAudioByteArray, outputAudioByteArray);
         if (sampleRate != 0) {
-            _sound->downSample(outputAudioByteArray, sampleRate);
+            downSample(outputAudioByteArray, sampleRate);
         }
     } else if (fileName.endsWith(RAW_EXTENSION)) {
         // check if this was a stereo raw file
         // since it's raw the only way for us to know that is if the file was called .stereo.raw
         if (fileName.toLower().endsWith("stereo.raw")) {
-            _sound->setStereo(true);
+            _isStereo = true;
            qCDebug(audio) << "Processing sound of" << rawAudioByteArray.size() << "bytes from" << _url << "as stereo audio file.";
         }
 
         // Process as 48khz RAW file
-        _sound->downSample(rawAudioByteArray, 48000);
+        downSample(rawAudioByteArray, 48000);
     } else {
         qCDebug(audio) << "Unknown sound file type";
+        emit onError(300, "Failed to load sound file, reason: unknown sound file type");
+        return;
     }
 
-    _sound->setReady(true);
+    emit onSuccess(_data, _isStereo, _isAmbisonic, _duration);
 }
 
-void Sound::downSample(const QByteArray& rawAudioByteArray, int sampleRate) {
+void SoundProcessor::downSample(const QByteArray& rawAudioByteArray, int sampleRate) {
 
     // we want to convert it to the format that the audio-mixer wants
     // which is signed, 16-bit, 24Khz
 
     if (sampleRate == AudioConstants::SAMPLE_RATE) {
 
         // no resampling needed
-        _byteArray = rawAudioByteArray;
+        _data = rawAudioByteArray;
     } else {
 
         int numChannels = _isAmbisonic ? AudioConstants::AMBISONIC : (_isStereo ? AudioConstants::STEREO : AudioConstants::MONO);
 
@@ -121,15 +135,15 @@ void Sound::downSample(const QByteArray& rawAudioByteArray, int sampleRate) {
         int numSourceFrames = rawAudioByteArray.size() / (numChannels * sizeof(AudioConstants::AudioSample));
         int maxDestinationFrames = resampler.getMaxOutput(numSourceFrames);
         int maxDestinationBytes = maxDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
-        _byteArray.resize(maxDestinationBytes);
+        _data.resize(maxDestinationBytes);
 
         int numDestinationFrames = resampler.render((int16_t*)rawAudioByteArray.data(),
-                                                    (int16_t*)_byteArray.data(),
+                                                    (int16_t*)_data.data(),
                                                     numSourceFrames);
 
         // truncate to actual output
         int numDestinationBytes = numDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
-        _byteArray.resize(numDestinationBytes);
+        _data.resize(numDestinationBytes);
     }
 }
 
@@ -178,7 +192,7 @@ struct WAVEFormat {
 };
 
 // returns wavfile sample rate, used for resampling
-int Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray) {
+int SoundProcessor::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray) {
 
     // Create a data stream to analyze the data
     QDataStream waveStream(const_cast<QByteArray *>(&inputAudioByteArray), QIODevice::ReadOnly);
@@ -32,14 +32,12 @@ public:
 
     const QByteArray& getByteArray() const { return _byteArray; }
 
-    void setStereo(bool stereo) { _isStereo = stereo; }
-    void setReady(bool ready);
-
-    void downSample(const QByteArray& rawAudioByteArray, int sampleRate);
-    int interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray);
-
 signals:
     void ready();
 
+protected slots:
+    void soundProcessSuccess(QByteArray data, bool stereo, bool ambisonic, float duration);
+    void soundProcessError(int error, QString str);
+
 private:
     QByteArray _byteArray;
@@ -55,18 +53,26 @@ class SoundProcessor : public QObject, public QRunnable {
     Q_OBJECT
 
 public:
-    SoundProcessor(const QUrl& url, const QByteArray& data, Sound* sound)
-        : _url(url), _data(data), _sound(sound)
+    SoundProcessor(const QUrl& url, const QByteArray& data, const bool stereo, const bool ambisonic)
+        : _url(url), _data(data), _isStereo(stereo), _isAmbisonic(ambisonic)
     {
     }
 
     virtual void run() override;
 
+    void downSample(const QByteArray& rawAudioByteArray, int sampleRate);
+    int interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray);
+
+signals:
+    void onSuccess(QByteArray data, bool stereo, bool ambisonic, float duration);
+    void onError(int error, QString str);
+
 private:
     QUrl _url;
     QByteArray _data;
-    Sound* _sound;
-
+    bool _isStereo;
+    bool _isAmbisonic;
+    float _duration;
 };
 
 typedef QSharedPointer<Sound> SharedSoundPointer;