Mirror of https://github.com/overte-org/overte.git
Add support for WAV assets with arbitrary sample rate, not just 48 kHz. Fast path when already 24 kHz.
parent 7ad384d731
commit 452f081366
2 changed files with 41 additions and 33 deletions
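In short: interpretAsWav() now reports the file's sample rate (0 on failure) instead of rejecting everything that is not 48 kHz, and downSample() either copies the buffer straight through when that rate already matches the 24 kHz mixer rate or resamples it. A minimal standalone sketch of that decision, using a stand-in constant for AudioConstants::SAMPLE_RATE (the names below are illustrative, not part of the change):

#include <cstdio>
#include <initializer_list>

// Stand-in for AudioConstants::SAMPLE_RATE (the audio-mixer runs at 24 kHz).
static const int MIXER_SAMPLE_RATE = 24000;

// Mirrors the branch added in Sound::downSample(): equal rates take the fast path.
static bool needsResampling(int wavSampleRate) {
    return wavSampleRate != MIXER_SAMPLE_RATE;
}

int main() {
    // 0 is the sentinel interpretAsWav() now returns for unsupported or unreadable files.
    for (int rate : {0, 22050, 24000, 44100, 48000, 96000}) {
        if (rate == 0) {
            std::printf("rate %6d: rejected, sound is not loaded\n", rate);
        } else if (needsResampling(rate)) {
            std::printf("rate %6d: resampled %d -> %d via AudioSRC\n", rate, rate, MIXER_SAMPLE_RATE);
        } else {
            std::printf("rate %6d: fast path, buffer used as-is\n", rate);
        }
    }
    return 0;
}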
Sound.cpp

@@ -62,8 +62,10 @@ void Sound::downloadFinished(const QByteArray& data) {
         QByteArray outputAudioByteArray;
 
-        interpretAsWav(rawAudioByteArray, outputAudioByteArray);
-        downSample(outputAudioByteArray);
+        int sampleRate = interpretAsWav(rawAudioByteArray, outputAudioByteArray);
+        if (sampleRate != 0) {
+            downSample(outputAudioByteArray, sampleRate);
+        }
     } else if (fileName.endsWith(RAW_EXTENSION)) {
         // check if this was a stereo raw file
         // since it's raw the only way for us to know that is if the file was called .stereo.raw
@@ -73,7 +75,7 @@ void Sound::downloadFinished(const QByteArray& data) {
         }
 
         // Process as RAW file
-        downSample(rawAudioByteArray);
+        downSample(rawAudioByteArray, 48000);
     } else {
         qCDebug(audio) << "Unknown sound file type";
     }
@@ -84,29 +86,35 @@ void Sound::downloadFinished(const QByteArray& data) {
     emit ready();
 }
 
-void Sound::downSample(const QByteArray& rawAudioByteArray) {
-    // assume that this was a RAW file and is now an array of samples that are
-    // signed, 16-bit, 48Khz
-
+void Sound::downSample(const QByteArray& rawAudioByteArray, int sampleRate) {
+
     // we want to convert it to the format that the audio-mixer wants
     // which is signed, 16-bit, 24Khz
 
-    int numChannels = _isStereo ? 2 : 1;
-    AudioSRC resampler(48000, AudioConstants::SAMPLE_RATE, numChannels);
-
-    // resize to max possible output
-    int numSourceFrames = rawAudioByteArray.size() / (numChannels * sizeof(AudioConstants::AudioSample));
-    int maxDestinationFrames = resampler.getMaxOutput(numSourceFrames);
-    int maxDestinationBytes = maxDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
-    _byteArray.resize(maxDestinationBytes);
-
-    int numDestinationFrames = resampler.render((int16_t*)rawAudioByteArray.data(),
-                                                (int16_t*)_byteArray.data(),
-                                                numSourceFrames);
-
-    // truncate to actual output
-    int numDestinationBytes = numDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
-    _byteArray.resize(numDestinationBytes);
+    if (sampleRate == AudioConstants::SAMPLE_RATE) {
+
+        // no resampling needed
+        _byteArray = rawAudioByteArray;
+
+    } else {
+
+        int numChannels = _isStereo ? 2 : 1;
+        AudioSRC resampler(sampleRate, AudioConstants::SAMPLE_RATE, numChannels);
+
+        // resize to max possible output
+        int numSourceFrames = rawAudioByteArray.size() / (numChannels * sizeof(AudioConstants::AudioSample));
+        int maxDestinationFrames = resampler.getMaxOutput(numSourceFrames);
+        int maxDestinationBytes = maxDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
+        _byteArray.resize(maxDestinationBytes);
+
+        int numDestinationFrames = resampler.render((int16_t*)rawAudioByteArray.data(),
+                                                    (int16_t*)_byteArray.data(),
+                                                    numSourceFrames);
+
+        // truncate to actual output
+        int numDestinationBytes = numDestinationFrames * numChannels * sizeof(AudioConstants::AudioSample);
+        _byteArray.resize(numDestinationBytes);
+    }
 }
 
 //
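For reference, the resampling branch above uses AudioSRC exactly as the old code did, only with the source rate taken from the file instead of the hard-coded 48000. A minimal sketch of that call pattern (constructor, getMaxOutput(), render()) as it appears in the hunk; the resampleToMixerRate() wrapper and the include path are illustrative assumptions, and AudioSRC itself is not touched by this commit:

#include <QByteArray>
#include <cstdint>
#include "AudioSRC.h"   // assumed include path; the class ships with the audio library

// Same pattern as the else-branch above: size the destination for the worst case,
// render, then truncate to the frame count actually produced.
QByteArray resampleToMixerRate(const QByteArray& source, int sourceRate, int destRate, int numChannels) {
    AudioSRC resampler(sourceRate, destRate, numChannels);

    int numSourceFrames = source.size() / (numChannels * sizeof(int16_t));
    int maxDestinationFrames = resampler.getMaxOutput(numSourceFrames);

    QByteArray destination;
    destination.resize(maxDestinationFrames * numChannels * sizeof(int16_t));

    int numDestinationFrames = resampler.render((int16_t*)source.data(),
                                                (int16_t*)destination.data(),
                                                numSourceFrames);

    // truncate to actual output
    destination.resize(numDestinationFrames * numChannels * sizeof(int16_t));
    return destination;
}

Calling it with (source, 44100, 24000, 2), for example, reproduces what downSample() now does for a stereo 44.1 kHz WAV.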
@@ -160,7 +168,8 @@ struct CombinedHeader {
     WAVEHeader wave;
 };
 
-void Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray) {
+// returns wavfile sample rate, used for resampling
+int Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray) {
 
     CombinedHeader fileHeader;
 
@@ -174,36 +183,33 @@ void Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray) {
             // descriptor.id == "RIFX" also signifies BigEndian file
             // waveStream.setByteOrder(QDataStream::BigEndian);
             qCDebug(audio) << "Currently not supporting big-endian audio files.";
-            return;
+            return 0;
         }
 
         if (strncmp(fileHeader.riff.type, "WAVE", 4) != 0
             || strncmp(fileHeader.wave.descriptor.id, "fmt", 3) != 0) {
             qCDebug(audio) << "Not a WAVE Audio file.";
-            return;
+            return 0;
         }
 
         // added the endianess check as an extra level of security
 
         if (qFromLittleEndian<quint16>(fileHeader.wave.audioFormat) != 1) {
             qCDebug(audio) << "Currently not supporting non PCM audio files.";
-            return;
+            return 0;
         }
         if (qFromLittleEndian<quint16>(fileHeader.wave.numChannels) == 2) {
             _isStereo = true;
         } else if (qFromLittleEndian<quint16>(fileHeader.wave.numChannels) > 2) {
             qCDebug(audio) << "Currently not support audio files with more than 2 channels.";
+            return 0;
         }
 
         if (qFromLittleEndian<quint16>(fileHeader.wave.bitsPerSample) != 16) {
             qCDebug(audio) << "Currently not supporting non 16bit audio files.";
-            return;
+            return 0;
         }
-        if (qFromLittleEndian<quint32>(fileHeader.wave.sampleRate) != 48000) {
-            qCDebug(audio) << "Currently not supporting non 48KHz audio files.";
-            return;
-        }
 
         // Skip any extra data in the WAVE chunk
         waveStream.skipRawData(fileHeader.wave.descriptor.size - (sizeof(WAVEHeader) - sizeof(chunk)));
 
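The fields validated above (riff.type, wave.descriptor.id, audioFormat, numChannels, bitsPerSample, and now sampleRate) all come from the standard RIFF/"fmt " header. The CombinedHeader/WAVEHeader declarations are outside this diff; the layout below is an assumed sketch of the canonical structure, shown only to make those field accesses and the sizeof(WAVEHeader)/sizeof(chunk) arithmetic concrete:

#include <QtGlobal>

// Assumed layout (not copied from Sound.cpp): the canonical RIFF / "fmt " header.
struct chunk {
    char    id[4];          // "RIFF", "fmt ", "data", ...
    quint32 size;           // chunk payload size, little-endian
};

struct RIFFHeader {
    chunk descriptor;       // id == "RIFF"
    char  type[4];          // "WAVE"
};

struct WAVEHeader {         // the "fmt " chunk
    chunk   descriptor;     // id == "fmt "
    quint16 audioFormat;    // 1 == PCM, the only format accepted above
    quint16 numChannels;    // 1 or 2 accepted; 2 sets _isStereo
    quint32 sampleRate;     // previously required to be 48000, now returned to the caller
    quint32 byteRate;
    quint16 blockAlign;
    quint16 bitsPerSample;  // must be 16
};

struct CombinedHeader {
    RIFFHeader riff;
    WAVEHeader wave;
};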
@@ -218,7 +224,7 @@ void Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray) {
                 waveStream.skipRawData(dataHeader.descriptor.size);
             } else {
                 qCDebug(audio) << "Could not read wav audio data header.";
-                return;
+                return 0;
             }
         }
 
@@ -227,12 +233,14 @@ void Sound::interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray) {
         outputAudioByteArray.resize(outputAudioByteArraySize);
         if (waveStream.readRawData(outputAudioByteArray.data(), outputAudioByteArraySize) != (int)outputAudioByteArraySize) {
             qCDebug(audio) << "Error reading WAV file";
+            return 0;
         }
 
         _duration = (float) (outputAudioByteArraySize / (fileHeader.wave.sampleRate * fileHeader.wave.numChannels * fileHeader.wave.bitsPerSample / 8.0f));
 
+        return fileHeader.wave.sampleRate;
+
     } else {
         qCDebug(audio) << "Could not read wav audio file header.";
-        return;
+        return 0;
     }
 }
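The _duration computation above divides the payload size by the byte rate (sampleRate * numChannels * bitsPerSample / 8). For example, 1,764,000 bytes of 16-bit stereo audio at 44100 Hz works out to 1,764,000 / (44100 * 2 * 2) = 10.0 seconds, regardless of whether the buffer is later resampled to 24 kHz.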
Sound.h

@@ -40,8 +40,8 @@ private:
     bool _isReady;
     float _duration; // In seconds
 
-    void downSample(const QByteArray& rawAudioByteArray);
-    void interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray);
+    void downSample(const QByteArray& rawAudioByteArray, int sampleRate);
+    int interpretAsWav(const QByteArray& inputAudioByteArray, QByteArray& outputAudioByteArray);
 
     virtual void downloadFinished(const QByteArray& data) override;
 };
|
|
Loading…
Reference in a new issue