Merge pull request #10876 from Atlante45/fix/ac-crash

Fix AC crash (buffer overflow)
Andrew Meadows, 2017-07-11 14:26:08 -07:00 (committed by GitHub)
commit 782155e9ea
10 changed files with 137 additions and 65 deletions

View file

@@ -604,6 +604,24 @@ void Agent::processAgentAvatar() {
AvatarData::AvatarDataDetail dataDetail = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) ? AvatarData::SendAllData : AvatarData::CullSmallData;
QByteArray avatarByteArray = scriptedAvatar->toByteArrayStateful(dataDetail);
int maximumByteArraySize = NLPacket::maxPayloadSize(PacketType::AvatarData) - sizeof(AvatarDataSequenceNumber);
if (avatarByteArray.size() > maximumByteArraySize) {
qWarning() << " scriptedAvatar->toByteArrayStateful() resulted in very large buffer:" << avatarByteArray.size() << "... attempt to drop facial data";
avatarByteArray = scriptedAvatar->toByteArrayStateful(dataDetail, true);
if (avatarByteArray.size() > maximumByteArraySize) {
qWarning() << " scriptedAvatar->toByteArrayStateful() without facial data resulted in very large buffer:" << avatarByteArray.size() << "... reduce to MinimumData";
avatarByteArray = scriptedAvatar->toByteArrayStateful(AvatarData::MinimumData, true);
if (avatarByteArray.size() > maximumByteArraySize) {
qWarning() << " scriptedAvatar->toByteArrayStateful() MinimumData resulted in very large buffer:" << avatarByteArray.size() << "... FAIL!!";
return;
}
}
}
scriptedAvatar->doneEncoding(true);
static AvatarDataSequenceNumber sequenceNumber = 0;
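
The hunk above caps the serialized payload at NLPacket::maxPayloadSize(PacketType::AvatarData) minus the sequence-number prefix and degrades the data in steps before giving up. Below is a minimal, self-contained sketch of that size-capped fallback pattern; the serializer, the fake byte counts, and the 1400-byte budget are illustrative stand-ins, not the real AvatarData API.

```cpp
#include <cstddef>
#include <cstdio>
#include <string>

// Hypothetical stand-ins for the real serializer and its detail levels.
enum class Detail { SendAllData, CullSmallData, MinimumData };

std::string serialize(Detail detail, bool dropFaceTracking) {
    // Placeholder: the payload shrinks as detail is reduced.
    size_t size = (detail == Detail::MinimumData) ? 200 : (dropFaceTracking ? 900 : 2000);
    return std::string(size, 'x');
}

// Try full detail, then drop facial data, then fall back to minimum data;
// return an empty payload if even that exceeds the cap.
std::string buildCappedPayload(Detail detail, size_t maxPayloadBytes) {
    std::string bytes = serialize(detail, false);
    if (bytes.size() > maxPayloadBytes) {
        std::fprintf(stderr, "payload too large (%zu), dropping facial data\n", bytes.size());
        bytes = serialize(detail, true);
    }
    if (bytes.size() > maxPayloadBytes) {
        std::fprintf(stderr, "still too large (%zu), reducing to MinimumData\n", bytes.size());
        bytes = serialize(Detail::MinimumData, true);
    }
    if (bytes.size() > maxPayloadBytes) {
        std::fprintf(stderr, "MinimumData still too large (%zu), giving up\n", bytes.size());
        return {};
    }
    return bytes;
}

int main() {
    // Assume a ~1400-byte payload budget for illustration only.
    std::string payload = buildCappedPayload(Detail::SendAllData, 1400);
    std::printf("sending %zu bytes\n", payload.size());
}
```

The AvatarMixerSlave hunk below applies the same pattern per broadcast avatar, dropping the avatar entirely when even MinimumData exceeds the limit.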

View file

@@ -383,11 +383,11 @@ void AvatarMixerSlave::broadcastAvatarDataToAgent(const SharedNodePointer& node)
qCWarning(avatars) << "otherAvatar.toByteArray() without facial data resulted in very large buffer:" << bytes.size() << "... reduce to MinimumData";
bytes = otherAvatar->toByteArray(AvatarData::MinimumData, lastEncodeForOther, lastSentJointsForOther,
hasFlagsOut, dropFaceTracking, distanceAdjust, viewerPosition, &lastSentJointsForOther);
}
if (bytes.size() > MAX_ALLOWED_AVATAR_DATA) {
qCWarning(avatars) << "otherAvatar.toByteArray() MinimumData resulted in very large buffer:" << bytes.size() << "... FAIL!!";
includeThisAvatar = false;
if (bytes.size() > MAX_ALLOWED_AVATAR_DATA) {
qCWarning(avatars) << "otherAvatar.toByteArray() MinimumData resulted in very large buffer:" << bytes.size() << "... FAIL!!";
includeThisAvatar = false;
}
}
}

View file

@@ -19,7 +19,7 @@
#include "ScriptableAvatar.h"
QByteArray ScriptableAvatar::toByteArrayStateful(AvatarDataDetail dataDetail) {
QByteArray ScriptableAvatar::toByteArrayStateful(AvatarDataDetail dataDetail, bool dropFaceTracking) {
_globalPosition = getPosition();
return AvatarData::toByteArrayStateful(dataDetail);
}

View file

@@ -28,7 +28,7 @@ public:
Q_INVOKABLE AnimationDetails getAnimationDetails();
virtual void setSkeletonModelURL(const QUrl& skeletonModelURL) override;
virtual QByteArray toByteArrayStateful(AvatarDataDetail dataDetail) override;
virtual QByteArray toByteArrayStateful(AvatarDataDetail dataDetail, bool dropFaceTracking = false) override;
private slots:

View file

@@ -295,7 +295,7 @@ void MyAvatar::simulateAttachments(float deltaTime) {
// don't update attachments here, do it in harvestResultsFromPhysicsSimulation()
}
QByteArray MyAvatar::toByteArrayStateful(AvatarDataDetail dataDetail) {
QByteArray MyAvatar::toByteArrayStateful(AvatarDataDetail dataDetail, bool dropFaceTracking) {
CameraMode mode = qApp->getCamera().getMode();
_globalPosition = getPosition();
// This might not be right! Isn't the capsule local offset in avatar space, and don't we need to add the radius to the y as well? -HRS 5/26/17

View file

@@ -612,7 +612,7 @@ private:
bool requiresSafeLanding(const glm::vec3& positionIn, glm::vec3& positionOut);
virtual QByteArray toByteArrayStateful(AvatarDataDetail dataDetail) override;
virtual QByteArray toByteArrayStateful(AvatarDataDetail dataDetail, bool dropFaceTracking) override;
void simulate(float deltaTime);
void updateFromTrackers(float deltaTime);

View file

@@ -57,6 +57,27 @@ static const float DEFAULT_AVATAR_DENSITY = 1000.0f; // density of water
#define ASSERT(COND) do { if (!(COND)) { abort(); } } while(0)
size_t AvatarDataPacket::maxFaceTrackerInfoSize(size_t numBlendshapeCoefficients) {
return FACE_TRACKER_INFO_SIZE + numBlendshapeCoefficients * sizeof(float);
}
size_t AvatarDataPacket::maxJointDataSize(size_t numJoints) {
const size_t validityBitsSize = (size_t)std::ceil(numJoints / (float)BITS_IN_BYTE);
size_t totalSize = sizeof(uint8_t); // numJoints
totalSize += validityBitsSize; // Orientations mask
totalSize += numJoints * sizeof(SixByteQuat); // Orientations
totalSize += validityBitsSize; // Translations mask
totalSize += numJoints * sizeof(SixByteTrans); // Translations
size_t NUM_FAUX_JOINT = 2;
totalSize += NUM_FAUX_JOINT * (sizeof(SixByteQuat) + sizeof(SixByteTrans)); // faux joints
return totalSize;
}
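
The two helpers above give worst-case byte counts for the packet's variable-length tail, which the toByteArray changes further down use to size the output buffer. A quick worked example of those bounds, assuming a hypothetical 80-joint skeleton and 72 blendshape coefficients (illustrative counts, not taken from a real skeleton):

```cpp
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
    const size_t BITS_IN_BYTE = 8;
    const size_t SIX_BYTES = 6;               // sizeof(SixByteQuat) == sizeof(SixByteTrans)
    const size_t FACE_TRACKER_INFO_SIZE = 17;
    const size_t NUM_FAUX_JOINT = 2;

    size_t numJoints = 80, numBlendshapes = 72;                                   // illustrative
    size_t validityBitsSize = (size_t)std::ceil(numJoints / (float)BITS_IN_BYTE); // 10

    size_t jointBytes = sizeof(uint8_t)                    // numJoints          ->   1
        + validityBitsSize + numJoints * SIX_BYTES         // rot mask + rots    ->  10 + 480
        + validityBitsSize + numJoints * SIX_BYTES         // trans mask + trans ->  10 + 480
        + NUM_FAUX_JOINT * (SIX_BYTES + SIX_BYTES);        // faux joints        ->  24
    size_t faceBytes = FACE_TRACKER_INFO_SIZE + numBlendshapes * sizeof(float);   //  17 + 288

    std::printf("joint data <= %zu bytes, face tracker <= %zu bytes\n", jointBytes, faceBytes);
    // Prints: joint data <= 1005 bytes, face tracker <= 305 bytes
}
```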
AvatarData::AvatarData() :
SpatiallyNestable(NestableType::Avatar, QUuid()),
_handPosition(0.0f),
@@ -73,19 +94,6 @@ AvatarData::AvatarData() :
setBodyPitch(0.0f);
setBodyYaw(-90.0f);
setBodyRoll(0.0f);
ASSERT(sizeof(AvatarDataPacket::Header) == AvatarDataPacket::HEADER_SIZE);
ASSERT(sizeof(AvatarDataPacket::AvatarGlobalPosition) == AvatarDataPacket::AVATAR_GLOBAL_POSITION_SIZE);
ASSERT(sizeof(AvatarDataPacket::AvatarLocalPosition) == AvatarDataPacket::AVATAR_LOCAL_POSITION_SIZE);
ASSERT(sizeof(AvatarDataPacket::AvatarBoundingBox) == AvatarDataPacket::AVATAR_BOUNDING_BOX_SIZE);
ASSERT(sizeof(AvatarDataPacket::AvatarOrientation) == AvatarDataPacket::AVATAR_ORIENTATION_SIZE);
ASSERT(sizeof(AvatarDataPacket::AvatarScale) == AvatarDataPacket::AVATAR_SCALE_SIZE);
ASSERT(sizeof(AvatarDataPacket::LookAtPosition) == AvatarDataPacket::LOOK_AT_POSITION_SIZE);
ASSERT(sizeof(AvatarDataPacket::AudioLoudness) == AvatarDataPacket::AUDIO_LOUDNESS_SIZE);
ASSERT(sizeof(AvatarDataPacket::SensorToWorldMatrix) == AvatarDataPacket::SENSOR_TO_WORLD_SIZE);
ASSERT(sizeof(AvatarDataPacket::AdditionalFlags) == AvatarDataPacket::ADDITIONAL_FLAGS_SIZE);
ASSERT(sizeof(AvatarDataPacket::ParentInfo) == AvatarDataPacket::PARENT_INFO_SIZE);
ASSERT(sizeof(AvatarDataPacket::FaceTrackerInfo) == AvatarDataPacket::FACE_TRACKER_INFO_SIZE);
}
AvatarData::~AvatarData() {
@@ -169,12 +177,12 @@ float AvatarData::getDistanceBasedMinTranslationDistance(glm::vec3 viewerPositio
// we want to track outbound data in this case...
QByteArray AvatarData::toByteArrayStateful(AvatarDataDetail dataDetail) {
QByteArray AvatarData::toByteArrayStateful(AvatarDataDetail dataDetail, bool dropFaceTracking) {
AvatarDataPacket::HasFlags hasFlagsOut;
auto lastSentTime = _lastToByteArray;
_lastToByteArray = usecTimestampNow();
return AvatarData::toByteArray(dataDetail, lastSentTime, getLastSentJointData(),
hasFlagsOut, false, false, glm::vec3(0), nullptr,
hasFlagsOut, dropFaceTracking, false, glm::vec3(0), nullptr,
&_outboundDataRate);
}
@@ -189,15 +197,11 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
lazyInitHeadData();
QByteArray avatarDataByteArray(udt::MAX_PACKET_SIZE, 0);
unsigned char* destinationBuffer = reinterpret_cast<unsigned char*>(avatarDataByteArray.data());
unsigned char* startPosition = destinationBuffer;
// special case, if we were asked for no data, then just include the flags all set to nothing
if (dataDetail == NoData) {
AvatarDataPacket::HasFlags packetStateFlags = 0;
memcpy(destinationBuffer, &packetStateFlags, sizeof(packetStateFlags));
return avatarDataByteArray.left(sizeof(packetStateFlags));
QByteArray avatarDataByteArray(reinterpret_cast<char*>(&packetStateFlags), sizeof(packetStateFlags));
return avatarDataByteArray;
}
// FIXME -
@@ -258,6 +262,15 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
hasJointData = sendAll || !sendMinimum;
}
const size_t byteArraySize = AvatarDataPacket::MAX_CONSTANT_HEADER_SIZE +
(hasFaceTrackerInfo ? AvatarDataPacket::maxFaceTrackerInfoSize(_headData->getNumSummedBlendshapeCoefficients()) : 0) +
(hasJointData ? AvatarDataPacket::maxJointDataSize(_jointData.size()) : 0);
QByteArray avatarDataByteArray((int)byteArraySize, 0);
unsigned char* destinationBuffer = reinterpret_cast<unsigned char*>(avatarDataByteArray.data());
unsigned char* startPosition = destinationBuffer;
// Leading flags, to indicate how much data is actually included in the packet...
AvatarDataPacket::HasFlags packetStateFlags =
(hasAvatarGlobalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION : 0)
@@ -478,12 +491,15 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
unsigned char* validityPosition = destinationBuffer;
unsigned char validity = 0;
int validityBit = 0;
int numValidityBytes = (int)std::ceil(numJoints / (float)BITS_IN_BYTE);
#ifdef WANT_DEBUG
int rotationSentCount = 0;
unsigned char* beforeRotations = destinationBuffer;
#endif
destinationBuffer += numValidityBytes; // Move pointer past the validity bytes
if (sentJointDataOut) {
sentJointDataOut->resize(_jointData.size()); // Make sure the destination is resized before using it
}
@@ -503,6 +519,8 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
#ifdef WANT_DEBUG
rotationSentCount++;
#endif
destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, data.rotation);
if (sentJointDataOut) {
auto jointDataOut = *sentJointDataOut;
jointDataOut[i].rotation = data.rotation;
@@ -512,28 +530,14 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
}
}
if (++validityBit == BITS_IN_BYTE) {
*destinationBuffer++ = validity;
*validityPosition++ = validity;
validityBit = validity = 0;
}
}
if (validityBit != 0) {
*destinationBuffer++ = validity;
*validityPosition++ = validity;
}
validityBit = 0;
validity = *validityPosition++;
for (int i = 0; i < _jointData.size(); i++) {
const JointData& data = _jointData[i];
if (validity & (1 << validityBit)) {
destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, data.rotation);
}
if (++validityBit == BITS_IN_BYTE) {
validityBit = 0;
validity = *validityPosition++;
}
}
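
The rewritten loop above reserves numValidityBytes up front (destinationBuffer += numValidityBytes) and fills the mask through validityPosition in the same pass that packs the rotations, instead of emitting the mask and then re-reading it in a second packing loop as before. A standalone sketch of that single-pass mask-plus-payload layout (a toy float payload stands in for the six-byte quaternion packing):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Pack only the "valid" entries of `values`, preceded by a bitmask saying
// which entries were written. The mask bytes are reserved first and filled
// in the same pass as the payload (the pattern used in the hunk above).
std::vector<uint8_t> packWithValidityMask(const std::vector<float>& values,
                                          const std::vector<bool>& valid) {
    const size_t BITS_IN_BYTE = 8;
    size_t maskBytes = (values.size() + BITS_IN_BYTE - 1) / BITS_IN_BYTE;
    std::vector<uint8_t> out(maskBytes + values.size() * sizeof(float));

    uint8_t* maskPos = out.data();          // where the validity bytes live
    uint8_t* dest = out.data() + maskBytes; // payload starts after the mask
    uint8_t mask = 0;
    int bit = 0;

    for (size_t i = 0; i < values.size(); ++i) {
        if (valid[i]) {
            mask |= uint8_t(1u << bit);
            std::memcpy(dest, &values[i], sizeof(float));
            dest += sizeof(float);
        }
        if (++bit == (int)BITS_IN_BYTE) {
            *maskPos++ = mask;   // flush a full mask byte
            mask = 0;
            bit = 0;
        }
    }
    if (bit != 0) {
        *maskPos++ = mask;       // flush the final partial byte
    }
    out.resize(size_t(dest - out.data()));   // trim unused payload space
    return out;
}
```

Unpacking reverses the layout: read the mask bytes first, then consume one payload entry for each set bit.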
// joint translation data
validityPosition = destinationBuffer;
validity = 0;
@@ -544,6 +548,8 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
unsigned char* beforeTranslations = destinationBuffer;
#endif
destinationBuffer += numValidityBytes; // Move pointer past the validity bytes
float minTranslation = !distanceAdjust ? AVATAR_MIN_TRANSLATION : getDistanceBasedMinTranslationDistance(viewerPosition);
float maxTranslationDimension = 0.0;
@@ -562,6 +568,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
maxTranslationDimension = glm::max(fabsf(data.translation.y), maxTranslationDimension);
maxTranslationDimension = glm::max(fabsf(data.translation.z), maxTranslationDimension);
destinationBuffer +=
packFloatVec3ToSignedTwoByteFixed(destinationBuffer, data.translation, TRANSLATION_COMPRESSION_RADIX);
if (sentJointDataOut) {
auto jointDataOut = *sentJointDataOut;
jointDataOut[i].translation = data.translation;
@@ -571,27 +580,13 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
}
}
if (++validityBit == BITS_IN_BYTE) {
*destinationBuffer++ = validity;
*validityPosition++ = validity;
validityBit = validity = 0;
}
}
if (validityBit != 0) {
*destinationBuffer++ = validity;
}
validityBit = 0;
validity = *validityPosition++;
for (int i = 0; i < _jointData.size(); i++) {
const JointData& data = _jointData[i];
if (validity & (1 << validityBit)) {
destinationBuffer +=
packFloatVec3ToSignedTwoByteFixed(destinationBuffer, data.translation, TRANSLATION_COMPRESSION_RADIX);
}
if (++validityBit == BITS_IN_BYTE) {
validityBit = 0;
validity = *validityPosition++;
}
*validityPosition++ = validity;
}
// faux joints
@@ -624,6 +619,12 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
}
int avatarDataSize = destinationBuffer - startPosition;
if (avatarDataSize > (int)byteArraySize) {
qCCritical(avatars) << "AvatarData::toByteArray buffer overflow"; // We've overflown into the heap
ASSERT(false);
}
return avatarDataByteArray.left(avatarDataSize);
}
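
The end of toByteArray now compares the bytes actually written against the worst-case allocation (logging and asserting on overflow) and returns only the used prefix via QByteArray::left. A minimal sketch of that allocate-upper-bound / pack / verify / truncate shape, using std::vector and toy records in place of QByteArray and the avatar packet sections:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Shape of the fix: size the buffer from a worst-case bound, pack into it,
// verify nothing overflowed, and hand back only the bytes actually used.
// The "records" here are toy stand-ins for the avatar packet sections.
std::vector<uint8_t> packRecords(const std::vector<std::vector<uint8_t>>& records,
                                 size_t worstCaseBytes) {
    std::vector<uint8_t> buffer(worstCaseBytes, 0);
    uint8_t* start = buffer.data();
    uint8_t* dest = start;

    for (const auto& record : records) {
        std::memcpy(dest, record.data(), record.size());
        dest += record.size();
    }

    size_t used = size_t(dest - start);
    assert(used <= worstCaseBytes && "packed past the worst-case bound");
    buffer.resize(used);   // the real code does avatarDataByteArray.left(avatarDataSize)
    return buffer;
}
```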
// NOTE: This is never used in a "distanceAdjust" mode, so it's ok that it doesn't use a variable minimum rotation/translation
@@ -1743,6 +1744,24 @@ void AvatarData::sendAvatarDataPacket() {
bool cullSmallData = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO);
auto dataDetail = cullSmallData ? SendAllData : CullSmallData;
QByteArray avatarByteArray = toByteArrayStateful(dataDetail);
int maximumByteArraySize = NLPacket::maxPayloadSize(PacketType::AvatarData) - sizeof(AvatarDataSequenceNumber);
if (avatarByteArray.size() > maximumByteArraySize) {
qCWarning(avatars) << "toByteArrayStateful() resulted in very large buffer:" << avatarByteArray.size() << "... attempt to drop facial data";
avatarByteArray = toByteArrayStateful(dataDetail, true);
if (avatarByteArray.size() > maximumByteArraySize) {
qCWarning(avatars) << "toByteArrayStateful() without facial data resulted in very large buffer:" << avatarByteArray.size() << "... reduce to MinimumData";
avatarByteArray = toByteArrayStateful(MinimumData, true);
if (avatarByteArray.size() > maximumByteArraySize) {
qCWarning(avatars) << "toByteArrayStateful() MinimumData resulted in very large buffer:" << avatarByteArray.size() << "... FAIL!!";
return;
}
}
}
doneEncoding(cullSmallData);
static AvatarDataSequenceNumber sequenceNumber = 0;

View file

@@ -140,35 +140,41 @@ namespace AvatarDataPacket {
const HasFlags PACKET_HAS_JOINT_DATA = 1U << 11;
const size_t AVATAR_HAS_FLAGS_SIZE = 2;
using SixByteQuat = uint8_t[6];
using SixByteTrans = uint8_t[6];
// NOTE: AvatarDataPackets start with a uint16_t sequence number that is not reflected in the Header structure.
PACKED_BEGIN struct Header {
HasFlags packetHasFlags; // state flags, indicated which additional records are included in the packet
} PACKED_END;
const size_t HEADER_SIZE = 2;
static_assert(sizeof(Header) == HEADER_SIZE, "AvatarDataPacket::Header size doesn't match.");
PACKED_BEGIN struct AvatarGlobalPosition {
float globalPosition[3]; // avatar's position
} PACKED_END;
const size_t AVATAR_GLOBAL_POSITION_SIZE = 12;
static_assert(sizeof(AvatarGlobalPosition) == AVATAR_GLOBAL_POSITION_SIZE, "AvatarDataPacket::AvatarGlobalPosition size doesn't match.");
PACKED_BEGIN struct AvatarBoundingBox {
float avatarDimensions[3]; // avatar's bounding box in world space units, but relative to the position.
float boundOriginOffset[3]; // offset from the position of the avatar to the origin of the bounding box
} PACKED_END;
const size_t AVATAR_BOUNDING_BOX_SIZE = 24;
static_assert(sizeof(AvatarBoundingBox) == AVATAR_BOUNDING_BOX_SIZE, "AvatarDataPacket::AvatarBoundingBox size doesn't match.");
using SixByteQuat = uint8_t[6];
PACKED_BEGIN struct AvatarOrientation {
SixByteQuat avatarOrientation; // encodeded and compressed by packOrientationQuatToSixBytes()
} PACKED_END;
const size_t AVATAR_ORIENTATION_SIZE = 6;
static_assert(sizeof(AvatarOrientation) == AVATAR_ORIENTATION_SIZE, "AvatarDataPacket::AvatarOrientation size doesn't match.");
PACKED_BEGIN struct AvatarScale {
SmallFloat scale; // avatar's scale, compressed by packFloatRatioToTwoByte()
} PACKED_END;
const size_t AVATAR_SCALE_SIZE = 2;
static_assert(sizeof(AvatarScale) == AVATAR_SCALE_SIZE, "AvatarDataPacket::AvatarScale size doesn't match.");
PACKED_BEGIN struct LookAtPosition {
float lookAtPosition[3]; // world space position that eyes are focusing on.
@@ -180,11 +186,13 @@ namespace AvatarDataPacket {
// POTENTIAL SAVINGS - 12 bytes
} PACKED_END;
const size_t LOOK_AT_POSITION_SIZE = 12;
static_assert(sizeof(LookAtPosition) == LOOK_AT_POSITION_SIZE, "AvatarDataPacket::LookAtPosition size doesn't match.");
PACKED_BEGIN struct AudioLoudness {
uint8_t audioLoudness; // current loudness of microphone compressed with packFloatGainToByte()
} PACKED_END;
const size_t AUDIO_LOUDNESS_SIZE = 1;
static_assert(sizeof(AudioLoudness) == AUDIO_LOUDNESS_SIZE, "AvatarDataPacket::AudioLoudness size doesn't match.");
PACKED_BEGIN struct SensorToWorldMatrix {
// FIXME - these 20 bytes are only used by viewers if my avatar has "attachments"
@@ -199,11 +207,13 @@ namespace AvatarDataPacket {
// relative to the avatar position.
} PACKED_END;
const size_t SENSOR_TO_WORLD_SIZE = 20;
static_assert(sizeof(SensorToWorldMatrix) == SENSOR_TO_WORLD_SIZE, "AvatarDataPacket::SensorToWorldMatrix size doesn't match.");
PACKED_BEGIN struct AdditionalFlags {
uint8_t flags; // additional flags: hand state, key state, eye tracking
} PACKED_END;
const size_t ADDITIONAL_FLAGS_SIZE = 1;
static_assert(sizeof(AdditionalFlags) == ADDITIONAL_FLAGS_SIZE, "AvatarDataPacket::AdditionalFlags size doesn't match.");
// only present if HAS_REFERENTIAL flag is set in AvatarInfo.flags
PACKED_BEGIN struct ParentInfo {
@@ -211,6 +221,7 @@ namespace AvatarDataPacket {
uint16_t parentJointIndex;
} PACKED_END;
const size_t PARENT_INFO_SIZE = 18;
static_assert(sizeof(ParentInfo) == PARENT_INFO_SIZE, "AvatarDataPacket::ParentInfo size doesn't match.");
// will only ever be included if the avatar has a parent but can change independent of changes to parent info
// and so we keep it a separate record
@@ -218,6 +229,22 @@ namespace AvatarDataPacket {
float localPosition[3]; // parent frame translation of the avatar
} PACKED_END;
const size_t AVATAR_LOCAL_POSITION_SIZE = 12;
static_assert(sizeof(AvatarLocalPosition) == AVATAR_LOCAL_POSITION_SIZE, "AvatarDataPacket::AvatarLocalPosition size doesn't match.");
const size_t MAX_CONSTANT_HEADER_SIZE = HEADER_SIZE +
AVATAR_GLOBAL_POSITION_SIZE +
AVATAR_BOUNDING_BOX_SIZE +
AVATAR_ORIENTATION_SIZE +
AVATAR_SCALE_SIZE +
LOOK_AT_POSITION_SIZE +
AUDIO_LOUDNESS_SIZE +
SENSOR_TO_WORLD_SIZE +
ADDITIONAL_FLAGS_SIZE +
PARENT_INFO_SIZE +
AVATAR_LOCAL_POSITION_SIZE;
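
With the record sizes declared above, MAX_CONSTANT_HEADER_SIZE works out to 2 + 12 + 24 + 6 + 2 + 12 + 1 + 20 + 1 + 18 + 12 = 110 bytes; only the face-tracker and joint records that follow add variable-length data on top of this fixed budget.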
// variable length structure follows
// only present if IS_FACE_TRACKER_CONNECTED flag is set in AvatarInfo.flags
PACKED_BEGIN struct FaceTrackerInfo {
@@ -229,8 +256,9 @@ namespace AvatarDataPacket {
// float blendshapeCoefficients[numBlendshapeCoefficients];
} PACKED_END;
const size_t FACE_TRACKER_INFO_SIZE = 17;
static_assert(sizeof(FaceTrackerInfo) == FACE_TRACKER_INFO_SIZE, "AvatarDataPacket::FaceTrackerInfo size doesn't match.");
size_t maxFaceTrackerInfoSize(size_t numBlendshapeCoefficients);
// variable length structure follows
/*
struct JointData {
uint8_t numJoints;
@@ -240,6 +268,7 @@ namespace AvatarDataPacket {
SixByteTrans translation[numValidTranslations]; // encodeded and compressed by packFloatVec3ToSignedTwoByteFixed()
};
*/
size_t maxJointDataSize(size_t numJoints);
}
static const float MAX_AVATAR_SCALE = 1000.0f;
@@ -387,7 +416,7 @@ public:
SendAllData
} AvatarDataDetail;
virtual QByteArray toByteArrayStateful(AvatarDataDetail dataDetail);
virtual QByteArray toByteArrayStateful(AvatarDataDetail dataDetail, bool dropFaceTracking = false);
virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData,
AvatarDataPacket::HasFlags& hasFlagsOut, bool dropFaceTracking, bool distanceAdjust, glm::vec3 viewerPosition,

View file

@@ -83,6 +83,11 @@ static const QMap<QString, int>& getBlendshapesLookupMap() {
return blendshapeLookupMap;
}
int HeadData::getNumSummedBlendshapeCoefficients() const {
int maxSize = std::max(_blendshapeCoefficients.size(), _transientBlendshapeCoefficients.size());
return maxSize;
}
const QVector<float>& HeadData::getSummedBlendshapeCoefficients() {
int maxSize = std::max(_blendshapeCoefficients.size(), _transientBlendshapeCoefficients.size());
if (_summedBlendshapeCoefficients.size() != maxSize) {

View file

@@ -57,6 +57,7 @@ public:
void setBlendshape(QString name, float val);
const QVector<float>& getBlendshapeCoefficients() const { return _blendshapeCoefficients; }
const QVector<float>& getSummedBlendshapeCoefficients();
int getNumSummedBlendshapeCoefficients() const;
void setBlendshapeCoefficients(const QVector<float>& blendshapeCoefficients) { _blendshapeCoefficients = blendshapeCoefficients; }
const glm::vec3& getLookAtPosition() const { return _lookAtPosition; }