mirror of https://github.com/overte-org/overte.git
synced 2025-04-23 11:53:34 +02:00

fix various bot related bugs

commit eedfc4fd38
parent f0238ec4d7

8 changed files with 142 additions and 18 deletions
Changed paths: assignment-client/src, interface/src/avatar, libraries
@@ -138,7 +138,6 @@ void Agent::handleJurisdictionPacket(QSharedPointer<ReceivedMessage> message, Sh
 void Agent::handleAudioPacket(QSharedPointer<ReceivedMessage> message) {
     _receivedAudioStream.parseData(*message);
-
     _lastReceivedAudioLoudness = _receivedAudioStream.getNextOutputFrameLoudness();
     _receivedAudioStream.clearBuffer();
 }
 
@@ -323,12 +322,14 @@ void Agent::scriptRequestFinished() {
     request->deleteLater();
 }
 
 void Agent::executeScript() {
     _scriptEngine = std::unique_ptr<ScriptEngine>(new ScriptEngine(ScriptEngine::AGENT_SCRIPT, _scriptContents, _payload));
     _scriptEngine->setParent(this); // be the parent of the script engine so it gets moved when we do
 
     // setup an Avatar for the script to use
     auto scriptedAvatar = DependencyManager::get<ScriptableAvatar>();
 
+    connect(_scriptEngine.get(), SIGNAL(update(float)), scriptedAvatar.data(), SLOT(update(float)), Qt::ConnectionType::QueuedConnection);
     scriptedAvatar->setForceFaceTrackerConnected(true);
 
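The added connect() drives the ScriptableAvatar's update from the script engine's update(float) signal using Qt::QueuedConnection, so the slot runs through the receiver's event loop rather than inline in the emitter. A minimal, hypothetical sketch of the mechanism (Engine/Avatar here are stand-ins, not Overte classes; building it requires moc, e.g. via CMake's AUTOMOC):

    #include <QCoreApplication>
    #include <QObject>
    #include <QTimer>
    #include <QDebug>

    class Engine : public QObject {
        Q_OBJECT
    signals:
        void update(float deltaTime);
    };

    class Avatar : public QObject {
        Q_OBJECT
    public slots:
        void update(float deltaTime) { qDebug() << "avatar update:" << deltaTime; }
    };

    int main(int argc, char** argv) {
        QCoreApplication app(argc, argv);
        Engine engine;
        Avatar avatar;
        // Queued: the invocation is posted to avatar's event loop, not called inline.
        QObject::connect(&engine, SIGNAL(update(float)),
                         &avatar, SLOT(update(float)), Qt::QueuedConnection);
        QTimer::singleShot(0, [&] { emit engine.update(1.0f / 60.0f); });
        QTimer::singleShot(50, &app, &QCoreApplication::quit);
        return app.exec();
    }

    #include "main.moc"  // required when Q_OBJECT classes are defined in main.cpp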
@@ -338,11 +339,33 @@ void Agent::executeScript() {
     // give this AvatarData object to the script engine
     _scriptEngine->registerGlobalObject("Avatar", scriptedAvatar.data());
 
+    auto player = DependencyManager::get<recording::Deck>();
+    connect(player.data(), &recording::Deck::playbackStateChanged, [=] {
+        if (player->isPlaying()) {
+            auto recordingInterface = DependencyManager::get<RecordingScriptingInterface>();
+            if (recordingInterface->getPlayFromCurrentLocation()) {
+                scriptedAvatar->setRecordingBasis();
+            }
+        } else {
+            scriptedAvatar->clearRecordingBasis();
+        }
+    });
+
     using namespace recording;
     static const FrameType AVATAR_FRAME_TYPE = Frame::registerFrameType(AvatarData::FRAME_NAME);
     // FIXME how to deal with driving multiple avatars locally?
     Frame::registerFrameHandler(AVATAR_FRAME_TYPE, [this, scriptedAvatar](Frame::ConstPointer frame) {
+        auto recordingInterface = DependencyManager::get<RecordingScriptingInterface>();
+        bool useFrameSkeleton = recordingInterface->getPlayerUseSkeletonModel();
+
+        // FIXME - the ability to switch the avatar URL is not actually supported when playing back from a recording
+        if (!useFrameSkeleton) {
+            static std::once_flag warning;
+            std::call_once(warning, [] {
+                qWarning() << "Recording.setPlayerUseSkeletonModel(false) is not currently supported.";
+            });
+        }
+
         AvatarData::fromFrame(frame->data, *scriptedAvatar);
     });
 
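The once-only warning inside the frame handler is the standard std::call_once idiom: the handler may fire every frame, but the function-local static std::once_flag guarantees the message is logged a single time, even under concurrent calls. A standalone sketch:

    #include <iostream>
    #include <mutex>

    void handleFrame(bool useFrameSkeleton) {
        if (!useFrameSkeleton) {
            static std::once_flag warning;        // shared across all calls
            std::call_once(warning, [] {
                std::cerr << "setPlayerUseSkeletonModel(false) is not supported.\n";
            });
        }
    }

    int main() {
        for (int i = 0; i < 100; ++i) {
            handleFrame(false);                   // the warning prints exactly once
        }
    }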
@@ -352,8 +375,12 @@ void Agent::executeScript() {
         const QByteArray& audio = frame->data;
         static quint16 audioSequenceNumber{ 0 };
         Transform audioTransform;
+
+        auto avatarOrientation = scriptedAvatar->getOrientation();
+        glm::quat headOrientation = scriptedAvatar->getHeadOrientation(); // FIXME - should we be using head orientation or avatar orientation?
         audioTransform.setTranslation(scriptedAvatar->getPosition());
-        audioTransform.setRotation(scriptedAvatar->getOrientation());
+        audioTransform.setRotation(avatarOrientation);
 
         QByteArray encodedBuffer;
         if (_encoder) {
             _encoder->encode(audio, encodedBuffer);
@@ -537,7 +564,10 @@ void Agent::encodeFrameOfZeros(QByteArray& encodedZeros) {
 }
 
 void Agent::processAgentAvatarAudio() {
-    if (_isAvatar && (_isListeningToAudioStream || _avatarSound)) {
+    auto recordingInterface = DependencyManager::get<RecordingScriptingInterface>();
+    bool isPlayingRecording = recordingInterface->isPlaying();
+
+    if (_isAvatar && ((_isListeningToAudioStream && !isPlayingRecording) || _avatarSound)) {
         // if we have an avatar audio stream then send it out to our audio-mixer
         auto scriptedAvatar = DependencyManager::get<ScriptableAvatar>();
         bool silentFrame = true;
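The reworked condition stops the agent from forwarding its live audio stream while a recording is playing back (presumably because the recorded frames already carry the audio), while a scripted _avatarSound still plays in either case. The gate, extracted as a hypothetical standalone predicate for illustration:

    // Hypothetical extraction of the fixed gate, not Overte code.
    bool shouldSendAvatarAudio(bool isAvatar, bool isListeningToAudioStream,
                               bool isPlayingRecording, bool hasAvatarSound) {
        // The live stream is suppressed during playback; a scripted sound is not.
        return isAvatar && ((isListeningToAudioStream && !isPlayingRecording) || hasAvatarSound);
    }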
@@ -1010,7 +1010,6 @@ void Avatar::setSkeletonModelURL(const QUrl& skeletonModelURL) {
 
 void Avatar::setModelURLFinished(bool success) {
     if (!success && _skeletonModelURL != AvatarData::defaultFullAvatarModelUrl()) {
         qDebug() << "Using default after failing to load Avatar model: " << _skeletonModelURL;
         // call _skeletonModel.setURL, but leave our copy of _skeletonModelURL alone. This is so that
         // we don't redo this every time we receive an identity packet from the avatar with the bad url.
         QMetaObject::invokeMethod(_skeletonModel.get(), "setURL",
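For context on the call above: QMetaObject::invokeMethod invokes a slot by name and, with the default Qt::AutoConnection, dispatches directly on the same thread or queues across threads, which is why it is safe even when the target object lives elsewhere. A hypothetical sketch (Model is a stand-in, and moc is required as in the previous example):

    #include <QCoreApplication>
    #include <QMetaObject>
    #include <QObject>
    #include <QUrl>
    #include <QDebug>

    class Model : public QObject {
        Q_OBJECT
    public slots:
        void setURL(const QUrl& url) { qDebug() << "loading" << url; }
    };

    int main(int argc, char** argv) {
        QCoreApplication app(argc, argv);
        Model model;
        // Same thread here, so AutoConnection resolves to a direct call;
        // across threads it would queue through model's event loop.
        QMetaObject::invokeMethod(&model, "setURL",
                                  Q_ARG(QUrl, QUrl("https://example.com/default.fst")));
        return 0;
    }

    #include "main.moc"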
@@ -134,7 +134,7 @@ Q_LOGGING_CATEGORY(trace_simulation_avatar, "trace.simulation.avatar");
 
 float AvatarManager::getAvatarDataRate(const QUuid& sessionID, const QString& rateName) {
     auto avatar = getAvatarBySessionID(sessionID);
-    return avatar->getDataRate(rateName);
+    return avatar ? avatar->getDataRate(rateName) : 0.0f;
 }
 
 class AvatarPriority {
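getAvatarBySessionID() can return an empty shared pointer when the session is unknown, so the old unconditional dereference could crash. A minimal sketch of the guarded lookup, with stand-in types (AvatarStub and the pointer alias are assumptions for this example):

    #include <memory>
    #include <string>

    struct AvatarStub {
        float getDataRate(const std::string&) const { return 0.5f; }
    };
    using AvatarSharedPointer = std::shared_ptr<AvatarStub>;

    float getAvatarDataRate(const AvatarSharedPointer& avatar, const std::string& rateName) {
        // Before the fix: avatar->getDataRate(rateName) dereferenced a null pointer
        // whenever the session ID did not resolve to an avatar.
        return avatar ? avatar->getDataRate(rateName) : 0.0f;
    }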
@@ -1165,7 +1165,6 @@ void MyAvatar::clearJointsData() {
 }
 
 void MyAvatar::setSkeletonModelURL(const QUrl& skeletonModelURL) {
-
     Avatar::setSkeletonModelURL(skeletonModelURL);
     render::ScenePointer scene = qApp->getMain3DScene();
     _skeletonModel->setVisibleInScene(true, scene);
@@ -283,14 +283,20 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
     destinationBuffer += sizeof(packetStateFlags);
 
     if (hasAvatarGlobalPosition) {
+        auto startSection = destinationBuffer;
         auto data = reinterpret_cast<AvatarDataPacket::AvatarGlobalPosition*>(destinationBuffer);
         data->globalPosition[0] = _globalPosition.x;
         data->globalPosition[1] = _globalPosition.y;
         data->globalPosition[2] = _globalPosition.z;
         destinationBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition);
+
+        int numBytes = destinationBuffer - startSection;
+        _globalPositionRateOutbound.increment(numBytes);
     }
 
     if (hasAvatarBoundingBox) {
+        auto startSection = destinationBuffer;
         auto data = reinterpret_cast<AvatarDataPacket::AvatarBoundingBox*>(destinationBuffer);
 
         data->avatarDimensions[0] = _globalBoundingBoxDimensions.x;
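Every section of toByteArray() now follows the same bookkeeping pattern: snapshot the write cursor, pack the section, then feed the byte count into that section's outbound rate counter. A self-contained sketch with a stubbed counter (RateCounterStub is an assumption; the real RateCounter tracks a windowed rate, not just a total):

    #include <cstdint>
    #include <cstring>

    struct RateCounterStub {
        uint64_t totalBytes { 0 };
        void increment(int numBytes) { totalBytes += numBytes; }
    };

    // Pack one three-float section and record how many bytes it consumed.
    int packVec3Section(uint8_t* destinationBuffer, const float value[3], RateCounterStub& rate) {
        auto startSection = destinationBuffer;

        std::memcpy(destinationBuffer, value, 3 * sizeof(float));
        destinationBuffer += 3 * sizeof(float);

        int numBytes = destinationBuffer - startSection;  // bytes this section used
        rate.increment(numBytes);
        return numBytes;  // 12
    }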
@@ -302,36 +308,56 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
         data->boundOriginOffset[2] = _globalBoundingBoxOffset.z;
 
         destinationBuffer += sizeof(AvatarDataPacket::AvatarBoundingBox);
+
+        int numBytes = destinationBuffer - startSection;
+        _avatarBoundingBoxRateOutbound.increment(numBytes);
     }
 
     if (hasAvatarOrientation) {
+        auto startSection = destinationBuffer;
         auto localOrientation = getLocalOrientation();
         destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, localOrientation);
+
+        int numBytes = destinationBuffer - startSection;
+        _avatarOrientationRateOutbound.increment(numBytes);
     }
 
     if (hasAvatarScale) {
+        auto startSection = destinationBuffer;
         auto data = reinterpret_cast<AvatarDataPacket::AvatarScale*>(destinationBuffer);
         auto scale = getDomainLimitedScale();
         packFloatRatioToTwoByte((uint8_t*)(&data->scale), scale);
         destinationBuffer += sizeof(AvatarDataPacket::AvatarScale);
+
+        int numBytes = destinationBuffer - startSection;
+        _avatarScaleRateOutbound.increment(numBytes);
     }
 
     if (hasLookAtPosition) {
+        auto startSection = destinationBuffer;
         auto data = reinterpret_cast<AvatarDataPacket::LookAtPosition*>(destinationBuffer);
         auto lookAt = _headData->getLookAtPosition();
         data->lookAtPosition[0] = lookAt.x;
         data->lookAtPosition[1] = lookAt.y;
         data->lookAtPosition[2] = lookAt.z;
         destinationBuffer += sizeof(AvatarDataPacket::LookAtPosition);
+
+        int numBytes = destinationBuffer - startSection;
+        _lookAtPositionRateOutbound.increment(numBytes);
     }
 
     if (hasAudioLoudness) {
+        auto startSection = destinationBuffer;
         auto data = reinterpret_cast<AvatarDataPacket::AudioLoudness*>(destinationBuffer);
         data->audioLoudness = packFloatGainToByte(_headData->getAudioLoudness() / AUDIO_LOUDNESS_SCALE);
         destinationBuffer += sizeof(AvatarDataPacket::AudioLoudness);
+
+        int numBytes = destinationBuffer - startSection;
+        _audioLoudnessRateOutbound.increment(numBytes);
     }
 
     if (hasSensorToWorldMatrix) {
+        auto startSection = destinationBuffer;
         auto data = reinterpret_cast<AvatarDataPacket::SensorToWorldMatrix*>(destinationBuffer);
         glm::mat4 sensorToWorldMatrix = getSensorToWorldMatrix();
         packOrientationQuatToSixBytes(data->sensorToWorldQuat, glmExtractRotation(sensorToWorldMatrix));
@@ -341,9 +367,13 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
         data->sensorToWorldTrans[1] = sensorToWorldMatrix[3][1];
         data->sensorToWorldTrans[2] = sensorToWorldMatrix[3][2];
         destinationBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix);
+
+        int numBytes = destinationBuffer - startSection;
+        _sensorToWorldRateOutbound.increment(numBytes);
     }
 
     if (hasAdditionalFlags) {
+        auto startSection = destinationBuffer;
         auto data = reinterpret_cast<AvatarDataPacket::AdditionalFlags*>(destinationBuffer);
 
         uint8_t flags { 0 };
@@ -370,27 +400,39 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
         }
         data->flags = flags;
         destinationBuffer += sizeof(AvatarDataPacket::AdditionalFlags);
+
+        int numBytes = destinationBuffer - startSection;
+        _additionalFlagsRateOutbound.increment(numBytes);
     }
 
     if (hasAvatarLocalPosition) {
+        auto startSection = destinationBuffer;
         auto data = reinterpret_cast<AvatarDataPacket::AvatarLocalPosition*>(destinationBuffer);
         auto localPosition = getLocalPosition();
         data->localPosition[0] = localPosition.x;
         data->localPosition[1] = localPosition.y;
         data->localPosition[2] = localPosition.z;
         destinationBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition);
+
+        int numBytes = destinationBuffer - startSection;
+        _localPositionRateOutbound.increment(numBytes);
     }
 
     if (hasParentInfo) {
+        auto startSection = destinationBuffer;
         auto parentInfo = reinterpret_cast<AvatarDataPacket::ParentInfo*>(destinationBuffer);
         QByteArray referentialAsBytes = parentID.toRfc4122();
         memcpy(parentInfo->parentUUID, referentialAsBytes.data(), referentialAsBytes.size());
         parentInfo->parentJointIndex = _parentJointIndex;
         destinationBuffer += sizeof(AvatarDataPacket::ParentInfo);
+
+        int numBytes = destinationBuffer - startSection;
+        _parentInfoRateOutbound.increment(numBytes);
     }
 
     // If it is connected, pack up the data
     if (hasFaceTrackerInfo) {
+        auto startSection = destinationBuffer;
         auto faceTrackerInfo = reinterpret_cast<AvatarDataPacket::FaceTrackerInfo*>(destinationBuffer);
 
         faceTrackerInfo->leftEyeBlink = _headData->_leftEyeBlink;
@@ -403,10 +445,14 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
         // followed by a variable number of float coefficients
         memcpy(destinationBuffer, _headData->_blendshapeCoefficients.data(), _headData->_blendshapeCoefficients.size() * sizeof(float));
         destinationBuffer += _headData->_blendshapeCoefficients.size() * sizeof(float);
+
+        int numBytes = destinationBuffer - startSection;
+        _faceTrackerRateOutbound.increment(numBytes);
     }
 
     // If it is connected, pack up the data
     if (hasJointData) {
+        auto startSection = destinationBuffer;
         QReadLocker readLock(&_jointDataLock);
 
         // joint rotation data
@@ -554,6 +600,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
                 << (destinationBuffer - startPosition);
         }
 #endif
+
+        int numBytes = destinationBuffer - startSection;
+        _jointDataRateOutbound.increment(numBytes);
     }
 
     int avatarDataSize = destinationBuffer - startPosition;
@@ -1028,6 +1077,30 @@ float AvatarData::getDataRate(const QString& rateName) {
         return _faceTrackerRate.rate() / BYTES_PER_KILOBIT;
     } else if (rateName == "jointData") {
         return _jointDataRate.rate() / BYTES_PER_KILOBIT;
+    } else if (rateName == "globalPositionOutbound") {
+        return _globalPositionRateOutbound.rate() / BYTES_PER_KILOBIT;
+    } else if (rateName == "localPositionOutbound") {
+        return _localPositionRateOutbound.rate() / BYTES_PER_KILOBIT;
+    } else if (rateName == "avatarBoundingBoxOutbound") {
+        return _avatarBoundingBoxRateOutbound.rate() / BYTES_PER_KILOBIT;
+    } else if (rateName == "avatarOrientationOutbound") {
+        return _avatarOrientationRateOutbound.rate() / BYTES_PER_KILOBIT;
+    } else if (rateName == "avatarScaleOutbound") {
+        return _avatarScaleRateOutbound.rate() / BYTES_PER_KILOBIT;
+    } else if (rateName == "lookAtPositionOutbound") {
+        return _lookAtPositionRateOutbound.rate() / BYTES_PER_KILOBIT;
+    } else if (rateName == "audioLoudnessOutbound") {
+        return _audioLoudnessRateOutbound.rate() / BYTES_PER_KILOBIT;
+    } else if (rateName == "sensorToWorkMatrixOutbound") {
+        return _sensorToWorldRateOutbound.rate() / BYTES_PER_KILOBIT;
+    } else if (rateName == "additionalFlagsOutbound") {
+        return _additionalFlagsRateOutbound.rate() / BYTES_PER_KILOBIT;
+    } else if (rateName == "parentInfoOutbound") {
+        return _parentInfoRateOutbound.rate() / BYTES_PER_KILOBIT;
+    } else if (rateName == "faceTrackerOutbound") {
+        return _faceTrackerRateOutbound.rate() / BYTES_PER_KILOBIT;
+    } else if (rateName == "jointDataOutbound") {
+        return _jointDataRateOutbound.rate() / BYTES_PER_KILOBIT;
+    }
     return 0.0f;
 }
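Each branch converts a byte rate into kilobits per second. Assuming rate() reports bytes per second and BYTES_PER_KILOBIT is defined as 1000 bits / 8 bits-per-byte = 125 (the constant's definition is outside this diff, so this is an assumption), the arithmetic looks like:

    #include <cstdio>

    int main() {
        const float BYTES_PER_KILOBIT = 1000.0f / 8.0f;  // 125 bytes per kilobit (assumed)
        float bytesPerSecond = 12500.0f;                  // e.g. a RateCounter reading
        std::printf("%.1f kbps\n", bytesPerSecond / BYTES_PER_KILOBIT);  // prints 100.0 kbps
    }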
@@ -1842,8 +1915,7 @@ QJsonObject AvatarData::toJson() const {
     return root;
 }
 
-void AvatarData::fromJson(const QJsonObject& json) {
-
+void AvatarData::fromJson(const QJsonObject& json, bool useFrameSkeleton) {
     int version;
     if (json.contains(JSON_AVATAR_VERSION)) {
         version = json[JSON_AVATAR_VERSION].toInt();
@@ -1865,7 +1937,7 @@ void AvatarData::fromJson(const QJsonObject& json) {
 
     if (json.contains(JSON_AVATAR_BODY_MODEL)) {
         auto bodyModelURL = json[JSON_AVATAR_BODY_MODEL].toString();
-        if (bodyModelURL != getSkeletonModelURL().toString()) {
+        if (useFrameSkeleton && bodyModelURL != getSkeletonModelURL().toString()) {
            setSkeletonModelURL(bodyModelURL);
         }
     }
@@ -1958,8 +2030,9 @@ QByteArray AvatarData::toFrame(const AvatarData& avatar) {
 }
 
 
-void AvatarData::fromFrame(const QByteArray& frameData, AvatarData& result) {
+void AvatarData::fromFrame(const QByteArray& frameData, AvatarData& result, bool useFrameSkeleton) {
     QJsonDocument doc = QJsonDocument::fromBinaryData(frameData);
 
 #ifdef WANT_JSON_DEBUG
     {
         QJsonObject obj = doc.object();
@@ -1967,7 +2040,7 @@ void AvatarData::fromFrame(const QByteArray& frameData, AvatarData& result) {
         qCDebug(avatars).noquote() << QJsonDocument(obj).toJson(QJsonDocument::JsonFormat::Indented);
     }
 #endif
-    result.fromJson(doc.object());
+    result.fromJson(doc.object(), useFrameSkeleton);
 }
 
 float AvatarData::getBodyYaw() const {
@@ -329,7 +329,7 @@ public:
 
     static const QString FRAME_NAME;
 
-    static void fromFrame(const QByteArray& frameData, AvatarData& avatar);
+    static void fromFrame(const QByteArray& frameData, AvatarData& avatar, bool useFrameSkeleton = true);
     static QByteArray toFrame(const AvatarData& avatar);
 
     AvatarData();
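Defaulting the new parameter to true keeps the change source-compatible: every existing two-argument fromFrame() call compiles unchanged with the old behavior, and only the agent's playback path needs to pass false. A hypothetical standalone illustration of the same pattern:

    #include <iostream>
    #include <string>

    // New trailing parameter with a default: old call sites are untouched.
    void fromFrame(const std::string& frameData, std::string& result, bool useFrameSkeleton = true) {
        if (useFrameSkeleton) {
            result = frameData;  // stand-in for applying the frame's skeleton URL
        }
    }

    int main() {
        std::string skeleton;
        fromFrame("bot.fst", skeleton);         // legacy two-argument call, unchanged
        fromFrame("bot.fst", skeleton, false);  // new opt-out used during recording playback
        std::cout << skeleton << "\n";          // prints "bot.fst"
    }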
@@ -380,8 +380,13 @@ public:
     void nextAttitude(glm::vec3 position, glm::quat orientation); // Can be safely called at any time.
     virtual void updateAttitude() {} // Tell skeleton mesh about changes
 
-    glm::quat getHeadOrientation() const { return _headData->getOrientation(); }
-    void setHeadOrientation(const glm::quat& orientation) { _headData->setOrientation(orientation); }
+    glm::quat getHeadOrientation() {
+        lazyInitHeadData();
+        return _headData->getOrientation();
+    }
+    void setHeadOrientation(const glm::quat& orientation) { if (_headData) _headData->setOrientation(orientation); }
+    void setLookAtPosition(const glm::vec3& lookAtPosition) { if (_headData) _headData->setLookAtPosition(lookAtPosition); }
+    void setBlendshapeCoefficients(const QVector<float>& blendshapeCoefficients) { if (_headData) _headData->setBlendshapeCoefficients(blendshapeCoefficients); }
 
     // access to Head().set/getMousePitch (degrees)
     float getHeadPitch() const { return _headData->getBasePitch(); }
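The header change replaces unchecked _headData dereferences with lazy initialization in the getter and null-guards in the setters, so a bot whose head data was never created cannot crash the process. A sketch of the pattern (lazyInitHeadData()'s body is an assumption; the diff only shows the call):

    #include <glm/gtc/quaternion.hpp>

    struct HeadData {
        glm::quat orientation { 1.0f, 0.0f, 0.0f, 0.0f };  // identity (w, x, y, z)
    };

    class AvatarDataSketch {
    public:
        ~AvatarDataSketch() { delete _headData; }

        glm::quat getHeadOrientation() {
            lazyInitHeadData();               // never dereference a null head
            return _headData->orientation;
        }
        void setHeadOrientation(const glm::quat& q) {
            if (_headData) { _headData->orientation = q; }  // tolerate a missing head
        }

    private:
        void lazyInitHeadData() {
            if (!_headData) { _headData = new HeadData(); }  // assumed behavior
        }
        HeadData* _headData { nullptr };      // matches the in-class initializer in the diff
    };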
@@ -513,7 +518,7 @@ public:
     TransformPointer getRecordingBasis() const;
     void setRecordingBasis(TransformPointer recordingBasis = TransformPointer());
     QJsonObject toJson() const;
-    void fromJson(const QJsonObject& json);
+    void fromJson(const QJsonObject& json, bool useFrameSkeleton = true);
 
     glm::vec3 getClientGlobalPosition() { return _globalPosition; }
     glm::vec3 getGlobalBoundingBoxCorner() { return _globalPosition + _globalBoundingBoxOffset - _globalBoundingBoxDimensions; }
@@ -528,7 +533,7 @@ public:
     Q_INVOKABLE glm::mat4 getControllerLeftHandMatrix() const;
     Q_INVOKABLE glm::mat4 getControllerRightHandMatrix() const;
 
-    float getDataRate(const QString& rateName = QString(""));
+    Q_INVOKABLE float getDataRate(const QString& rateName = QString(""));
 
     int getJointCount() { return _jointData.size(); }
 
@@ -596,7 +601,7 @@ protected:
     bool _forceFaceTrackerConnected;
     bool _hasNewJointData; // set in AvatarData, cleared in Avatar
 
-    HeadData* _headData;
+    HeadData* _headData { nullptr };
 
     QUrl _skeletonModelURL;
     bool _firstSkeletonCheck { true };
@@ -659,6 +664,21 @@ protected:
     RateCounter<> _faceTrackerRate;
     RateCounter<> _jointDataRate;
 
+    // Some rate data for outgoing data
+    RateCounter<> _globalPositionRateOutbound;
+    RateCounter<> _localPositionRateOutbound;
+    RateCounter<> _avatarBoundingBoxRateOutbound;
+    RateCounter<> _avatarOrientationRateOutbound;
+    RateCounter<> _avatarScaleRateOutbound;
+    RateCounter<> _lookAtPositionRateOutbound;
+    RateCounter<> _audioLoudnessRateOutbound;
+    RateCounter<> _sensorToWorldRateOutbound;
+    RateCounter<> _additionalFlagsRateOutbound;
+    RateCounter<> _parentInfoRateOutbound;
+    RateCounter<> _faceTrackerRateOutbound;
+    RateCounter<> _jointDataRateOutbound;
+
     glm::vec3 _globalBoundingBoxDimensions;
     glm::vec3 _globalBoundingBoxOffset;
 
@@ -537,6 +537,8 @@ FBXGeometry* FBXReader::extractFBXGeometry(const QVariantHash& mapping, const QS
     FBXGeometry* geometryPtr = new FBXGeometry;
     FBXGeometry& geometry = *geometryPtr;
 
+    geometry.originalURL = url;
+
     float unitScaleFactor = 1.0f;
     glm::vec3 ambientColor;
     QString hifiGlobalNodeID;
@@ -281,6 +281,7 @@ class FBXGeometry {
 public:
     using Pointer = std::shared_ptr<FBXGeometry>;
 
+    QString originalURL;
     QString author;
     QString applicationName; ///< the name of the application that generated the model
 