Merge remote-tracking branch 'upstream/master'

James B. Pollack 2015-10-09 14:54:13 -07:00
commit dcb5f2369d
7 changed files with 40 additions and 19 deletions

View file

@@ -80,12 +80,10 @@ const float MyAvatar::ZOOM_DEFAULT = 1.5f;
MyAvatar::MyAvatar(RigPointer rig) :
Avatar(rig),
_gravity(0.0f, 0.0f, 0.0f),
_wasPushing(false),
_isPushing(false),
_isBraking(false),
_boomLength(ZOOM_DEFAULT),
_trapDuration(0.0f),
_thrust(0.0f),
_keyboardMotorVelocity(0.0f),
_keyboardMotorTimescale(DEFAULT_KEYBOARD_MOTOR_TIMESCALE),
@@ -142,24 +140,46 @@ QByteArray MyAvatar::toByteArray(bool cullSmallChanges, bool sendAll) {
}
void MyAvatar::reset() {
_skeletonModel.reset();
getHead()->reset();
_targetVelocity = glm::vec3(0.0f);
setThrust(glm::vec3(0.0f));
// Reset the pitch and roll components of the avatar's orientation, preserve yaw direction
glm::vec3 eulers = safeEulerAngles(getOrientation());
eulers.x = 0.0f;
eulers.z = 0.0f;
setOrientation(glm::quat(eulers));
// Gather animation mode...
// This should be simpler when we have only graph animations always on.
bool isRig = _rig->getEnableRig();
// setting rig animation to true, below, will clear the graph animation menu item, so grab it now.
bool isGraph = _rig->getEnableAnimGraph() || Menu::getInstance()->isOptionChecked(MenuOption::EnableAnimGraph);
// ... and get to a sane configuration where other activity won't bother us.
qApp->setRawAvatarUpdateThreading(false);
_rig->disableHands = true;
setEnableRigAnimations(true);
// Reset dynamic state.
_wasPushing = _isPushing = _isBraking = _billboardValid = _goToPending = _straightingLean = false;
_skeletonModel.reset();
getHead()->reset();
_targetVelocity = glm::vec3(0.0f);
setThrust(glm::vec3(0.0f));
// Get fresh data, in case we're really slow and out of whack.
_hmdSensorMatrix = qApp->getHMDSensorPose();
_hmdSensorPosition = extractTranslation(_hmdSensorMatrix);
_hmdSensorOrientation = glm::quat_cast(_hmdSensorMatrix);
// Reset body position/orientation under the head.
auto newBodySensorMatrix = deriveBodyFromHMDSensor(); // Based on current cached HMD position/rotation.
auto worldBodyMatrix = _sensorToWorldMatrix * newBodySensorMatrix;
glm::vec3 worldBodyPos = extractTranslation(worldBodyMatrix);
glm::quat worldBodyRot = glm::normalize(glm::quat_cast(worldBodyMatrix));
// FIXME: Hack to retain the previous behavior wrt height.
// I'd like to make the body match head height, but that will have to wait for a separate PR.
worldBodyPos.y = getPosition().y;
setPosition(worldBodyPos);
setOrientation(worldBodyRot);
// If there is any discrepancy between positioning and the head (as there is in initial deriveBodyFromHMDSensor),
// we can make that right by setting _bodySensorMatrix = newBodySensorMatrix.
// However, doing so will make the head want to point to the previous body orientation, as cached above.
//_bodySensorMatrix = newBodySensorMatrix;
//updateSensorToWorldMatrix(); // Uses updated position/orientation and _bodySensorMatrix changes
_skeletonModel.simulate(0.1f); // non-zero
setEnableRigAnimations(false);
_skeletonModel.simulate(0.1f);
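The new reset() above leans on two ideas: zero the pitch and roll of the avatar's orientation while preserving yaw, and re-derive a world-space body transform from the cached HMD sensor matrix. Below is a minimal sketch of both, assuming GLM; extractTranslation() and safeEulerAngles() are engine helpers, so stand-ins are written out here (glm::eulerAngles is used in place of safeEulerAngles and does not handle the same gimbal edge cases).

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

// Stand-in for the engine's extractTranslation(): column 3 of a glm::mat4 holds the translation.
static glm::vec3 extractTranslationSketch(const glm::mat4& m) {
    return glm::vec3(m[3]);
}

// Keep only the yaw of an orientation, zeroing pitch (x) and roll (z),
// roughly what reset() does with safeEulerAngles()/setOrientation().
static glm::quat yawOnly(const glm::quat& orientation) {
    glm::vec3 eulers = glm::eulerAngles(orientation); // radians
    eulers.x = 0.0f;
    eulers.z = 0.0f;
    return glm::quat(eulers);
}

// Mirror of worldBodyMatrix = _sensorToWorldMatrix * newBodySensorMatrix above:
// compose the sensor-space body pose into world space, then split it into position and rotation.
static void deriveWorldBody(const glm::mat4& sensorToWorld, const glm::mat4& bodySensorMatrix,
                            glm::vec3& outWorldPos, glm::quat& outWorldRot) {
    glm::mat4 worldBodyMatrix = sensorToWorld * bodySensorMatrix;
    outWorldPos = extractTranslationSketch(worldBodyMatrix);
    outWorldRot = glm::normalize(glm::quat_cast(worldBodyMatrix));
}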

View file

@@ -282,8 +282,6 @@ private:
// results are in sensor space
glm::mat4 deriveBodyFromHMDSensor() const;
glm::vec3 _gravity;
float _driveKeys[MAX_DRIVE_KEYS];
bool _wasPushing;
bool _isPushing;
@@ -291,7 +289,6 @@ private:
float _boomLength;
float _trapDuration; // seconds that avatar has been trapped by collisions
glm::vec3 _thrust; // impulse accumulator for outside sources
glm::vec3 _keyboardMotorVelocity; // target local-frame velocity of avatar (keyboard)
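The keyboard motor members listed above pair a target velocity with a timescale (see DEFAULT_KEYBOARD_MOTOR_TIMESCALE in the first hunk). The motor code itself is not part of this diff; a timescale like this is typically applied by blending the current velocity toward the motor's target with strength deltaTime / timescale, and the sketch below shows only that generic pattern, not MyAvatar's actual implementation.

#include <algorithm>
#include <glm/glm.hpp>

// Hypothetical illustration: ease the current velocity toward a motor's target velocity.
// A short timescale reacts almost instantly; a long one reacts sluggishly.
glm::vec3 applyMotorSketch(const glm::vec3& currentVelocity, const glm::vec3& motorVelocity,
                           float motorTimescale, float deltaTime) {
    float blend = std::min(deltaTime / motorTimescale, 1.0f); // clamp so we never overshoot the target
    return currentVelocity + blend * (motorVelocity - currentVelocity);
}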

View file

@@ -663,7 +663,7 @@ void ParticleEffectEntityItem::stepSimulation(float deltaTime) {
}
// emit new particles, but only if we are emitting
- if (isEmittingParticles() && _emitRate > 0.0f && _lifespan > 0.0f && _polarStart <= _polarFinish) {
+ if (getIsEmitting() && _emitRate > 0.0f && _lifespan > 0.0f && _polarStart <= _polarFinish) {
float timeLeftInFrame = deltaTime;
while (_timeUntilNextEmit < timeLeftInFrame) {
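The loop above spreads emission evenly across a frame: _timeUntilNextEmit counts down against the time left in the frame, and every emission re-arms it from the emit rate. A self-contained sketch of that pattern, with illustrative names rather than the entity item's actual fields:

// Sketch of rate-based particle emission inside one simulation step.
void stepEmitterSketch(float deltaTime, float emitRate /* particles per second */,
                       float& timeUntilNextEmit, int& emittedCount) {
    if (emitRate <= 0.0f) {
        return;
    }
    float timeLeftInFrame = deltaTime;
    while (timeUntilNextEmit < timeLeftInFrame) {
        timeLeftInFrame -= timeUntilNextEmit;
        ++emittedCount;                      // a real emitter would spawn a particle here
        timeUntilNextEmit = 1.0f / emitRate; // re-arm for the next particle
    }
    // Carry the remainder into the next frame so the long-run rate stays at emitRate.
    timeUntilNextEmit -= timeLeftInFrame;
}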

View file

@@ -30,7 +30,7 @@ private slots:
void onDownloadProgress(qint64 bytesReceived, qint64 bytesTotal);
private:
- AssetRequest* _assetRequest;
+ AssetRequest* _assetRequest { nullptr };
};
#endif
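The one-line change above replaces an uninitialized raw pointer with a C++11 default member initializer, so every constructor that does not assign _assetRequest still starts it at nullptr. A minimal illustration of the idiom on a hypothetical class:

struct Request; // forward declaration only; hypothetical type

class Downloader {
public:
    Downloader() = default;         // no constructor body needed...
private:
    Request* _request { nullptr };  // ...the in-class initializer guarantees a null start
};

Keeping the default next to the declaration is safer than relying on each constructor to remember the assignment.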

View file

@@ -91,6 +91,7 @@ SendQueue& Connection::getSendQueue() {
// set defaults on the send queue from our congestion control object and estimatedTimeout()
_sendQueue->setPacketSendPeriod(_congestionControl->_packetSendPeriod);
_sendQueue->setSyncInterval(_synInterval);
_sendQueue->setEstimatedTimeout(estimatedTimeout());
_sendQueue->setFlowWindowSize(std::min(_flowWindowSize, (int) _congestionControl->_congestionWindowSize));
}
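getSendQueue() above seeds a freshly created queue with the congestion controller's packet send period, the SYN interval, an RTT-based estimated timeout, and a flow window capped by the congestion window. The timeout formula itself is not shown in this diff; the sketch below assumes the common UDT-style RTT + 4 × RTT-variance shape and is only an approximation of what estimatedTimeout() might return.

#include <algorithm>

// Assumed shape of an RTT-based timeout estimate, in microseconds (not the library's actual formula).
int estimatedTimeoutSketch(int rttMicroseconds, int rttVarianceMicroseconds) {
    return rttMicroseconds + 4 * rttVarianceMicroseconds;
}

// The effective flow window is the negotiated window capped by the congestion window,
// mirroring the std::min() call in the hunk above.
int effectiveFlowWindow(int flowWindowSize, int congestionWindowSize) {
    return std::min(flowWindowSize, congestionWindowSize);
}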

View file

@@ -439,7 +439,8 @@ bool SendQueue::isInactive(bool sentAPacket) {
} else {
// We think the client is still waiting for data (based on the sequence number gap)
// Let's wait either for a response from the client or until the estimated timeout
- auto waitDuration = std::chrono::microseconds(_estimatedTimeout);
+ // (plus the sync interval to allow the client to respond) has elapsed
+ auto waitDuration = std::chrono::microseconds(_estimatedTimeout + _syncInterval);
// use our condition_variable_any to wait
auto cvStatus = _emptyCondition.wait_for(locker, waitDuration);
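The wait above blocks for at most the estimated timeout plus the sync interval, waking early if the receiver responds. A self-contained sketch of that wait pattern with simplified names (the real SendQueue tracks more state and handles spurious wakeups in its surrounding loop):

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>

class ResponseWaiterSketch {
public:
    // Returns true if the full wait elapsed without a notification (i.e. a timeout).
    bool waitForResponse() {
        std::unique_lock<std::mutex> locker(_mutex);
        auto waitDuration = std::chrono::microseconds(_estimatedTimeout + _syncInterval);
        return _emptyCondition.wait_for(locker, waitDuration) == std::cv_status::timeout;
    }

    void notifyResponseReceived() { _emptyCondition.notify_one(); }

private:
    std::mutex _mutex;
    std::condition_variable_any _emptyCondition;   // _any, so it also works with non-std lock types
    std::atomic<int> _estimatedTimeout { 100000 }; // microseconds; placeholder default
    std::atomic<int> _syncInterval { 10000 };      // microseconds; placeholder default
};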

View file

@@ -63,6 +63,7 @@ public:
void setPacketSendPeriod(int newPeriod) { _packetSendPeriod = newPeriod; }
void setEstimatedTimeout(int estimatedTimeout) { _estimatedTimeout = estimatedTimeout; }
void setSyncInterval(int syncInterval) { _syncInterval = syncInterval; }
public slots:
void stop();
@@ -114,6 +115,7 @@ private:
std::atomic<State> _state { State::NotStarted };
std::atomic<int> _estimatedTimeout { 0 }; // Estimated timeout, set from CC
std::atomic<int> _syncInterval { udt::DEFAULT_SYN_INTERVAL_USECS }; // Sync interval, set from CC
std::atomic<int> _timeoutExpiryCount { 0 }; // The number of times the timeout has expired without response from client
std::atomic<uint64_t> _lastReceiverResponse { 0 }; // Timestamp for the last time we got new data from the receiver (ACK/NAK)