Added camera data to AvatarData class, and include it in interface
parent 6286e1b54f
commit 9b34427570

3 changed files with 100 additions and 2 deletions
@@ -85,6 +85,7 @@
 using namespace std;
 
 void reshape(int width, int height); // will be defined below
+void loadViewFrustum(ViewFrustum& viewFrustum); // will be defined below
 
 
 pthread_t networkReceiveThread;
@@ -522,6 +523,22 @@ void updateAvatar(float frametime)
     myAvatar.setAverageLoudness(averageLoudness);
 #endif
 
+    // Update Avatar with latest camera and view frustum data...
+    // NOTE: we get this from the view frustum, to make it simpler, since the
+    // loadViewFrustum() method will get the correct details from the camera.
+    // We could optimize this to not actually load the viewFrustum, since we don't
+    // actually need to calculate the view frustum planes to send these details
+    // to the server.
+    loadViewFrustum(::viewFrustum);
+    myAvatar.setCameraPosition(::viewFrustum.getPosition());
+    myAvatar.setCameraDirection(::viewFrustum.getDirection());
+    myAvatar.setCameraUp(::viewFrustum.getUp());
+    myAvatar.setCameraRight(::viewFrustum.getRight());
+    myAvatar.setCameraFov(::viewFrustum.getFieldOfView());
+    myAvatar.setCameraAspectRatio(::viewFrustum.getAspectRatio());
+    myAvatar.setCameraNearClip(::viewFrustum.getNearClip());
+    myAvatar.setCameraFarClip(::viewFrustum.getFarClip());
+
     // Send my stream of head/hand data to the avatar mixer and voxel server
     unsigned char broadcastString[200];
     *broadcastString = PACKET_HEADER_HEAD_DATA;
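The added block runs once per frame in updateAvatar(): the freshly loaded view frustum already carries the camera's position, orientation vectors, and projection parameters, and the new lines simply mirror them into myAvatar so they can ride along in the avatar broadcast. A minimal refactoring sketch, not part of the commit, that groups the eight setter calls into one helper; it uses only the ViewFrustum getters and AvatarData setters that appear elsewhere in this diff (header names assumed):

    #include "AvatarData.h"
    #include "ViewFrustum.h"

    // Hypothetical helper: copy the frustum-derived camera parameters into an avatar.
    void copyViewFrustumToAvatar(ViewFrustum& frustum, AvatarData& avatar) {
        avatar.setCameraPosition(frustum.getPosition());
        avatar.setCameraDirection(frustum.getDirection());
        avatar.setCameraUp(frustum.getUp());
        avatar.setCameraRight(frustum.getRight());
        avatar.setCameraFov(frustum.getFieldOfView());
        avatar.setCameraAspectRatio(frustum.getAspectRatio());
        avatar.setCameraNearClip(frustum.getNearClip());
        avatar.setCameraFarClip(frustum.getFarClip());
    }

    // Possible call site in updateAvatar(), after loadViewFrustum(::viewFrustum):
    //     copyViewFrustumToAvatar(::viewFrustum, myAvatar);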
@@ -559,7 +576,7 @@ void updateAvatar(float frametime)
 }
 
 /////////////////////////////////////////////////////////////////////////////////////
-// renderViewFrustum()
+// loadViewFrustum()
 //
 // Description: this will load the view frustum bounds for EITHER the head
 // or the "myCamera".
@@ -36,7 +36,15 @@ int unpackFloatAngleFromTwoByte(uint16_t* byteAnglePointer, float* destinationPo
 AvatarData::AvatarData() :
     _bodyYaw(-90.0),
     _bodyPitch(0.0),
-    _bodyRoll(0.0) {
+    _bodyRoll(0.0),
+    _cameraPosition(0,0,0),
+    _cameraDirection(0,0,0),
+    _cameraUp(0,0,0),
+    _cameraRight(0,0,0),
+    _cameraFov(0.0f),
+    _cameraAspectRatio(0.0f),
+    _cameraNearClip(0.0f),
+    _cameraFarClip(0.0f) {
 
 }
@@ -64,14 +72,37 @@ int AvatarData::getBroadcastData(unsigned char* destinationBuffer) {
 
     memcpy(destinationBuffer, &_handPosition, sizeof(float) * 3);
     destinationBuffer += sizeof(float) * 3;
 
+    // camera details
+    memcpy(destinationBuffer, &_cameraPosition, sizeof(_cameraPosition));
+    destinationBuffer += sizeof(_cameraPosition);
+    memcpy(destinationBuffer, &_cameraDirection, sizeof(_cameraDirection));
+    destinationBuffer += sizeof(_cameraDirection);
+    memcpy(destinationBuffer, &_cameraRight, sizeof(_cameraRight));
+    destinationBuffer += sizeof(_cameraRight);
+    memcpy(destinationBuffer, &_cameraUp, sizeof(_cameraUp));
+    destinationBuffer += sizeof(_cameraUp);
+    memcpy(destinationBuffer, &_cameraFov, sizeof(_cameraFov));
+    destinationBuffer += sizeof(_cameraFov);
+    memcpy(destinationBuffer, &_cameraAspectRatio, sizeof(_cameraAspectRatio));
+    destinationBuffer += sizeof(_cameraAspectRatio);
+    memcpy(destinationBuffer, &_cameraNearClip, sizeof(_cameraNearClip));
+    destinationBuffer += sizeof(_cameraNearClip);
+    memcpy(destinationBuffer, &_cameraFarClip, sizeof(_cameraFarClip));
+    destinationBuffer += sizeof(_cameraFarClip);
+
     //printLog("%f, %f, %f\n", _handPosition.x, _handPosition.y, _handPosition.z);
 
+    //printf("AvatarData::getBroadcastData() numBytes=%ld\n",(destinationBuffer - bufferStart));
+
     return destinationBuffer - bufferStart;
 }
 
 // called on the other agents - assigns it to my views of the others
 void AvatarData::parseData(unsigned char* sourceBuffer, int numBytes) {
 
+    //printf("AvatarData::parseData() numBytes=%d\n",numBytes);
+
     // increment to push past the packet header
     sourceBuffer++;
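The camera fields are appended with raw memcpy in a fixed order (position, direction, right, up, fov, aspect ratio, near clip, far clip), so parseData() in the next hunk has to read them back in exactly the same order, and both ends must agree on float size and endianness because the bytes are copied verbatim. A back-of-the-envelope sketch, not part of the commit, of how much the camera details add to each packet, under the same packing assumption the memcpy approach itself relies on:

    #include <cstddef>
    #include <glm/glm.hpp>

    // Raw copies of glm::vec3 only round-trip if the type is three tightly packed floats.
    static_assert(sizeof(glm::vec3) == 3 * sizeof(float),
                  "raw memcpy of glm::vec3 assumes no padding");

    // Extra bytes the camera details add to every broadcast packet.
    constexpr std::size_t cameraPayloadBytes =
        4 * sizeof(glm::vec3)   // position, direction, right, up
        + 4 * sizeof(float);    // fov, aspect ratio, near clip, far clip
    // With 4-byte floats this is 48 + 16 = 64 bytes.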
@@ -85,6 +116,24 @@ void AvatarData::parseData(unsigned char* sourceBuffer, int numBytes) {
     memcpy(&_handPosition, sourceBuffer, sizeof(float) * 3);
     sourceBuffer += sizeof(float) * 3;
 
+    // camera details
+    memcpy(&_cameraPosition, sourceBuffer, sizeof(_cameraPosition));
+    sourceBuffer += sizeof(_cameraPosition);
+    memcpy(&_cameraDirection, sourceBuffer, sizeof(_cameraDirection));
+    sourceBuffer += sizeof(_cameraDirection);
+    memcpy(&_cameraRight, sourceBuffer, sizeof(_cameraRight));
+    sourceBuffer += sizeof(_cameraRight);
+    memcpy(&_cameraUp, sourceBuffer, sizeof(_cameraUp));
+    sourceBuffer += sizeof(_cameraUp);
+    memcpy(&_cameraFov, sourceBuffer, sizeof(_cameraFov));
+    sourceBuffer += sizeof(_cameraFov);
+    memcpy(&_cameraAspectRatio, sourceBuffer, sizeof(_cameraAspectRatio));
+    sourceBuffer += sizeof(_cameraAspectRatio);
+    memcpy(&_cameraNearClip, sourceBuffer, sizeof(_cameraNearClip));
+    sourceBuffer += sizeof(_cameraNearClip);
+    memcpy(&_cameraFarClip, sourceBuffer, sizeof(_cameraFarClip));
+    sourceBuffer += sizeof(_cameraFarClip);
+
     //printLog( "_bodyYaw = %f", _bodyYaw );
 
     //printLog("%f, %f, %f\n", _handPosition.x, _handPosition.y, _handPosition.z);
@@ -37,6 +37,26 @@ public:
 
     float getBodyRoll();
     void setBodyRoll(float bodyRoll);
 
+    // getters for camera details
+    const glm::vec3& getCameraPosition() const { return _cameraPosition; };
+    const glm::vec3& getCameraDirection() const { return _cameraDirection; }
+    const glm::vec3& getCameraUp() const { return _cameraUp; }
+    const glm::vec3& getCameraRight() const { return _cameraRight; }
+    float getCameraFov() const { return _cameraFov; }
+    float getCameraAspectRatio() const { return _cameraAspectRatio; }
+    float getCameraNearClip() const { return _cameraNearClip; }
+    float getCameraFarClip() const { return _cameraFarClip; }
+
+    // setters for camera details
+    void setCameraPosition(const glm::vec3& position) { _cameraPosition = position; };
+    void setCameraDirection(const glm::vec3& direction) { _cameraDirection = direction; }
+    void setCameraUp(const glm::vec3& up) { _cameraUp = up; }
+    void setCameraRight(const glm::vec3& right) { _cameraRight = right; }
+    void setCameraFov(float fov) { _cameraFov = fov; }
+    void setCameraAspectRatio(float aspectRatio) { _cameraAspectRatio = aspectRatio; }
+    void setCameraNearClip(float nearClip) { _cameraNearClip = nearClip; }
+    void setCameraFarClip(float farClip) { _cameraFarClip = farClip; }
+
 protected:
     glm::vec3 _bodyPosition;
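With these accessors, code that receives another avatar's data can reason about what that avatar's camera sees, which is presumably why the details are sent to the server in the first place. A hypothetical receiving-side sketch, not part of the commit, that tests whether a world-space point lies inside an avatar's camera frustum; it assumes getCameraFov() holds a vertical field of view in degrees, a convention the commit does not state:

    #include <cmath>
    #include <glm/glm.hpp>
    #include "AvatarData.h"

    // Rough visibility test against another avatar's camera frustum (illustrative only).
    bool pointInAvatarCameraFrustum(const AvatarData& avatar, const glm::vec3& point) {
        glm::vec3 toPoint = point - avatar.getCameraPosition();

        // Express the offset in the camera's own basis: depth along the view axis,
        // sideways along the right vector, vertically along the up vector.
        float depth = glm::dot(toPoint, avatar.getCameraDirection());
        float side  = glm::dot(toPoint, avatar.getCameraRight());
        float up    = glm::dot(toPoint, avatar.getCameraUp());

        if (depth < avatar.getCameraNearClip() || depth > avatar.getCameraFarClip()) {
            return false; // in front of the near plane or beyond the far plane
        }
        float halfHeight = depth * std::tan(glm::radians(avatar.getCameraFov()) * 0.5f);
        float halfWidth  = halfHeight * avatar.getCameraAspectRatio();
        return std::fabs(side) <= halfWidth && std::fabs(up) <= halfHeight;
    }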
@@ -45,6 +65,18 @@ protected:
     float _bodyYaw;
     float _bodyPitch;
     float _bodyRoll;
 
+    // camera details for the avatar
+    glm::vec3 _cameraPosition;
+
+    // can we describe this in less space? For example, a Quaternion? or Euler angles?
+    glm::vec3 _cameraDirection;
+    glm::vec3 _cameraUp;
+    glm::vec3 _cameraRight;
+    float _cameraFov;
+    float _cameraAspectRatio;
+    float _cameraNearClip;
+    float _cameraFarClip;
+
 };
 
 #endif /* defined(__hifi__AvatarData__) */