Merge branch 'master' of github.com:highfidelity/hifi into feat/create/editMultipleProperties

# Conflicts:
#	scripts/system/html/js/entityProperties.js

Commit dc9f8895f8: 64 changed files with 3772 additions and 860 deletions
Changed paths:

- CODING_STANDARD.md
- CONTRIBUTING.md
- domain-server/src
- interface/resources/avatar
- interface/src
- libraries/animation/src
- libraries/avatars-renderer/src/avatars-renderer
- libraries/avatars/src
- libraries/baking/src
- libraries/entities-renderer/src
- libraries/image
- libraries/material-networking/src/material-networking
- libraries/networking/src
- libraries/render-utils/src (AntialiasingEffect.cpp, DeferredLightingEffect.cpp, DeferredLightingEffect.h, LightAmbient.slh, LightingModel.cpp, LightingModel.h, Model.cpp, RenderCommonTask.cpp, RenderDeferredTask.cpp, RenderForwardTask.cpp)
- libraries/render-utils
- libraries/shared/src
- scripts/system
- tools
CODING_STANDARD.md (new file, 1008 lines)

File diff suppressed because it is too large.
CONTRIBUTING.md

@@ -16,7 +16,7 @@ Contributing
       git checkout -b new_branch_name
       ```
 4. Code
-     * Follow the [coding standard](https://docs.highfidelity.com/build-guide/coding-standards)
+     * Follow the [coding standard](CODING_STANDARD.md)
 5. Commit
      * Use [well formed commit messages](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html)
 6. Update your branch
domain-server/src/DomainGatekeeper.cpp

@@ -1010,7 +1010,7 @@ void DomainGatekeeper::refreshGroupsCache() {
     nodeList->eachNode([this](const SharedNodePointer& node) {
         if (!node->getPermissions().isAssignment) {
             // this node is an agent
-            const QString& verifiedUserName = node->getPermissions().getVerifiedUserName();
+            QString verifiedUserName = node->getPermissions().getVerifiedUserName();
             if (!verifiedUserName.isEmpty()) {
                 getGroupMemberships(verifiedUserName);
             }
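The motivation for this one-line change is easy to miss: `getVerifiedUserName()` returns a reference into the node's permissions object, and a `const QString&` bound to it stays coupled to storage the callee may replace. A minimal sketch of the hazard the copy avoids, with hypothetical stand-in types (not Overte code):

```cpp
#include <QString>

// Hypothetical stand-ins: why copying the returned QString is safer than
// binding a const reference to it.
struct Permissions {
    QString verifiedUserName;
    const QString& getVerifiedUserName() const { return verifiedUserName; }
};

void process(Permissions& perms) {
    // Copy: `name` owns its data and survives later mutation of `perms`.
    QString name = perms.getVerifiedUserName();

    // A `const QString& nameRef = perms.getVerifiedUserName();` would instead
    // observe this update (or dangle if the permissions object is destroyed).
    perms.verifiedUserName = QString();  // simulate a concurrent update

    (void)name;  // `name` still holds the original value here
}
```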
interface/resources/avatar/avatar-animation.json

@@ -197,260 +197,100 @@
                 "id": "rightHandStateMachine",
                 "type": "stateMachine",
                 "data": {
-                    "currentState": "rightHandGrasp",
+                    "currentState": "rightHandAnimNone",
                     "states": [
                         {
-                            "id": "rightHandGrasp",
-                            "interpTarget": 3,
+                            "id": "rightHandAnimNone",
+                            "interpTarget": 1,
                             "interpDuration": 3,
                             "transitions": [
-                                { "var": "isRightIndexPoint", "state": "rightIndexPoint" },
-                                { "var": "isRightThumbRaise", "state": "rightThumbRaise" },
-                                { "var": "isRightIndexPointAndThumbRaise", "state": "rightIndexPointAndThumbRaise" }
+                                { "var": "rightHandAnimA", "state": "rightHandAnimA" },
+                                { "var": "rightHandAnimB", "state": "rightHandAnimB" }
                             ]
                         },
                         {
-                            "id": "rightIndexPoint",
-                            "interpTarget": 15,
+                            "id": "rightHandAnimA",
+                            "interpTarget": 1,
                             "interpDuration": 3,
                             "transitions": [
-                                { "var": "isRightHandGrasp", "state": "rightHandGrasp" },
-                                { "var": "isRightThumbRaise", "state": "rightThumbRaise" },
-                                { "var": "isRightIndexPointAndThumbRaise", "state": "rightIndexPointAndThumbRaise" }
+                                { "var": "rightHandAnimNone", "state": "rightHandAnimNone" },
+                                { "var": "rightHandAnimB", "state": "rightHandAnimB" }
                             ]
                         },
                         {
-                            "id": "rightThumbRaise",
-                            "interpTarget": 15,
+                            "id": "rightHandAnimB",
+                            "interpTarget": 1,
                             "interpDuration": 3,
                             "transitions": [
-                                { "var": "isRightHandGrasp", "state": "rightHandGrasp" },
-                                { "var": "isRightIndexPoint", "state": "rightIndexPoint" },
-                                { "var": "isRightIndexPointAndThumbRaise", "state": "rightIndexPointAndThumbRaise" }
-                            ]
-                        },
-                        {
-                            "id": "rightIndexPointAndThumbRaise",
-                            "interpTarget": 15,
-                            "interpDuration": 3,
-                            "transitions": [
-                                { "var": "isRightHandGrasp", "state": "rightHandGrasp" },
-                                { "var": "isRightIndexPoint", "state": "rightIndexPoint" },
-                                { "var": "isRightThumbRaise", "state": "rightThumbRaise" }
+                                { "var": "rightHandAnimNone", "state": "rightHandAnimNone" },
+                                { "var": "rightHandAnimA", "state": "rightHandAnimA" }
                             ]
                         }
                     ]
                 },
                 "children": [
                     {
-                        "id": "rightHandGrasp",
-                        "type": "blendLinear",
-                        "data": {
-                            "alpha": 0.0,
-                            "alphaVar": "rightHandGraspAlpha"
-                        },
-                        "children": [
-                            {
-                                "id": "rightHandGraspOpen",
-                                "type": "clip",
-                                "data": {
-                                    "url": "qrc:///avatar/animations/hydra_pose_open_right.fbx",
-                                    "startFrame": 0.0,
-                                    "endFrame": 0.0,
-                                    "timeScale": 1.0,
-                                    "loopFlag": true
-                                },
-                                "children": []
-                            },
-                            {
-                                "id": "rightHandGraspClosed",
-                                "type": "clip",
-                                "data": {
-                                    "url": "qrc:///avatar/animations/hydra_pose_closed_right.fbx",
-                                    "startFrame": 0.0,
-                                    "endFrame": 0.0,
-                                    "timeScale": 1.0,
-                                    "loopFlag": true
-                                },
-                                "children": []
-                            }
-                        ]
-                    },
-                    {
-                        "id": "rightIndexPoint",
-                        "type": "blendLinear",
-                        "data": {
-                            "alpha": 0.0,
-                            "alphaVar": "rightHandGraspAlpha"
-                        },
-                        "children": [
-                            {
-                                "id": "rightIndexPointOpen",
-                                "type": "clip",
-                                "data": {
-                                    "url": "qrc:///avatar/animations/touch_point_open_right.fbx",
-                                    "startFrame": 15.0,
-                                    "endFrame": 15.0,
-                                    "timeScale": 1.0,
-                                    "loopFlag": true
-                                },
-                                "children": []
-                            },
-                            {
-                                "id": "rightIndexPointClosed",
-                                "type": "clip",
-                                "data": {
-                                    "url": "qrc:///avatar/animations/touch_point_closed_right.fbx",
-                                    "startFrame": 15.0,
-                                    "endFrame": 15.0,
-                                    "timeScale": 1.0,
-                                    "loopFlag": true
-                                },
-                                "children": []
-                            }
-                        ]
-                    },
-                    {
-                        "id": "rightThumbRaise",
-                        "type": "blendLinear",
-                        "data": {
-                            "alpha": 0.0,
-                            "alphaVar": "rightHandGraspAlpha"
-                        },
-                        "children": [
-                            {
-                                "id": "rightThumbRaiseOpen",
-                                "type": "clip",
-                                "data": {
-                                    "url": "qrc:///avatar/animations/touch_thumb_open_right.fbx",
-                                    "startFrame": 15.0,
-                                    "endFrame": 15.0,
-                                    "timeScale": 1.0,
-                                    "loopFlag": true
-                                },
-                                "children": []
-                            },
-                            {
-                                "id": "rightThumbRaiseClosed",
-                                "type": "clip",
-                                "data": {
-                                    "url": "qrc:///avatar/animations/touch_thumb_closed_right.fbx",
-                                    "startFrame": 15.0,
-                                    "endFrame": 15.0,
-                                    "timeScale": 1.0,
-                                    "loopFlag": true
-                                },
-                                "children": []
-                            }
-                        ]
-                    },
-                    {
-                        "id": "rightIndexPointAndThumbRaise",
-                        "type": "blendLinear",
-                        "data": {
-                            "alpha": 0.0,
-                            "alphaVar": "rightHandGraspAlpha"
-                        },
-                        "children": [
-                            {
-                                "id": "rightIndexPointAndThumbRaiseOpen",
-                                "type": "clip",
-                                "data": {
-                                    "url": "qrc:///avatar/animations/touch_thumb_point_open_right.fbx",
-                                    "startFrame": 15.0,
-                                    "endFrame": 15.0,
-                                    "timeScale": 1.0,
-                                    "loopFlag": true
-                                },
-                                "children": []
-                            },
-                            {
-                                "id": "rightIndexPointAndThumbRaiseClosed",
-                                "type": "clip",
-                                "data": {
-                                    "url": "qrc:///avatar/animations/touch_thumb_point_closed_right.fbx",
-                                    "startFrame": 15.0,
-                                    "endFrame": 15.0,
-                                    "timeScale": 1.0,
-                                    "loopFlag": true
-                                },
-                                "children": []
-                            }
-                        ]
-                    }
-                ]
-            },
-            {
-                "id": "leftHandOverlay",
-                "type": "overlay",
-                "data": {
-                    "alpha": 0.0,
-                    "boneSet": "leftHand",
-                    "alphaVar": "leftHandOverlayAlpha"
-                },
-                "children": [
                     {
-                        "id": "leftHandStateMachine",
+                        "id": "rightHandAnimNone",
                         "type": "stateMachine",
                         "data": {
-                            "currentState": "leftHandGrasp",
+                            "currentState": "rightHandGrasp",
                             "states": [
                                 {
-                                    "id": "leftHandGrasp",
+                                    "id": "rightHandGrasp",
                                     "interpTarget": 3,
                                     "interpDuration": 3,
                                     "transitions": [
-                                        { "var": "isLeftIndexPoint", "state": "leftIndexPoint" },
-                                        { "var": "isLeftThumbRaise", "state": "leftThumbRaise" },
-                                        { "var": "isLeftIndexPointAndThumbRaise", "state": "leftIndexPointAndThumbRaise" }
+                                        { "var": "isRightIndexPoint", "state": "rightIndexPoint" },
+                                        { "var": "isRightThumbRaise", "state": "rightThumbRaise" },
+                                        { "var": "isRightIndexPointAndThumbRaise", "state": "rightIndexPointAndThumbRaise" }
                                     ]
                                 },
                                 {
-                                    "id": "leftIndexPoint",
+                                    "id": "rightIndexPoint",
                                     "interpTarget": 15,
                                     "interpDuration": 3,
                                     "transitions": [
-                                        { "var": "isLeftHandGrasp", "state": "leftHandGrasp" },
-                                        { "var": "isLeftThumbRaise", "state": "leftThumbRaise" },
-                                        { "var": "isLeftIndexPointAndThumbRaise", "state": "leftIndexPointAndThumbRaise" }
+                                        { "var": "isRightHandGrasp", "state": "rightHandGrasp" },
+                                        { "var": "isRightThumbRaise", "state": "rightThumbRaise" },
+                                        { "var": "isRightIndexPointAndThumbRaise", "state": "rightIndexPointAndThumbRaise" }
                                     ]
                                 },
                                 {
-                                    "id": "leftThumbRaise",
+                                    "id": "rightThumbRaise",
                                     "interpTarget": 15,
                                     "interpDuration": 3,
                                     "transitions": [
-                                        { "var": "isLeftHandGrasp", "state": "leftHandGrasp" },
-                                        { "var": "isLeftIndexPoint", "state": "leftIndexPoint" },
-                                        { "var": "isLeftIndexPointAndThumbRaise", "state": "leftIndexPointAndThumbRaise" }
+                                        { "var": "isRightHandGrasp", "state": "rightHandGrasp" },
+                                        { "var": "isRightIndexPoint", "state": "rightIndexPoint" },
+                                        { "var": "isRightIndexPointAndThumbRaise", "state": "rightIndexPointAndThumbRaise" }
                                     ]
                                 },
                                 {
-                                    "id": "leftIndexPointAndThumbRaise",
+                                    "id": "rightIndexPointAndThumbRaise",
                                     "interpTarget": 15,
                                     "interpDuration": 3,
                                     "transitions": [
-                                        { "var": "isLeftHandGrasp", "state": "leftHandGrasp" },
-                                        { "var": "isLeftIndexPoint", "state": "leftIndexPoint" },
-                                        { "var": "isLeftThumbRaise", "state": "leftThumbRaise" }
+                                        { "var": "isRightHandGrasp", "state": "rightHandGrasp" },
+                                        { "var": "isRightIndexPoint", "state": "rightIndexPoint" },
+                                        { "var": "isRightThumbRaise", "state": "rightThumbRaise" }
                                     ]
                                 }
                             ]
                         },
                         "children": [
                             {
-                                "id": "leftHandGrasp",
+                                "id": "rightHandGrasp",
                                 "type": "blendLinear",
                                 "data": {
                                     "alpha": 0.0,
-                                    "alphaVar": "leftHandGraspAlpha"
+                                    "alphaVar": "rightHandGraspAlpha"
                                 },
                                 "children": [
                                     {
-                                        "id": "leftHandGraspOpen",
+                                        "id": "rightHandGraspOpen",
                                         "type": "clip",
                                         "data": {
-                                            "url": "qrc:///avatar/animations/hydra_pose_open_left.fbx",
+                                            "url": "qrc:///avatar/animations/hydra_pose_open_right.fbx",
                                             "startFrame": 0.0,
                                             "endFrame": 0.0,
                                             "timeScale": 1.0,

@@ -459,12 +299,12 @@
                                         "children": []
                                     },
                                     {
-                                        "id": "leftHandGraspClosed",
+                                        "id": "rightHandGraspClosed",
                                         "type": "clip",
                                         "data": {
-                                            "url": "qrc:///avatar/animations/hydra_pose_closed_left.fbx",
-                                            "startFrame": 10.0,
-                                            "endFrame": 10.0,
+                                            "url": "qrc:///avatar/animations/hydra_pose_closed_right.fbx",
+                                            "startFrame": 0.0,
+                                            "endFrame": 0.0,
                                             "timeScale": 1.0,
                                             "loopFlag": true
                                         },

@@ -473,18 +313,18 @@
                                 ]
                             },
                             {
-                                "id": "leftIndexPoint",
+                                "id": "rightIndexPoint",
                                 "type": "blendLinear",
                                 "data": {
                                     "alpha": 0.0,
-                                    "alphaVar": "leftHandGraspAlpha"
+                                    "alphaVar": "rightHandGraspAlpha"
                                 },
                                 "children": [
                                     {
-                                        "id": "leftIndexPointOpen",
+                                        "id": "rightIndexPointOpen",
                                         "type": "clip",
                                         "data": {
-                                            "url": "qrc:///avatar/animations/touch_point_open_left.fbx",
+                                            "url": "qrc:///avatar/animations/touch_point_open_right.fbx",
                                             "startFrame": 15.0,
                                             "endFrame": 15.0,
                                             "timeScale": 1.0,

@@ -493,10 +333,10 @@
                                         "children": []
                                     },
                                     {
-                                        "id": "leftIndexPointClosed",
+                                        "id": "rightIndexPointClosed",
                                         "type": "clip",
                                         "data": {
-                                            "url": "qrc:///avatar/animations/touch_point_closed_left.fbx",
+                                            "url": "qrc:///avatar/animations/touch_point_closed_right.fbx",
                                             "startFrame": 15.0,
                                             "endFrame": 15.0,
                                             "timeScale": 1.0,

@@ -507,18 +347,18 @@
                                 ]
                             },
                             {
-                                "id": "leftThumbRaise",
+                                "id": "rightThumbRaise",
                                 "type": "blendLinear",
                                 "data": {
                                     "alpha": 0.0,
-                                    "alphaVar": "leftHandGraspAlpha"
+                                    "alphaVar": "rightHandGraspAlpha"
                                 },
                                 "children": [
                                     {
-                                        "id": "leftThumbRaiseOpen",
+                                        "id": "rightThumbRaiseOpen",
                                         "type": "clip",
                                         "data": {
-                                            "url": "qrc:///avatar/animations/touch_thumb_open_left.fbx",
+                                            "url": "qrc:///avatar/animations/touch_thumb_open_right.fbx",
                                             "startFrame": 15.0,
                                             "endFrame": 15.0,
                                             "timeScale": 1.0,

@@ -527,10 +367,10 @@
                                         "children": []
                                     },
                                     {
-                                        "id": "leftThumbRaiseClosed",
+                                        "id": "rightThumbRaiseClosed",
                                         "type": "clip",
                                         "data": {
-                                            "url": "qrc:///avatar/animations/touch_thumb_closed_left.fbx",
+                                            "url": "qrc:///avatar/animations/touch_thumb_closed_right.fbx",
                                             "startFrame": 15.0,
                                             "endFrame": 15.0,
                                             "timeScale": 1.0,

@@ -541,18 +381,18 @@
                                 ]
                             },
                             {
-                                "id": "leftIndexPointAndThumbRaise",
+                                "id": "rightIndexPointAndThumbRaise",
                                 "type": "blendLinear",
                                 "data": {
                                     "alpha": 0.0,
-                                    "alphaVar": "leftHandGraspAlpha"
+                                    "alphaVar": "rightHandGraspAlpha"
                                 },
                                 "children": [
                                     {
-                                        "id": "leftIndexPointAndThumbRaiseOpen",
+                                        "id": "rightIndexPointAndThumbRaiseOpen",
                                         "type": "clip",
                                         "data": {
-                                            "url": "qrc:///avatar/animations/touch_thumb_point_open_left.fbx",
+                                            "url": "qrc:///avatar/animations/touch_thumb_point_open_right.fbx",
                                             "startFrame": 15.0,
                                             "endFrame": 15.0,
                                             "timeScale": 1.0,

@@ -561,10 +401,10 @@
                                         "children": []
                                     },
                                     {
-                                        "id": "leftIndexPointAndThumbRaiseClosed",
+                                        "id": "rightIndexPointAndThumbRaiseClosed",
                                         "type": "clip",
                                         "data": {
-                                            "url": "qrc:///avatar/animations/touch_thumb_point_closed_left.fbx",
+                                            "url": "qrc:///avatar/animations/touch_thumb_point_closed_right.fbx",
                                             "startFrame": 15.0,
                                             "endFrame": 15.0,
                                             "timeScale": 1.0,

@@ -577,6 +417,290 @@
                                 ]
                             },
+                            {
+                                "id": "rightHandAnimA",
+                                "type": "clip",
+                                "data": {
+                                    "url": "qrc:///avatar/animations/touch_thumb_point_open_right.fbx",
+                                    "startFrame": 15.0,
+                                    "endFrame": 15.0,
+                                    "timeScale": 1.0,
+                                    "loopFlag": true
+                                },
+                                "children": []
+                            },
+                            {
+                                "id": "rightHandAnimB",
+                                "type": "clip",
+                                "data": {
+                                    "url": "qrc:///avatar/animations/touch_thumb_point_open_right.fbx",
+                                    "startFrame": 15.0,
+                                    "endFrame": 15.0,
+                                    "timeScale": 1.0,
+                                    "loopFlag": true
+                                },
+                                "children": []
+                            }
+                        ]
+                    },
+                    {
+                        "id": "leftHandOverlay",
+                        "type": "overlay",
+                        "data": {
+                            "alpha": 0.0,
+                            "boneSet": "leftHand",
+                            "alphaVar": "leftHandOverlayAlpha"
+                        },
+                        "children": [
+                            {
+                                "id": "leftHandStateMachine",
+                                "type": "stateMachine",
+                                "data": {
+                                    "currentState": "leftHandAnimNone",
+                                    "states": [
+                                        {
+                                            "id": "leftHandAnimNone",
+                                            "interpTarget": 1,
+                                            "interpDuration": 3,
+                                            "transitions": [
+                                                { "var": "leftHandAnimA", "state": "leftHandAnimA" },
+                                                { "var": "leftHandAnimB", "state": "leftHandAnimB" }
+                                            ]
+                                        },
+                                        {
+                                            "id": "leftHandAnimA",
+                                            "interpTarget": 1,
+                                            "interpDuration": 3,
+                                            "transitions": [
+                                                { "var": "leftHandAnimNone", "state": "leftHandAnimNone" },
+                                                { "var": "leftHandAnimB", "state": "leftHandAnimB" }
+                                            ]
+                                        },
+                                        {
+                                            "id": "leftHandAnimB",
+                                            "interpTarget": 1,
+                                            "interpDuration": 3,
+                                            "transitions": [
+                                                { "var": "leftHandAnimNone", "state": "leftHandAnimNone" },
+                                                { "var": "leftHandAnimA", "state": "leftHandAnimA" }
+                                            ]
+                                        }
+                                    ]
+                                },
+                                "children": [
+                                    {
+                                        "id": "leftHandAnimNone",
+                                        "type": "stateMachine",
+                                        "data": {
+                                            "currentState": "leftHandGrasp",
+                                            "states": [
+                                                {
+                                                    "id": "leftHandGrasp",
+                                                    "interpTarget": 3,
+                                                    "interpDuration": 3,
+                                                    "transitions": [
+                                                        { "var": "isLeftIndexPoint", "state": "leftIndexPoint" },
+                                                        { "var": "isLeftThumbRaise", "state": "leftThumbRaise" },
+                                                        { "var": "isLeftIndexPointAndThumbRaise", "state": "leftIndexPointAndThumbRaise" }
+                                                    ]
+                                                },
+                                                {
+                                                    "id": "leftIndexPoint",
+                                                    "interpTarget": 15,
+                                                    "interpDuration": 3,
+                                                    "transitions": [
+                                                        { "var": "isLeftHandGrasp", "state": "leftHandGrasp" },
+                                                        { "var": "isLeftThumbRaise", "state": "leftThumbRaise" },
+                                                        { "var": "isLeftIndexPointAndThumbRaise", "state": "leftIndexPointAndThumbRaise" }
+                                                    ]
+                                                },
+                                                {
+                                                    "id": "leftThumbRaise",
+                                                    "interpTarget": 15,
+                                                    "interpDuration": 3,
+                                                    "transitions": [
+                                                        { "var": "isLeftHandGrasp", "state": "leftHandGrasp" },
+                                                        { "var": "isLeftIndexPoint", "state": "leftIndexPoint" },
+                                                        { "var": "isLeftIndexPointAndThumbRaise", "state": "leftIndexPointAndThumbRaise" }
+                                                    ]
+                                                },
+                                                {
+                                                    "id": "leftIndexPointAndThumbRaise",
+                                                    "interpTarget": 15,
+                                                    "interpDuration": 3,
+                                                    "transitions": [
+                                                        { "var": "isLeftHandGrasp", "state": "leftHandGrasp" },
+                                                        { "var": "isLeftIndexPoint", "state": "leftIndexPoint" },
+                                                        { "var": "isLeftThumbRaise", "state": "leftThumbRaise" }
+                                                    ]
+                                                }
+                                            ]
+                                        },
+                                        "children": [
+                                            {
+                                                "id": "leftHandGrasp",
+                                                "type": "blendLinear",
+                                                "data": {
+                                                    "alpha": 0.0,
+                                                    "alphaVar": "leftHandGraspAlpha"
+                                                },
+                                                "children": [
+                                                    {
+                                                        "id": "leftHandGraspOpen",
+                                                        "type": "clip",
+                                                        "data": {
+                                                            "url": "qrc:///avatar/animations/hydra_pose_open_left.fbx",
+                                                            "startFrame": 0.0,
+                                                            "endFrame": 0.0,
+                                                            "timeScale": 1.0,
+                                                            "loopFlag": true
+                                                        },
+                                                        "children": []
+                                                    },
+                                                    {
+                                                        "id": "leftHandGraspClosed",
+                                                        "type": "clip",
+                                                        "data": {
+                                                            "url": "qrc:///avatar/animations/hydra_pose_closed_left.fbx",
+                                                            "startFrame": 10.0,
+                                                            "endFrame": 10.0,
+                                                            "timeScale": 1.0,
+                                                            "loopFlag": true
+                                                        },
+                                                        "children": []
+                                                    }
+                                                ]
+                                            },
+                                            {
+                                                "id": "leftIndexPoint",
+                                                "type": "blendLinear",
+                                                "data": {
+                                                    "alpha": 0.0,
+                                                    "alphaVar": "leftHandGraspAlpha"
+                                                },
+                                                "children": [
+                                                    {
+                                                        "id": "leftIndexPointOpen",
+                                                        "type": "clip",
+                                                        "data": {
+                                                            "url": "qrc:///avatar/animations/touch_point_open_left.fbx",
+                                                            "startFrame": 15.0,
+                                                            "endFrame": 15.0,
+                                                            "timeScale": 1.0,
+                                                            "loopFlag": true
+                                                        },
+                                                        "children": []
+                                                    },
+                                                    {
+                                                        "id": "leftIndexPointClosed",
+                                                        "type": "clip",
+                                                        "data": {
+                                                            "url": "qrc:///avatar/animations/touch_point_closed_left.fbx",
+                                                            "startFrame": 15.0,
+                                                            "endFrame": 15.0,
+                                                            "timeScale": 1.0,
+                                                            "loopFlag": true
+                                                        },
+                                                        "children": []
+                                                    }
+                                                ]
+                                            },
+                                            {
+                                                "id": "leftThumbRaise",
+                                                "type": "blendLinear",
+                                                "data": {
+                                                    "alpha": 0.0,
+                                                    "alphaVar": "leftHandGraspAlpha"
+                                                },
+                                                "children": [
+                                                    {
+                                                        "id": "leftThumbRaiseOpen",
+                                                        "type": "clip",
+                                                        "data": {
+                                                            "url": "qrc:///avatar/animations/touch_thumb_open_left.fbx",
+                                                            "startFrame": 15.0,
+                                                            "endFrame": 15.0,
+                                                            "timeScale": 1.0,
+                                                            "loopFlag": true
+                                                        },
+                                                        "children": []
+                                                    },
+                                                    {
+                                                        "id": "leftThumbRaiseClosed",
+                                                        "type": "clip",
+                                                        "data": {
+                                                            "url": "qrc:///avatar/animations/touch_thumb_closed_left.fbx",
+                                                            "startFrame": 15.0,
+                                                            "endFrame": 15.0,
+                                                            "timeScale": 1.0,
+                                                            "loopFlag": true
+                                                        },
+                                                        "children": []
+                                                    }
+                                                ]
+                                            },
+                                            {
+                                                "id": "leftIndexPointAndThumbRaise",
+                                                "type": "blendLinear",
+                                                "data": {
+                                                    "alpha": 0.0,
+                                                    "alphaVar": "leftHandGraspAlpha"
+                                                },
+                                                "children": [
+                                                    {
+                                                        "id": "leftIndexPointAndThumbRaiseOpen",
+                                                        "type": "clip",
+                                                        "data": {
+                                                            "url": "qrc:///avatar/animations/touch_thumb_point_open_left.fbx",
+                                                            "startFrame": 15.0,
+                                                            "endFrame": 15.0,
+                                                            "timeScale": 1.0,
+                                                            "loopFlag": true
+                                                        },
+                                                        "children": []
+                                                    },
+                                                    {
+                                                        "id": "leftIndexPointAndThumbRaiseClosed",
+                                                        "type": "clip",
+                                                        "data": {
+                                                            "url": "qrc:///avatar/animations/touch_thumb_point_closed_left.fbx",
+                                                            "startFrame": 15.0,
+                                                            "endFrame": 15.0,
+                                                            "timeScale": 1.0,
+                                                            "loopFlag": true
+                                                        },
+                                                        "children": []
+                                                    }
+                                                ]
+                                            }
+                                        ]
+                                    },
+                                    {
+                                        "id": "leftHandAnimA",
+                                        "type": "clip",
+                                        "data": {
+                                            "url": "qrc:///avatar/animations/touch_thumb_point_open_left.fbx",
+                                            "startFrame": 15.0,
+                                            "endFrame": 15.0,
+                                            "timeScale": 1.0,
+                                            "loopFlag": true
+                                        },
+                                        "children": []
+                                    },
+                                    {
+                                        "id": "leftHandAnimB",
+                                        "type": "clip",
+                                        "data": {
+                                            "url": "qrc:///avatar/animations/touch_thumb_point_open_left.fbx",
+                                            "startFrame": 15.0,
+                                            "endFrame": 15.0,
+                                            "timeScale": 1.0,
+                                            "loopFlag": true
+                                        },
+                                        "children": []
+                                    }
+                                ]
+                            },
             {
                 "id": "mainStateMachine",
                 "type": "stateMachine",
                 "data": {

@@ -1594,4 +1718,4 @@
                 }
             ]
         }
     }
 }
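For readers unfamiliar with the animation JSON: each `stateMachine` node lists its states, and each state's `transitions` array maps a boolean animation variable (`var`) to a destination state, so setting `rightHandAnimA` true moves the machine out of `rightHandAnimNone`. A minimal sketch of those semantics, assuming first-matching-transition-wins (the real evaluator lives in the animation library):

```cpp
#include <map>
#include <string>
#include <vector>

// Illustrative only: one evaluation step of a "transitions" list like the
// JSON above. Assumes the first transition whose variable is true wins.
struct Transition {
    std::string var;    // e.g. "rightHandAnimA"
    std::string state;  // e.g. "rightHandAnimA"
};

std::string evaluate(const std::string& current,
                     const std::map<std::string, std::vector<Transition>>& transitions,
                     const std::map<std::string, bool>& vars) {
    for (const auto& t : transitions.at(current)) {
        auto it = vars.find(t.var);
        if (it != vars.end() && it->second) {
            return t.state;  // trigger variable set: enter the new state
        }
    }
    return current;  // no trigger set: stay put
}
```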
interface/src/FancyCamera.h

@@ -19,14 +19,22 @@ class FancyCamera : public Camera {
     Q_OBJECT
 
-    /**jsdoc
-     * @namespace
-     * @augments Camera
-     */
-
     // FIXME: JSDoc 3.5.5 doesn't augment @property definitions. The following definition is repeated in Camera.h.
     /**jsdoc
-     * @property {Uuid} cameraEntity The ID of the entity that the camera position and orientation follow when the camera is in
-     *     entity mode.
+     * The <code>Camera</code> API provides access to the "camera" that defines your view in desktop and HMD display modes.
+     *
+     * @namespace Camera
+     *
+     * @hifi-interface
+     * @hifi-client-entity
+     * @hifi-avatar
+     *
+     * @property {Vec3} position - The position of the camera. You can set this value only when the camera is in independent
+     *     mode.
+     * @property {Quat} orientation - The orientation of the camera. You can set this value only when the camera is in
+     *     independent mode.
+     * @property {Camera.Mode} mode - The camera mode.
+     * @property {ViewFrustum} frustum - The camera frustum.
+     * @property {Uuid} cameraEntity - The ID of the entity that is used for the camera position and orientation when the
+     *     camera is in entity mode.
      */
     Q_PROPERTY(QUuid cameraEntity READ getCameraEntity WRITE setCameraEntity)
 

@@ -38,25 +46,25 @@ public:
 
 
 public slots:
-    /**jsdoc
-     * Get the ID of the entity that the camera is set to use the position and orientation from when it's in entity mode. You can
-     * also get the entity ID using the <code>Camera.cameraEntity</code> property.
-     * @function Camera.getCameraEntity
-     * @returns {Uuid} The ID of the entity that the camera is set to follow when in entity mode; <code>null</code> if no camera
-     *     entity has been set.
-     */
+    /**jsdoc
+     * Gets the ID of the entity that the camera is set to follow (i.e., use the position and orientation from) when it's in
+     * entity mode. You can also get the entity ID using the {@link Camera|Camera.cameraEntity} property.
+     * @function Camera.getCameraEntity
+     * @returns {Uuid} The ID of the entity that the camera is set to follow when in entity mode; <code>null</code> if no
+     *     camera entity has been set.
+     */
     QUuid getCameraEntity() const;
 
     /**jsdoc
-     * Set the entity that the camera should use the position and orientation from when it's in entity mode. You can also set the
-     * entity using the <code>Camera.cameraEntity</code> property.
-     * @function Camera.setCameraEntity
-     * @param {Uuid} entityID The entity that the camera should follow when it's in entity mode.
-     * @example <caption>Move your camera to the position and orientation of the closest entity.</caption>
-     * Camera.setModeString("entity");
-     * var entity = Entities.findClosestEntity(MyAvatar.position, 100.0);
-     * Camera.setCameraEntity(entity);
-     */
+     * Sets the entity that the camera should follow (i.e., use the position and orientation from) when it's in entity mode.
+     * You can also set the entity using the {@link Camera|Camera.cameraEntity} property.
+     * @function Camera.setCameraEntity
+     * @param {Uuid} entityID - The entity that the camera should follow when it's in entity mode.
+     * @example <caption>Move your camera to the position and orientation of the closest entity.</caption>
+     * Camera.setModeString("entity");
+     * var entity = Entities.findClosestEntity(MyAvatar.position, 100.0);
+     * Camera.setCameraEntity(entity);
+     */
     void setCameraEntity(QUuid entityID);
 
 private:
interface/src/avatar/AvatarManager.cpp

@@ -498,8 +498,10 @@ void AvatarManager::handleRemovedAvatar(const AvatarSharedPointer& removedAvatar
     // on the creation of entities for that avatar instance and the deletion of entities for this instance
     avatar->removeAvatarEntitiesFromTree();
-    if (removalReason != KillAvatarReason::AvatarDisconnected) {
-        emit AvatarInputs::getInstance()->avatarEnteredIgnoreRadius(avatar->getSessionUUID());
-        emit DependencyManager::get<UsersScriptingInterface>()->enteredIgnoreRadius();
+    if (removalReason == KillAvatarReason::TheirAvatarEnteredYourBubble) {
+        emit AvatarInputs::getInstance()->avatarEnteredIgnoreRadius(avatar->getSessionUUID());
+        emit DependencyManager::get<UsersScriptingInterface>()->enteredIgnoreRadius();
     }
 
     workload::Transaction workloadTransaction;
     workloadTransaction.remove(avatar->getSpaceIndex());

@@ -932,6 +934,18 @@ void AvatarManager::setAvatarSortCoefficient(const QString& name, const QScriptV
     }
 }
 
+/**jsdoc
+ * PAL (People Access List) data for an avatar.
+ * @typedef {object} AvatarManager.PalData
+ * @property {Uuid} sessionUUID - The avatar's session ID. <code>""</code> if the avatar is your own.
+ * @property {string} sessionDisplayName - The avatar's display name, sanitized and versioned, as defined by the avatar mixer.
+ *     It is unique among all avatars present in the domain at the time.
+ * @property {number} audioLoudness - The instantaneous loudness of the audio input that the avatar is injecting into the
+ *     domain.
+ * @property {boolean} isReplicated - <strong>Deprecated.</strong>
+ * @property {Vec3} position - The position of the avatar.
+ * @property {number} palOrbOffset - The vertical offset from the avatar's position that an overlay orb should be displayed at.
+ */
 QVariantMap AvatarManager::getPalData(const QStringList& specificAvatarIdentifiers) {
     QJsonArray palData;
 
interface/src/avatar/AvatarManager.h

@@ -37,10 +37,11 @@
 using SortedAvatar = std::pair<float, std::shared_ptr<Avatar>>;
 
 /**jsdoc
- * The AvatarManager API has properties and methods which manage Avatars within the same domain.
+ * The <code>AvatarManager</code> API provides information about avatars within the current domain. The avatars available are
+ * those that Interface has displayed and therefore knows about.
  *
- * <p><strong>Note:</strong> This API is also provided to Interface and client entity scripts as the synonym,
- * <code>AvatarList</code>. For assignment client scripts, see the separate {@link AvatarList} API.
+ * <p><strong>Warning:</strong> This API is also provided to Interface, client entity, and avatar scripts as the synonym,
+ * "<code>AvatarList</code>". For assignment client scripts, see the separate {@link AvatarList} API.</p>
  *
  * @namespace AvatarManager
  *

@@ -48,8 +49,9 @@ using SortedAvatar = std::pair<float, std::shared_ptr<Avatar>>;
  * @hifi-client-entity
  * @hifi-avatar
  *
- * @borrows AvatarList.getAvatarIdentifiers as getAvatarIdentifiers
- * @borrows AvatarList.getAvatarsInRange as getAvatarsInRange
  * @borrows AvatarList.getAvatar as getAvatar
+ * @comment AvatarList.getAvatarIdentifiers as getAvatarIdentifiers - Don't borrow because behavior is slightly different.
+ * @comment AvatarList.getAvatarsInRange as getAvatarsInRange - Don't borrow because behavior is slightly different.
  * @borrows AvatarList.avatarAddedEvent as avatarAddedEvent
  * @borrows AvatarList.avatarRemovedEvent as avatarRemovedEvent
  * @borrows AvatarList.avatarSessionChangedEvent as avatarSessionChangedEvent

@@ -67,6 +69,31 @@ class AvatarManager : public AvatarHashMap {
 
 public:
 
+    /**jsdoc
+     * Gets the IDs of all avatars known about in the domain.
+     * Your own avatar is included in the list as a <code>null</code> value.
+     * @function AvatarManager.getAvatarIdentifiers
+     * @returns {Uuid[]} The IDs of all known avatars in the domain.
+     * @example <caption>Report the IDS of all avatars within the domain.</caption>
+     * var avatars = AvatarManager.getAvatarIdentifiers();
+     * print("Avatars in the domain: " + JSON.stringify(avatars));
+     * // A null item is included for your avatar.
+     */
+
+    /**jsdoc
+     * Gets the IDs of all avatars known about within a specified distance from a point.
+     * Your own avatar's ID is included in the list if it is in range.
+     * @function AvatarManager.getAvatarsInRange
+     * @param {Vec3} position - The point about which the search is performed.
+     * @param {number} range - The search radius.
+     * @returns {Uuid[]} The IDs of all known avatars within the search distance from the position.
+     * @example <caption>Report the IDs of all avatars within 10m of your avatar.</caption>
+     * var RANGE = 10;
+     * var avatars = AvatarManager.getAvatarsInRange(MyAvatar.position, RANGE);
+     * print("Nearby avatars: " + JSON.stringify(avatars));
+     * print("Own avatar: " + MyAvatar.sessionUUID);
+     */
+
     /// Registers the script types associated with the avatar manager.
     static void registerMetaTypes(QScriptEngine* engine);
 

@@ -79,9 +106,7 @@ public:
     glm::vec3 getMyAvatarPosition() const { return _myAvatar->getWorldPosition(); }
 
     /**jsdoc
-     * @function AvatarManager.getAvatar
-     * @param {Uuid} avatarID
-     * @returns {AvatarData}
+     * @comment Uses the base class's JSDoc.
      */
     // Null/Default-constructed QUuids will return MyAvatar
     Q_INVOKABLE virtual ScriptAvatarData* getAvatar(QUuid avatarID) override { return new ScriptAvatar(getAvatarBySessionID(avatarID)); }

@@ -112,36 +137,53 @@ public:
     void handleCollisionEvents(const CollisionEvents& collisionEvents);
 
     /**jsdoc
      * Gets the amount of avatar mixer data being generated by an avatar other than your own.
      * @function AvatarManager.getAvatarDataRate
-     * @param {Uuid} sessionID
-     * @param {string} [rateName=""]
-     * @returns {number}
+     * @param {Uuid} sessionID - The ID of the avatar whose data rate you're retrieving.
+     * @param {AvatarDataRate} [rateName=""] - The type of avatar mixer data to get the data rate of.
+     * @returns {number} The data rate in kbps; <code>0</code> if the avatar is your own.
      */
     Q_INVOKABLE float getAvatarDataRate(const QUuid& sessionID, const QString& rateName = QString("")) const;
 
     /**jsdoc
      * Gets the update rate of avatar mixer data being generated by an avatar other than your own.
      * @function AvatarManager.getAvatarUpdateRate
-     * @param {Uuid} sessionID
-     * @param {string} [rateName=""]
-     * @returns {number}
+     * @param {Uuid} sessionID - The ID of the avatar whose update rate you're retrieving.
+     * @param {AvatarUpdateRate} [rateName=""] - The type of avatar mixer data to get the update rate of.
+     * @returns {number} The update rate in Hz; <code>0</code> if the avatar is your own.
      */
     Q_INVOKABLE float getAvatarUpdateRate(const QUuid& sessionID, const QString& rateName = QString("")) const;
 
     /**jsdoc
      * Gets the simulation rate of an avatar other than your own.
      * @function AvatarManager.getAvatarSimulationRate
-     * @param {Uuid} sessionID
-     * @param {string} [rateName=""]
-     * @returns {number}
+     * @param {Uuid} sessionID - The ID of the avatar whose simulation you're retrieving.
+     * @param {AvatarSimulationRate} [rateName=""] - The type of avatar data to get the simulation rate of.
+     * @returns {number} The simulation rate in Hz; <code>0</code> if the avatar is your own.
      */
     Q_INVOKABLE float getAvatarSimulationRate(const QUuid& sessionID, const QString& rateName = QString("")) const;
 
     /**jsdoc
      * Find the first avatar intersected by a {@link PickRay}.
      * @function AvatarManager.findRayIntersection
-     * @param {PickRay} ray
-     * @param {Uuid[]} [avatarsToInclude=[]]
-     * @param {Uuid[]} [avatarsToDiscard=[]]
-     * @param {boolean} pickAgainstMesh
-     * @returns {RayToAvatarIntersectionResult}
+     * @param {PickRay} ray - The ray to use for finding avatars.
+     * @param {Uuid[]} [avatarsToInclude=[]] - If not empty then search is restricted to these avatars.
+     * @param {Uuid[]} [avatarsToDiscard=[]] - Avatars to ignore in the search.
+     * @param {boolean} [pickAgainstMesh=true] - If <code>true</code> then the exact intersection with the avatar mesh is
+     *     calculated, if <code>false</code> then the intersection is approximate.
+     * @returns {RayToAvatarIntersectionResult} The result of the search for the first intersected avatar.
+     * @example <caption>Find the first avatar directly in front of you.</caption>
+     * var pickRay = {
+     *     origin: MyAvatar.position,
+     *     direction: Quat.getFront(MyAvatar.orientation)
+     * };
+     *
+     * var intersection = AvatarManager.findRayIntersection(pickRay);
+     * if (intersection.intersects) {
+     *     print("Avatar found: " + JSON.stringify(intersection));
+     * } else {
+     *     print("No avatar found.");
+     * }
      */
     Q_INVOKABLE RayToAvatarIntersectionResult findRayIntersection(const PickRay& ray,
                                                                   const QScriptValue& avatarIdsToInclude = QScriptValue(),

@@ -149,11 +191,12 @@ public:
                                                                   bool pickAgainstMesh = true);
     /**jsdoc
      * @function AvatarManager.findRayIntersectionVector
-     * @param {PickRay} ray
-     * @param {Uuid[]} avatarsToInclude
-     * @param {Uuid[]} avatarsToDiscard
-     * @param {boolean} pickAgainstMesh
-     * @returns {RayToAvatarIntersectionResult}
+     * @param {PickRay} ray - Ray.
+     * @param {Uuid[]} avatarsToInclude - Avatars to include.
+     * @param {Uuid[]} avatarsToDiscard - Avatars to discard.
+     * @param {boolean} pickAgainstMesh - Pick against mesh.
+     * @returns {RayToAvatarIntersectionResult} Intersection result.
+     * @deprecated This function is deprecated and will be removed.
      */
     Q_INVOKABLE RayToAvatarIntersectionResult findRayIntersectionVector(const PickRay& ray,
                                                                         const QVector<EntityItemID>& avatarsToInclude,

@@ -162,10 +205,11 @@ public:
 
     /**jsdoc
      * @function AvatarManager.findParabolaIntersectionVector
-     * @param {PickParabola} pick
-     * @param {Uuid[]} avatarsToInclude
-     * @param {Uuid[]} avatarsToDiscard
-     * @returns {ParabolaToAvatarIntersectionResult}
+     * @param {PickParabola} pick - Pick.
+     * @param {Uuid[]} avatarsToInclude - Avatars to include.
+     * @param {Uuid[]} avatarsToDiscard - Avatars to discard.
+     * @returns {ParabolaToAvatarIntersectionResult} Intersection result.
+     * @deprecated This function is deprecated and will be removed.
      */
     Q_INVOKABLE ParabolaToAvatarIntersectionResult findParabolaIntersectionVector(const PickParabola& pick,
                                                                                   const QVector<EntityItemID>& avatarsToInclude,

@@ -173,27 +217,31 @@ public:
 
     /**jsdoc
      * @function AvatarManager.getAvatarSortCoefficient
-     * @param {string} name
-     * @returns {number}
+     * @param {string} name - Name.
+     * @returns {number} Value.
+     * @deprecated This function is deprecated and will be removed.
      */
     // TODO: remove this HACK once we settle on optimal default sort coefficients
     Q_INVOKABLE float getAvatarSortCoefficient(const QString& name);
 
     /**jsdoc
      * @function AvatarManager.setAvatarSortCoefficient
-     * @param {string} name
-     * @param {number} value
+     * @param {string} name - Name
+     * @param {number} value - Value.
+     * @deprecated This function is deprecated and will be removed.
      */
     Q_INVOKABLE void setAvatarSortCoefficient(const QString& name, const QScriptValue& value);
 
     /**jsdoc
-     * Used in the PAL for getting PAL-related data about avatars nearby. Using this method is faster
-     * than iterating over each avatar and obtaining data about them in JavaScript, as that method
-     * locks and unlocks each avatar's data structure potentially hundreds of times per update tick.
+     * Gets PAL (People Access List) data for one or more avatars. Using this method is faster than iterating over each avatar
+     * and obtaining data about each individually.
      * @function AvatarManager.getPalData
-     * @param {string[]} [specificAvatarIdentifiers=[]] - The list of IDs of the avatars you want the PAL data for.
-     *     If an empty list, the PAL data for all nearby avatars is returned.
-     * @returns {object[]} An array of objects, each object being the PAL data for an avatar.
+     * @param {string[]} [avatarIDs=[]] - The IDs of the avatars to get the PAL data for. If empty, then PAL data is obtained
+     *     for all avatars.
+     * @returns {object<"data", AvatarManager.PalData[]>} An array of objects, each object being the PAL data for an avatar.
+     * @example <caption>Report the PAL data for an avatar nearby.</caption>
+     * var palData = AvatarManager.getPalData();
+     * print("PAL data for one avatar: " + JSON.stringify(palData.data[0]));
      */
     Q_INVOKABLE QVariantMap getPalData(const QStringList& specificAvatarIdentifiers = QStringList());
 

@@ -209,7 +257,8 @@ public:
 public slots:
     /**jsdoc
      * @function AvatarManager.updateAvatarRenderStatus
-     * @param {boolean} shouldRenderAvatars
+     * @param {boolean} shouldRenderAvatars - Should render avatars.
+     * @deprecated This function is deprecated and will be removed.
      */
     void updateAvatarRenderStatus(bool shouldRenderAvatars);
 
interface/src/avatar/MyAvatar.cpp

@@ -1199,6 +1199,15 @@ void MyAvatar::overrideAnimation(const QString& url, float fps, bool loop, float
     _skeletonModel->getRig().overrideAnimation(url, fps, loop, firstFrame, lastFrame);
 }
 
+void MyAvatar::overrideHandAnimation(bool isLeft, const QString& url, float fps, bool loop, float firstFrame, float lastFrame) {
+    if (QThread::currentThread() != thread()) {
+        QMetaObject::invokeMethod(this, "overrideHandAnimation", Q_ARG(bool, isLeft), Q_ARG(const QString&, url), Q_ARG(float, fps),
+                                  Q_ARG(bool, loop), Q_ARG(float, firstFrame), Q_ARG(float, lastFrame));
+        return;
+    }
+    _skeletonModel->getRig().overrideHandAnimation(isLeft, url, fps, loop, firstFrame, lastFrame);
+}
+
 void MyAvatar::restoreAnimation() {
     if (QThread::currentThread() != thread()) {
         QMetaObject::invokeMethod(this, "restoreAnimation");
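The new method opens with Qt's usual thread-bounce guard: if the caller is not on the avatar's owning thread, the call is queued onto that thread and the current invocation returns. A stripped-down sketch of the idiom (illustrative, not Overte code; the class still needs moc processing as usual for Q_OBJECT):

```cpp
#include <QMetaObject>
#include <QObject>
#include <QThread>

// Illustrative sketch of the guard used by overrideHandAnimation() and
// restoreHandAnimation(): re-invoke on the owning thread, then bail out.
class Worker : public QObject {
    Q_OBJECT
public:
    Q_INVOKABLE void doWork(bool isLeft) {
        if (QThread::currentThread() != thread()) {
            // Queued invocation; re-enters doWork() on the owning thread.
            QMetaObject::invokeMethod(this, "doWork", Q_ARG(bool, isLeft));
            return;
        }
        // ... thread-affine work happens here ...
    }
};
```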
@@ -1207,6 +1216,14 @@ void MyAvatar::restoreAnimation() {
     _skeletonModel->getRig().restoreAnimation();
 }
 
+void MyAvatar::restoreHandAnimation(bool isLeft) {
+    if (QThread::currentThread() != thread()) {
+        QMetaObject::invokeMethod(this, "restoreHandAnimation", Q_ARG(bool, isLeft));
+        return;
+    }
+    _skeletonModel->getRig().restoreHandAnimation(isLeft);
+}
+
 QStringList MyAvatar::getAnimationRoles() {
     if (QThread::currentThread() != thread()) {
         QStringList result;
@@ -3172,17 +3189,40 @@ int MyAvatar::sendAvatarDataPacket(bool sendAll) {
     return bytesSent;
 }
 
-const float RENDER_HEAD_CUTOFF_DISTANCE = 0.47f;
-
 bool MyAvatar::cameraInsideHead(const glm::vec3& cameraPosition) const {
+    if (!_skeletonModel) {
+        return false;
+    }
+
+    // transform cameraPosition into rig coordinates
+    AnimPose rigToWorld = AnimPose(getWorldOrientation() * Quaternions::Y_180, getWorldPosition());
+    AnimPose worldToRig = rigToWorld.inverse();
+    glm::vec3 rigCameraPosition = worldToRig * cameraPosition;
+
+    // use head k-dop shape to determine if camera is inside head.
+    const Rig& rig = _skeletonModel->getRig();
+    int headJointIndex = rig.indexOfJoint("Head");
+    if (headJointIndex >= 0) {
+        const HFMModel& hfmModel = _skeletonModel->getHFMModel();
+        AnimPose headPose;
+        if (rig.getAbsoluteJointPoseInRigFrame(headJointIndex, headPose)) {
+            glm::vec3 displacement;
+            const HFMJointShapeInfo& headShapeInfo = hfmModel.joints[headJointIndex].shapeInfo;
+            return findPointKDopDisplacement(rigCameraPosition, headPose, headShapeInfo, displacement);
+        }
+    }
+
+    // fall back to simple distance check.
+    const float RENDER_HEAD_CUTOFF_DISTANCE = 0.47f;
     return glm::length(cameraPosition - getHeadPosition()) < (RENDER_HEAD_CUTOFF_DISTANCE * getModelScale());
 }
 
 bool MyAvatar::shouldRenderHead(const RenderArgs* renderArgs) const {
     bool defaultMode = renderArgs->_renderMode == RenderArgs::DEFAULT_RENDER_MODE;
     bool firstPerson = qApp->getCamera().getMode() == CAMERA_MODE_FIRST_PERSON;
+    bool overrideAnim = _skeletonModel ? _skeletonModel->getRig().isPlayingOverrideAnimation() : false;
     bool insideHead = cameraInsideHead(renderArgs->getViewFrustum().getPosition());
-    return !defaultMode || !firstPerson || !insideHead;
+    return !defaultMode || (!firstPerson && !insideHead) || (overrideAnim && !insideHead);
 }
 
 void MyAvatar::setHasScriptedBlendshapes(bool hasScriptedBlendshapes) {
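The k-dop test replaces a coarse distance check with a real containment query: a k-DOP is a convex volume bounded by k half-spaces, so "point inside" reduces to the point being behind every bounding plane. A self-contained sketch of that containment idea (illustrative only; the engine's `findPointKDopDisplacement` additionally computes a displacement out of the volume):

```cpp
#include <vector>
#include <glm/glm.hpp>

// Illustrative k-DOP containment test, not the engine implementation.
// Each bounding plane is (unit normal n, offset d): points with
// dot(n, p) <= d are on the inside of that plane.
struct Plane {
    glm::vec3 normal;
    float d;
};

bool pointInsideKDop(const glm::vec3& p, const std::vector<Plane>& planes) {
    for (const auto& plane : planes) {
        if (glm::dot(plane.normal, p) > plane.d) {
            return false;  // outside at least one bounding half-space
        }
    }
    return true;  // behind every plane, so inside the convex volume
}
```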
@@ -4798,7 +4838,12 @@ bool MyAvatar::isReadyForPhysics() const {
 }
 
 void MyAvatar::setSprintMode(bool sprint) {
-    _walkSpeedScalar = sprint ? AVATAR_SPRINT_SPEED_SCALAR : AVATAR_WALK_SPEED_SCALAR;
+    if (qApp->isHMDMode()) {
+        _walkSpeedScalar = sprint ? AVATAR_DESKTOP_SPRINT_SPEED_SCALAR : AVATAR_WALK_SPEED_SCALAR;
+    }
+    else {
+        _walkSpeedScalar = sprint ? AVATAR_HMD_SPRINT_SPEED_SCALAR : AVATAR_WALK_SPEED_SCALAR;
+    }
 }
 
 void MyAvatar::setIsInWalkingState(bool isWalking) {
interface/src/avatar/MyAvatar.h

@@ -597,6 +597,26 @@ public:
      */
     Q_INVOKABLE void overrideAnimation(const QString& url, float fps, bool loop, float firstFrame, float lastFrame);
 
+    /**jsdoc
+     * <code>overrideHandAnimation()</code> overrides the default hand poses that are triggered with controller buttons;
+     * use {@link MyAvatar.restoreHandAnimation} to restore the default poses.
+     * @function MyAvatar.overrideHandAnimation
+     * @param isLeft {boolean} Set to true if using the left hand.
+     * @param url {string} The URL to the animation file. Animation files need to be FBX format, but only need to contain the
+     *     avatar skeleton and animation data.
+     * @param fps {number} The frames per second (FPS) rate for the animation playback. 30 FPS is normal speed.
+     * @param loop {boolean} Set to true if the animation should loop.
+     * @param firstFrame {number} The frame the animation should start at.
+     * @param lastFrame {number} The frame the animation should end at.
+     * @example <caption>Override left hand animation for three seconds.</caption>
+     * // Override the left hand pose then restore the default pose.
+     * MyAvatar.overrideHandAnimation(isLeft, ANIM_URL, 30, true, 0, 53);
+     * Script.setTimeout(function () {
+     *     MyAvatar.restoreHandAnimation();
+     * }, 3000);
+     */
+    Q_INVOKABLE void overrideHandAnimation(bool isLeft, const QString& url, float fps, bool loop, float firstFrame, float lastFrame);
+
     /**jsdoc
      * Restores the default animations.
      * <p>The avatar animation system includes a set of default animations along with rules for how those animations are blended

@@ -615,6 +635,24 @@ public:
      */
     Q_INVOKABLE void restoreAnimation();
 
+    /**jsdoc
+     * Restores the default hand animation state machine that is driven by the state machine in the avatar-animation.json.
+     * <p>The avatar animation system includes a set of default animations along with rules for how those animations are blended
+     * together with procedural data (such as look-at vectors, hand sensors etc.). Playing your own custom animations will
+     * override the default animations. <code>restoreHandAnimation()</code> is used to restore the default hand poses.
+     * If you aren't currently playing an override hand animation, this function has no effect.</p>
+     * @function MyAvatar.restoreHandAnimation
+     * @param isLeft {boolean} Set to true if using the left hand.
+     * @example <caption>Override left hand animation for three seconds.</caption>
+     * // Override the left hand pose then restore the default pose.
+     * MyAvatar.overrideHandAnimation(isLeft, ANIM_URL, 30, true, 0, 53);
+     * Script.setTimeout(function () {
+     *     MyAvatar.restoreHandAnimation();
+     * }, 3000);
+     */
+    Q_INVOKABLE void restoreHandAnimation(bool isLeft);
+
     /**jsdoc
      * Gets the current animation roles.
      * <p>Each avatar has an avatar-animation.json file that defines which animations are used and how they are blended together
interface/src/avatar/MySkeletonModel.cpp

@@ -334,7 +334,9 @@ void MySkeletonModel::updateRig(float deltaTime, glm::mat4 parentTransform) {
     eyeParams.leftEyeJointIndex = _rig.indexOfJoint("LeftEye");
     eyeParams.rightEyeJointIndex = _rig.indexOfJoint("RightEye");
 
-    _rig.updateFromEyeParameters(eyeParams);
+    if (_owningAvatar->getHasProceduralEyeFaceMovement()) {
+        _rig.updateFromEyeParameters(eyeParams);
+    }
 
     updateFingers();
 }
interface/src/commerce/Wallet.cpp

@@ -96,28 +96,32 @@ int passwordCallback(char* password, int maxPasswordSize, int rwFlag, void* u) {
     }
 }
 
-EC_KEY* readKeys(const char* filename) {
-    FILE* fp;
-    EC_KEY *key = NULL;
-    if ((fp = fopen(filename, "rt"))) {
+EC_KEY* readKeys(QString filename) {
+    QFile file(filename);
+    EC_KEY* key = NULL;
+    if (file.open(QFile::ReadOnly)) {
         // file opened successfully
         qCDebug(commerce) << "opened key file" << filename;
 
-        if ((key = PEM_read_EC_PUBKEY(fp, NULL, NULL, NULL))) {
+        QByteArray pemKeyBytes = file.readAll();
+        BIO* bufio = BIO_new_mem_buf((void*)pemKeyBytes.constData(), pemKeyBytes.length());
+        if ((key = PEM_read_bio_EC_PUBKEY(bufio, NULL, NULL, NULL))) {
             // now read private key
 
             qCDebug(commerce) << "read public key";
 
-            if ((key = PEM_read_ECPrivateKey(fp, &key, passwordCallback, NULL))) {
+            if ((key = PEM_read_bio_ECPrivateKey(bufio, &key, passwordCallback, NULL))) {
                 qCDebug(commerce) << "read private key";
-                fclose(fp);
-                return key;
+                BIO_free(bufio);
+                file.close();
+            } else {
+                qCDebug(commerce) << "failed to read private key";
             }
-            qCDebug(commerce) << "failed to read private key";
         } else {
             qCDebug(commerce) << "failed to read public key";
         }
-        fclose(fp);
+        BIO_free(bufio);
+        file.close();
     } else {
         qCDebug(commerce) << "failed to open key file" << filename;
     }
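The pattern adopted throughout this file, reading the PEM bytes with `QFile` and parsing them through an in-memory `BIO` instead of a `FILE*`, keeps C stdio handles out of the OpenSSL calls (a likely motivation is avoiding `FILE*` crossing C-runtime boundaries on Windows, though the diff does not say so). A condensed sketch of the read path, using a hypothetical helper name rather than the Wallet code itself:

```cpp
#include <QFile>
#include <QString>
#include <openssl/bio.h>
#include <openssl/ec.h>
#include <openssl/pem.h>

// Condensed sketch of the QFile + memory-BIO pattern used above
// (hypothetical helper, not the Wallet code itself).
EC_KEY* readPublicKeyFromPem(const QString& filename) {
    QFile file(filename);
    if (!file.open(QFile::ReadOnly)) {
        return nullptr;
    }
    QByteArray pem = file.readAll();

    // The BIO borrows the buffer, so `pem` must outlive the parse.
    BIO* bio = BIO_new_mem_buf(pem.constData(), pem.size());
    EC_KEY* key = PEM_read_bio_EC_PUBKEY(bio, nullptr, nullptr, nullptr);
    BIO_free(bio);  // the parsed key is independent of the BIO
    return key;
}
```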
@ -131,8 +135,7 @@ bool Wallet::writeBackupInstructions() {
|
|||
QFile outputFile(outputFilename);
|
||||
bool retval = false;
|
||||
|
||||
if (getKeyFilePath().isEmpty())
|
||||
{
|
||||
if (getKeyFilePath().isEmpty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -152,7 +155,7 @@ bool Wallet::writeBackupInstructions() {
|
|||
outputFile.write(text.toUtf8());
|
||||
|
||||
// Close the output file
|
||||
outputFile.close();
|
||||
outputFile.close();
|
||||
|
||||
retval = true;
|
||||
qCDebug(commerce) << "wrote html file successfully";
|
||||
|
@ -165,28 +168,35 @@ bool Wallet::writeBackupInstructions() {
|
|||
return retval;
|
||||
}
|
||||
|
||||
bool writeKeys(const char* filename, EC_KEY* keys) {
|
||||
FILE* fp;
|
||||
bool writeKeys(QString filename, EC_KEY* keys) {
|
||||
BIO* bio = BIO_new(BIO_s_mem());
|
||||
bool retval = false;
|
||||
if ((fp = fopen(filename, "wt"))) {
|
||||
if (!PEM_write_EC_PUBKEY(fp, keys)) {
|
||||
fclose(fp);
|
||||
qCCritical(commerce) << "failed to write public key";
|
||||
return retval;
|
||||
}
|
||||
if (!PEM_write_bio_EC_PUBKEY(bio, keys)) {
|
||||
BIO_free(bio);
|
||||
qCCritical(commerce) << "failed to write public key";
|
||||
return retval;
|
||||
}
|
||||
|
||||
if (!PEM_write_ECPrivateKey(fp, keys, EVP_des_ede3_cbc(), NULL, 0, passwordCallback, NULL)) {
|
||||
fclose(fp);
|
||||
qCCritical(commerce) << "failed to write private key";
|
||||
return retval;
|
||||
}
|
||||
if (!PEM_write_bio_ECPrivateKey(bio, keys, EVP_des_ede3_cbc(), NULL, 0, passwordCallback, NULL)) {
|
||||
BIO_free(bio);
|
||||
qCCritical(commerce) << "failed to write private key";
|
||||
return retval;
|
||||
}
|
||||
|
||||
QFile file(filename);
|
||||
if (!file.open(QIODevice::WriteOnly)) {
|
||||
const char* bio_data;
|
||||
long bio_size = BIO_get_mem_data(bio, &bio_data);
|
||||
|
||||
QByteArray keyBytes(bio_data, bio_size);
|
||||
file.write(keyBytes);
|
||||
retval = true;
|
||||
qCDebug(commerce) << "wrote keys successfully";
|
||||
fclose(fp);
|
||||
file.close();
|
||||
} else {
|
||||
qCDebug(commerce) << "failed to open key file" << filename;
|
||||
}
|
||||
BIO_free(bio);
|
||||
return retval;
|
||||
}
|
||||
|
||||
|
@ -215,7 +225,6 @@ QByteArray Wallet::getWallet() {
|
|||
}
|
||||
|
||||
QPair<QByteArray*, QByteArray*> generateECKeypair() {
|
||||
|
||||
EC_KEY* keyPair = EC_KEY_new_by_curve_name(NID_secp256k1);
|
||||
QPair<QByteArray*, QByteArray*> retval{};
|
||||
|
||||
|
@ -235,7 +244,6 @@ QPair<QByteArray*, QByteArray*> generateECKeypair() {
|
|||
if (publicKeyLength <= 0 || privateKeyLength <= 0) {
|
||||
qCDebug(commerce) << "Error getting DER public or private key from EC struct -" << ERR_get_error();
|
||||
|
||||
|
||||
// cleanup the EC struct
|
||||
EC_KEY_free(keyPair);
|
||||
|
||||
|
@ -251,8 +259,7 @@ QPair<QByteArray*, QByteArray*> generateECKeypair() {
|
|||
return retval;
|
||||
}
|
||||
|
||||
|
||||
if (!writeKeys(keyFilePath().toStdString().c_str(), keyPair)) {
|
||||
if (!writeKeys(keyFilePath(), keyPair)) {
|
||||
qCDebug(commerce) << "couldn't save keys!";
|
||||
return retval;
|
||||
}
|
||||
|
@ -273,13 +280,18 @@ QPair<QByteArray*, QByteArray*> generateECKeypair() {
|
|||
// END copied code (which will soon change)
|
||||
|
||||
// the public key can just go into a byte array
|
||||
QByteArray readPublicKey(const char* filename) {
|
||||
FILE* fp;
|
||||
EC_KEY* key = NULL;
|
||||
if ((fp = fopen(filename, "r"))) {
|
||||
QByteArray readPublicKey(QString filename) {
|
||||
QByteArray retval;
|
||||
QFile file(filename);
|
||||
if (file.open(QIODevice::ReadOnly)) {
|
||||
// file opened successfully
|
||||
qCDebug(commerce) << "opened key file" << filename;
|
||||
if ((key = PEM_read_EC_PUBKEY(fp, NULL, NULL, NULL))) {
|
||||
|
||||
QByteArray pemKeyBytes = file.readAll();
|
||||
BIO* bufio = BIO_new_mem_buf((void*)pemKeyBytes.constData(), pemKeyBytes.length());
|
||||
|
||||
EC_KEY* key = PEM_read_bio_EC_PUBKEY(bufio, NULL, NULL, NULL);
|
||||
if (key) {
|
||||
// file read successfully
|
||||
unsigned char* publicKeyDER = NULL;
|
||||
int publicKeyLength = i2d_EC_PUBKEY(key, &publicKeyDER);
|
||||
|
@ -287,17 +299,19 @@ QByteArray readPublicKey(const char* filename) {
|
|||
|
||||
// cleanup
|
||||
EC_KEY_free(key);
|
||||
fclose(fp);
|
||||
|
||||
qCDebug(commerce) << "parsed public key file successfully";
|
||||
|
||||
QByteArray retval((char*)publicKeyDER, publicKeyLength);
|
||||
OPENSSL_free(publicKeyDER);
|
||||
BIO_free(bufio);
|
||||
file.close();
|
||||
return retval;
|
||||
} else {
|
||||
qCDebug(commerce) << "couldn't parse" << filename;
|
||||
}
|
||||
fclose(fp);
|
||||
BIO_free(bufio);
|
||||
file.close();
|
||||
} else {
|
||||
qCDebug(commerce) << "couldn't open" << filename;
|
||||
}
|
||||
|
@ -306,13 +320,17 @@ QByteArray readPublicKey(const char* filename) {
|
|||
|
||||
// the private key should be read/copied into heap memory. For now, we need the EC_KEY struct
|
||||
// so I'll return that.
|
||||
EC_KEY* readPrivateKey(const char* filename) {
|
||||
FILE* fp;
|
||||
EC_KEY* readPrivateKey(QString filename) {
|
||||
QFile file(filename);
|
||||
EC_KEY* key = NULL;
|
||||
if ((fp = fopen(filename, "r"))) {
|
||||
if (file.open(QIODevice::ReadOnly)) {
|
||||
// file opened successfully
|
||||
qCDebug(commerce) << "opened key file" << filename;
|
||||
if ((key = PEM_read_ECPrivateKey(fp, &key, passwordCallback, NULL))) {
|
||||
|
||||
QByteArray pemKeyBytes = file.readAll();
|
||||
BIO* bufio = BIO_new_mem_buf((void*)pemKeyBytes.constData(), pemKeyBytes.length());
|
||||
|
||||
if ((key = PEM_read_bio_ECPrivateKey(bufio, &key, passwordCallback, NULL))) {
|
||||
qCDebug(commerce) << "parsed private key file successfully";
|
||||
|
||||
} else {
|
||||
|
@ -320,7 +338,8 @@ EC_KEY* readPrivateKey(const char* filename) {
|
|||
// if the passphrase is wrong, then let's not cache it
|
||||
DependencyManager::get<Wallet>()->setPassphrase("");
|
||||
}
|
||||
fclose(fp);
|
||||
BIO_free(bufio);
|
||||
file.close();
|
||||
} else {
|
||||
qCDebug(commerce) << "couldn't open" << filename;
|
||||
}
|
||||
|
@@ -361,7 +380,7 @@ Wallet::Wallet() {
if (wallet->getKeyFilePath().isEmpty() || !wallet->getSecurityImage()) {
if (keyStatus == "preexisting") {
status = (uint) WalletStatus::WALLET_STATUS_PREEXISTING;
} else{
} else {
status = (uint) WalletStatus::WALLET_STATUS_NOT_SET_UP;
}
} else if (!wallet->walletIsAuthenticatedWithPassphrase()) {

@@ -371,7 +390,6 @@ Wallet::Wallet() {
} else {
status = (uint) WalletStatus::WALLET_STATUS_READY;
}

walletScriptingInterface->setWalletStatus(status);
});

@@ -569,10 +587,10 @@ bool Wallet::walletIsAuthenticatedWithPassphrase() {
}

// otherwise, we have a passphrase but no keys, so we have to check
auto publicKey = readPublicKey(keyFilePath().toStdString().c_str());
auto publicKey = readPublicKey(keyFilePath());

if (publicKey.size() > 0) {
if (auto key = readPrivateKey(keyFilePath().toStdString().c_str())) {
if (auto key = readPrivateKey(keyFilePath())) {
EC_KEY_free(key);

// be sure to add the public key so we don't do this over and over

@@ -631,8 +649,7 @@ QStringList Wallet::listPublicKeys() {
QString Wallet::signWithKey(const QByteArray& text, const QString& key) {
EC_KEY* ecPrivateKey = NULL;

auto keyFilePathString = keyFilePath().toStdString();
if ((ecPrivateKey = readPrivateKey(keyFilePath().toStdString().c_str()))) {
if ((ecPrivateKey = readPrivateKey(keyFilePath()))) {
unsigned char* sig = new unsigned char[ECDSA_size(ecPrivateKey)];

unsigned int signatureBytes = 0;

@@ -641,12 +658,8 @@ QString Wallet::signWithKey(const QByteArray& text, const QString& key) {

QByteArray hashedPlaintext = QCryptographicHash::hash(text, QCryptographicHash::Sha256);

int retrn = ECDSA_sign(0,
reinterpret_cast<const unsigned char*>(hashedPlaintext.constData()),
hashedPlaintext.size(),
sig,
&signatureBytes, ecPrivateKey);
int retrn = ECDSA_sign(0, reinterpret_cast<const unsigned char*>(hashedPlaintext.constData()), hashedPlaintext.size(),
sig, &signatureBytes, ecPrivateKey);

EC_KEY_free(ecPrivateKey);
QByteArray signature(reinterpret_cast<const char*>(sig), signatureBytes);
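Note: signWithKey above hashes the plaintext with SHA-256 and signs the digest with ECDSA. A self-contained, hedged sketch of that hash-then-sign flow, with the matching verify call added for illustration (OpenSSL 1.x EC_KEY API assumed; not the project's exact code):

#include <openssl/ec.h>
#include <openssl/ecdsa.h>
#include <QCryptographicHash>
#include <vector>

bool signAndVerify(EC_KEY* key, const QByteArray& text) {
    QByteArray digest = QCryptographicHash::hash(text, QCryptographicHash::Sha256);
    std::vector<unsigned char> sig(ECDSA_size(key)); // upper bound on DER signature size
    unsigned int sigLen = 0;
    if (ECDSA_sign(0, reinterpret_cast<const unsigned char*>(digest.constData()),
                   digest.size(), sig.data(), &sigLen, key) != 1) {
        return false;
    }
    // ECDSA_verify returns 1 only for a valid signature over the same digest.
    return ECDSA_verify(0, reinterpret_cast<const unsigned char*>(digest.constData()),
                        digest.size(), sig.data(), sigLen, key) == 1;
}

ECDSA_size() is an upper bound, which is why the code keeps the actual length returned through signatureBytes rather than the buffer size.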
@@ -682,7 +695,6 @@ void Wallet::updateImageProvider() {
}

void Wallet::chooseSecurityImage(const QString& filename) {

if (_securityImage) {
delete _securityImage;
}

@@ -754,7 +766,7 @@ QString Wallet::getKeyFilePath() {
}

bool Wallet::writeWallet(const QString& newPassphrase) {
EC_KEY* keys = readKeys(keyFilePath().toStdString().c_str());
EC_KEY* keys = readKeys(keyFilePath());
auto ledger = DependencyManager::get<Ledger>();
// Remove any existing locker, because it will be out of date.
if (!_publicKeys.isEmpty() && !ledger->receiveAt(_publicKeys.first(), _publicKeys.first(), QByteArray())) {

@@ -768,7 +780,7 @@ bool Wallet::writeWallet(const QString& newPassphrase) {
setPassphrase(newPassphrase);
}

if (writeKeys(tempFileName.toStdString().c_str(), keys)) {
if (writeKeys(tempFileName, keys)) {
if (writeSecurityImage(_securityImage, tempFileName)) {
// ok, now move the temp file to the correct spot
QFile(QString(keyFilePath())).remove();

@@ -834,10 +846,10 @@ void Wallet::handleChallengeOwnershipPacket(QSharedPointer<ReceivedMessage> pack
challengingNodeUUID = packet->read(challengingNodeUUIDByteArraySize);
}

EC_KEY* ec = readKeys(keyFilePath().toStdString().c_str());
EC_KEY* ec = readKeys(keyFilePath());
QString sig;

if (ec) {
if (ec) {
ERR_clear_error();
sig = signWithKey(text, ""); // base64 signature, QByteArray cast (on return) to QString FIXME should pass ec as string so we can tell which key to sign with
status = 1;
@@ -233,16 +233,19 @@ PointerEvent LaserPointer::buildPointerEvent(const PickedObject& target, const P

// If we just started triggering and we haven't moved too much, don't update intersection and pos2D
TriggerState& state = hover ? _latestState : _states[button];
float sensorToWorldScale = DependencyManager::get<AvatarManager>()->getMyAvatar()->getSensorToWorldScale();
float deadspotSquared = TOUCH_PRESS_TO_MOVE_DEADSPOT_SQUARED * sensorToWorldScale * sensorToWorldScale;
bool withinDeadspot = usecTimestampNow() - state.triggerStartTime < POINTER_MOVE_DELAY && glm::distance2(pos2D, state.triggerPos2D) < deadspotSquared;
if ((state.triggering || state.wasTriggering) && !state.deadspotExpired && withinDeadspot) {
pos2D = state.triggerPos2D;
intersection = state.intersection;
surfaceNormal = state.surfaceNormal;
}
if (!withinDeadspot) {
state.deadspotExpired = true;
auto avatar = DependencyManager::get<AvatarManager>()->getMyAvatar();
if (avatar) {
float sensorToWorldScale = avatar->getSensorToWorldScale();
float deadspotSquared = TOUCH_PRESS_TO_MOVE_DEADSPOT_SQUARED * sensorToWorldScale * sensorToWorldScale;
bool withinDeadspot = usecTimestampNow() - state.triggerStartTime < POINTER_MOVE_DELAY && glm::distance2(pos2D, state.triggerPos2D) < deadspotSquared;
if ((state.triggering || state.wasTriggering) && !state.deadspotExpired && withinDeadspot) {
pos2D = state.triggerPos2D;
intersection = state.intersection;
surfaceNormal = state.surfaceNormal;
}
if (!withinDeadspot) {
state.deadspotExpired = true;
}
}

return PointerEvent(pos2D, intersection, surfaceNormal, direction);
@@ -128,7 +128,7 @@ const AnimPoseVec& AnimTwoBoneIK::evaluate(const AnimVariantMap& animVars, const

if (triggersOut.hasKey(endEffectorPositionVar)) {
targetPose.trans() = triggersOut.lookupRigToGeometry(endEffectorPositionVar, tipPose.trans());
} else if (animVars.hasKey(endEffectorRotationVar)) {
} else if (animVars.hasKey(endEffectorPositionVar)) {
targetPose.trans() = animVars.lookupRigToGeometry(endEffectorPositionVar, tipPose.trans());
}

@@ -147,9 +147,11 @@ const AnimPoseVec& AnimTwoBoneIK::evaluate(const AnimVariantMap& animVars, const

// http://mathworld.wolfram.com/Circle-CircleIntersection.html
float midAngle = 0.0f;
if (d < r0 + r1) {
if ((d < r0 + r1) && (d > 0.0f) && (r0 > 0.0f) && (r1 > 0.0f)) {
float y = sqrtf((-d + r1 - r0) * (-d - r1 + r0) * (-d + r1 + r0) * (d + r1 + r0)) / (2.0f * d);
midAngle = PI - (acosf(y / r0) + acosf(y / r1));
float yR0Quotient = glm::clamp(y / r0, -1.0f, 1.0f);
float yR1Quotient = glm::clamp(y / r1, -1.0f, 1.0f);
midAngle = PI - (acosf(yR0Quotient) + acosf(yR1Quotient));
}

// compute midJoint rotation
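Note: the guarded expression is the standard circle-circle intersection half-chord from the MathWorld reference cited in the code. Written out, with r0 and r1 the two circle radii and d the distance between their centers:

\[
y = \frac{\sqrt{(-d + r_1 - r_0)(-d - r_1 + r_0)(-d + r_1 + r_0)(d + r_1 + r_0)}}{2d},
\qquad
\theta_{mid} = \pi - \left(\arccos\frac{y}{r_0} + \arccos\frac{y}{r_1}\right).
\]

The added d > 0 and r > 0 guards avoid the division by zero, and the clamps keep y/r inside acosf's domain when floating-point error pushes the quotient slightly past 1.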
@@ -142,3 +142,72 @@ glm::quat computeBodyFacingFromHead(const glm::quat& headRot, const glm::vec3& u

return glmExtractRotation(bodyMat);
}

const float INV_SQRT_3 = 1.0f / sqrtf(3.0f);
const int DOP14_COUNT = 14;
const glm::vec3 DOP14_NORMALS[DOP14_COUNT] = {
Vectors::UNIT_X,
-Vectors::UNIT_X,
Vectors::UNIT_Y,
-Vectors::UNIT_Y,
Vectors::UNIT_Z,
-Vectors::UNIT_Z,
glm::vec3(INV_SQRT_3, INV_SQRT_3, INV_SQRT_3),
-glm::vec3(INV_SQRT_3, INV_SQRT_3, INV_SQRT_3),
glm::vec3(INV_SQRT_3, -INV_SQRT_3, INV_SQRT_3),
-glm::vec3(INV_SQRT_3, -INV_SQRT_3, INV_SQRT_3),
glm::vec3(INV_SQRT_3, INV_SQRT_3, -INV_SQRT_3),
-glm::vec3(INV_SQRT_3, INV_SQRT_3, -INV_SQRT_3),
glm::vec3(INV_SQRT_3, -INV_SQRT_3, -INV_SQRT_3),
-glm::vec3(INV_SQRT_3, -INV_SQRT_3, -INV_SQRT_3)
};

// returns true if the given point lies inside of the k-dop, specified by shapeInfo & shapePose.
// if the given point does lie within the k-dop, it also returns the amount of displacement necessary to push that point outward
// such that it lies on the surface of the kdop.
bool findPointKDopDisplacement(const glm::vec3& point, const AnimPose& shapePose, const HFMJointShapeInfo& shapeInfo, glm::vec3& displacementOut) {

// transform point into local space of jointShape.
glm::vec3 localPoint = shapePose.inverse().xformPoint(point);

// Only works for 14-dop shape infos.
if (shapeInfo.dots.size() != DOP14_COUNT) {
return false;
}

glm::vec3 minDisplacement(FLT_MAX);
float minDisplacementLen = FLT_MAX;
glm::vec3 p = localPoint - shapeInfo.avgPoint;
float pLen = glm::length(p);
if (pLen > 0.0f) {
int slabCount = 0;
for (int i = 0; i < DOP14_COUNT; i++) {
float dot = glm::dot(p, DOP14_NORMALS[i]);
if (dot > 0.0f && dot < shapeInfo.dots[i]) {
slabCount++;
float distToPlane = pLen * (shapeInfo.dots[i] / dot);
float displacementLen = distToPlane - pLen;

// keep track of the smallest displacement
if (displacementLen < minDisplacementLen) {
minDisplacementLen = displacementLen;
minDisplacement = (p / pLen) * displacementLen;
}
}
}
if (slabCount == (DOP14_COUNT / 2) && minDisplacementLen != FLT_MAX) {
// we are within the k-dop so push the point along the minimum displacement found
displacementOut = shapePose.xformVectorFast(minDisplacement);
return true;
} else {
// point is outside of kdop
return false;
}
} else {
// point is directly on top of shapeInfo.avgPoint.
// push the point out along the x axis.
displacementOut = shapePose.xformVectorFast(shapeInfo.points[0]);
return true;
}
}
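Note: in symbols, the containment test above works like this (a sketch; notation matches the code, with unit normals n̂_i and plane distances d_i = shapeInfo.dots[i]). A local point p, measured from avgPoint, is inside the 14-DOP when every facing plane satisfies 0 < p · n̂_i < d_i; the code counts exactly 7 such slabs, one per opposite-normal pair. The distance along p̂ to plane i, and the smallest outward push onto the surface, are:

\[
t_i = \|p\|\,\frac{d_i}{p\cdot\hat{n}_i}, \qquad
\Delta = \Big(\min_{i:\,p\cdot\hat{n}_i>0} t_i - \|p\|\Big)\,\hat{p}.
\]

The degenerate case pLen == 0 (the point sits exactly on avgPoint) has no defined direction p̂, which is why the code falls back to pushing along the shape's first point.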
@@ -128,4 +128,10 @@ protected:
bool _snapshotValid { false };
};

// returns true if the given point lies inside of the k-dop, specified by shapeInfo & shapePose.
// if the given point does lie within the k-dop, it also returns the amount of displacement necessary to push that point outward
// such that it lies on the surface of the kdop.
bool findPointKDopDisplacement(const glm::vec3& point, const AnimPose& shapePose, const HFMJointShapeInfo& shapeInfo, glm::vec3& displacementOut);

#endif
@@ -370,6 +370,88 @@ void Rig::restoreAnimation() {
}
}

void Rig::overrideHandAnimation(bool isLeft, const QString& url, float fps, bool loop, float firstFrame, float lastFrame) {
HandAnimState::ClipNodeEnum clipNodeEnum;
if (isLeft) {
if (_leftHandAnimState.clipNodeEnum == HandAnimState::None || _leftHandAnimState.clipNodeEnum == HandAnimState::B) {
clipNodeEnum = HandAnimState::A;
} else {
clipNodeEnum = HandAnimState::B;
}
} else {
if (_rightHandAnimState.clipNodeEnum == HandAnimState::None || _rightHandAnimState.clipNodeEnum == HandAnimState::B) {
clipNodeEnum = HandAnimState::A;
} else {
clipNodeEnum = HandAnimState::B;
}
}

if (_animNode) {
std::shared_ptr<AnimClip> clip;
if (isLeft) {
if (clipNodeEnum == HandAnimState::A) {
clip = std::dynamic_pointer_cast<AnimClip>(_animNode->findByName("leftHandAnimA"));
} else {
clip = std::dynamic_pointer_cast<AnimClip>(_animNode->findByName("leftHandAnimB"));
}
} else {
if (clipNodeEnum == HandAnimState::A) {
clip = std::dynamic_pointer_cast<AnimClip>(_animNode->findByName("rightHandAnimA"));
} else {
clip = std::dynamic_pointer_cast<AnimClip>(_animNode->findByName("rightHandAnimB"));
}
}

if (clip) {
// set parameters
clip->setLoopFlag(loop);
clip->setStartFrame(firstFrame);
clip->setEndFrame(lastFrame);
const float REFERENCE_FRAMES_PER_SECOND = 30.0f;
float timeScale = fps / REFERENCE_FRAMES_PER_SECOND;
clip->setTimeScale(timeScale);
clip->loadURL(url);
}
}

// notify the handAnimStateMachine the desired state.
if (isLeft) {
// store current hand anim state.
_leftHandAnimState = { clipNodeEnum, url, fps, loop, firstFrame, lastFrame };
_animVars.set("leftHandAnimNone", false);
_animVars.set("leftHandAnimA", clipNodeEnum == HandAnimState::A);
_animVars.set("leftHandAnimB", clipNodeEnum == HandAnimState::B);
} else {
// store current hand anim state.
_rightHandAnimState = { clipNodeEnum, url, fps, loop, firstFrame, lastFrame };
_animVars.set("rightHandAnimNone", false);
_animVars.set("rightHandAnimA", clipNodeEnum == HandAnimState::A);
_animVars.set("rightHandAnimB", clipNodeEnum == HandAnimState::B);
}
}

void Rig::restoreHandAnimation(bool isLeft) {
if (isLeft) {
if (_leftHandAnimState.clipNodeEnum != HandAnimState::None) {
_leftHandAnimState.clipNodeEnum = HandAnimState::None;

// notify the handAnimStateMachine the desired state.
_animVars.set("leftHandAnimNone", true);
_animVars.set("leftHandAnimA", false);
_animVars.set("leftHandAnimB", false);
}
} else {
if (_rightHandAnimState.clipNodeEnum != HandAnimState::None) {
_rightHandAnimState.clipNodeEnum = HandAnimState::None;

// notify the handAnimStateMachine the desired state.
_animVars.set("rightHandAnimNone", true);
_animVars.set("rightHandAnimA", false);
_animVars.set("rightHandAnimB", false);
}
}
}

void Rig::overrideNetworkAnimation(const QString& url, float fps, bool loop, float firstFrame, float lastFrame) {

NetworkAnimState::ClipNodeEnum clipNodeEnum = NetworkAnimState::None;

@@ -1521,74 +1603,6 @@ void Rig::updateHead(bool headEnabled, bool hipsEnabled, const AnimPose& headPos
}
}

const float INV_SQRT_3 = 1.0f / sqrtf(3.0f);
const int DOP14_COUNT = 14;
const glm::vec3 DOP14_NORMALS[DOP14_COUNT] = {
Vectors::UNIT_X,
-Vectors::UNIT_X,
Vectors::UNIT_Y,
-Vectors::UNIT_Y,
Vectors::UNIT_Z,
-Vectors::UNIT_Z,
glm::vec3(INV_SQRT_3, INV_SQRT_3, INV_SQRT_3),
-glm::vec3(INV_SQRT_3, INV_SQRT_3, INV_SQRT_3),
glm::vec3(INV_SQRT_3, -INV_SQRT_3, INV_SQRT_3),
-glm::vec3(INV_SQRT_3, -INV_SQRT_3, INV_SQRT_3),
glm::vec3(INV_SQRT_3, INV_SQRT_3, -INV_SQRT_3),
-glm::vec3(INV_SQRT_3, INV_SQRT_3, -INV_SQRT_3),
glm::vec3(INV_SQRT_3, -INV_SQRT_3, -INV_SQRT_3),
-glm::vec3(INV_SQRT_3, -INV_SQRT_3, -INV_SQRT_3)
};

// returns true if the given point lies inside of the k-dop, specified by shapeInfo & shapePose.
// if the given point does lie within the k-dop, it also returns the amount of displacement necessary to push that point outward
// such that it lies on the surface of the kdop.
static bool findPointKDopDisplacement(const glm::vec3& point, const AnimPose& shapePose, const HFMJointShapeInfo& shapeInfo, glm::vec3& displacementOut) {

// transform point into local space of jointShape.
glm::vec3 localPoint = shapePose.inverse().xformPoint(point);

// Only works for 14-dop shape infos.
if (shapeInfo.dots.size() != DOP14_COUNT) {
return false;
}

glm::vec3 minDisplacement(FLT_MAX);
float minDisplacementLen = FLT_MAX;
glm::vec3 p = localPoint - shapeInfo.avgPoint;
float pLen = glm::length(p);
if (pLen > 0.0f) {
int slabCount = 0;
for (int i = 0; i < DOP14_COUNT; i++) {
float dot = glm::dot(p, DOP14_NORMALS[i]);
if (dot > 0.0f && dot < shapeInfo.dots[i]) {
slabCount++;
float distToPlane = pLen * (shapeInfo.dots[i] / dot);
float displacementLen = distToPlane - pLen;

// keep track of the smallest displacement
if (displacementLen < minDisplacementLen) {
minDisplacementLen = displacementLen;
minDisplacement = (p / pLen) * displacementLen;
}
}
}
if (slabCount == (DOP14_COUNT / 2) && minDisplacementLen != FLT_MAX) {
// we are within the k-dop so push the point along the minimum displacement found
displacementOut = shapePose.xformVectorFast(minDisplacement);
return true;
} else {
// point is outside of kdop
return false;
}
} else {
// point is directly on top of shapeInfo.avgPoint.
// push the point out along the x axis.
displacementOut = shapePose.xformVectorFast(shapeInfo.points[0]);
return true;
}
}

glm::vec3 Rig::deflectHandFromTorso(const glm::vec3& handPosition, const HFMJointShapeInfo& hipsShapeInfo, const HFMJointShapeInfo& spineShapeInfo,
const HFMJointShapeInfo& spine1ShapeInfo, const HFMJointShapeInfo& spine2ShapeInfo) const {
glm::vec3 position = handPosition;

@@ -2136,6 +2150,20 @@ void Rig::initAnimGraph(const QUrl& url) {
overrideAnimation(origState.url, origState.fps, origState.loop, origState.firstFrame, origState.lastFrame);
}

if (_rightHandAnimState.clipNodeEnum != HandAnimState::None) {
// restore the right hand animation we had before reset.
HandAnimState origState = _rightHandAnimState;
_rightHandAnimState = { HandAnimState::None, "", 30.0f, false, 0.0f, 0.0f };
overrideHandAnimation(false, origState.url, origState.fps, origState.loop, origState.firstFrame, origState.lastFrame);
}

if (_leftHandAnimState.clipNodeEnum != HandAnimState::None) {
// restore the left hand animation we had before reset.
HandAnimState origState = _leftHandAnimState;
_leftHandAnimState = { HandAnimState::None, "", 30.0f, false, 0.0f, 0.0f };
overrideHandAnimation(true, origState.url, origState.fps, origState.loop, origState.firstFrame, origState.lastFrame);
}

// restore the role animations we had before reset.
for (auto& roleAnimState : _roleAnimStates) {
auto roleState = roleAnimState.second;
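Note: overrideHandAnimation above pairs with the new A/B hand states in the avatar animation JSON. A standalone sketch of the slot-selection logic it uses (names hypothetical): the next override always loads into the clip slot that is not currently playing, so the anim state machine can crossfade from the old clip to the new one instead of restarting a single node.

enum class ClipSlot { None, A, B };

// Ping-pong: None or B -> A, A -> B. Mirrors the clipNodeEnum choice above.
ClipSlot nextSlot(ClipSlot current) {
    return (current == ClipSlot::None || current == ClipSlot::B) ? ClipSlot::A : ClipSlot::B;
}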
@@ -116,8 +116,12 @@ public:
void destroyAnimGraph();

void overrideAnimation(const QString& url, float fps, bool loop, float firstFrame, float lastFrame);
bool isPlayingOverrideAnimation() const { return _userAnimState.clipNodeEnum != UserAnimState::None; };
void restoreAnimation();

void overrideHandAnimation(bool isLeft, const QString& url, float fps, bool loop, float firstFrame, float lastFrame);
void restoreHandAnimation(bool isLeft);

void overrideNetworkAnimation(const QString& url, float fps, bool loop, float firstFrame, float lastFrame);
void triggerNetworkRole(const QString& role);
void restoreNetworkAnimation();

@@ -333,7 +337,7 @@ protected:
RigRole _state { RigRole::Idle };
RigRole _desiredState { RigRole::Idle };
float _desiredStateAge { 0.0f };

struct NetworkAnimState {
enum ClipNodeEnum {
None = 0,

@@ -356,6 +360,27 @@ protected:
float blendTime;
};

struct HandAnimState {
enum ClipNodeEnum {
None = 0,
A,
B
};

HandAnimState() : clipNodeEnum(HandAnimState::None) {}
HandAnimState(ClipNodeEnum clipNodeEnumIn, const QString& urlIn, float fpsIn, bool loopIn, float firstFrameIn, float lastFrameIn) :
clipNodeEnum(clipNodeEnumIn), url(urlIn), fps(fpsIn), loop(loopIn), firstFrame(firstFrameIn), lastFrame(lastFrameIn) {
}

ClipNodeEnum clipNodeEnum;
QString url;
float fps;
bool loop;
float firstFrame;
float lastFrame;
};

struct UserAnimState {
enum ClipNodeEnum {
None = 0,

@@ -390,6 +415,8 @@ protected:

UserAnimState _userAnimState;
NetworkAnimState _networkAnimState;
HandAnimState _rightHandAnimState;
HandAnimState _leftHandAnimState;
std::map<QString, RoleAnimState> _roleAnimStates;

float _leftHandOverlayAlpha { 0.0f };
@@ -509,6 +509,26 @@ void Avatar::relayJointDataToChildren() {
_reconstructSoftEntitiesJointMap = false;
}

/**jsdoc
* An avatar has different types of data simulated at different rates, in Hz.
*
* <table>
* <thead>
* <tr><th>Rate Name</th><th>Description</th></tr>
* </thead>
* <tbody>
* <tr><td><code>"avatar" or ""</code></td><td>The rate at which the avatar is updated even if not in view.</td></tr>
* <tr><td><code>"avatarInView"</code></td><td>The rate at which the avatar is updated if in view.</td></tr>
* <tr><td><code>"skeletonModel"</code></td><td>The rate at which the skeleton model is being updated, even if there are no
* joint data available.</td></tr>
* <tr><td><code>"jointData"</code></td><td>The rate at which joint data are being updated.</td></tr>
* <tr><td><code>""</code></td><td>When no rate name is specified, the <code>"avatar"</code> update rate is
* provided.</td></tr>
* </tbody>
* </table>
*
* @typedef {string} AvatarSimulationRate
*/
float Avatar::getSimulationRate(const QString& rateName) const {
if (rateName == "") {
return _simulationRate.rate();
@@ -501,8 +501,8 @@ public:

/**jsdoc
* @function MyAvatar.getSimulationRate
* @param {string} [rateName=""] - Rate name.
* @returns {number} Simulation rate.
* @param {AvatarSimulationRate} [rateName=""] - Rate name.
* @returns {number} Simulation rate in Hz.
* @deprecated This function is deprecated and will be removed.
*/
Q_INVOKABLE float getSimulationRate(const QString& rateName = QString("")) const;
@@ -270,28 +270,19 @@ bool SkeletonModel::getEyeModelPositions(glm::vec3& firstEyePosition, glm::vec3&
getJointPosition(_rig.indexOfJoint("RightEye"), secondEyePosition)) {
return true;
}
// no eye joints; try to estimate based on head/neck joints
glm::vec3 neckPosition, headPosition;
if (getJointPosition(_rig.indexOfJoint("Neck"), neckPosition) &&
getJointPosition(_rig.indexOfJoint("Head"), headPosition)) {
const float EYE_PROPORTION = 0.6f;
glm::vec3 baseEyePosition = glm::mix(neckPosition, headPosition, EYE_PROPORTION);

int headJointIndex = _rig.indexOfJoint("Head");
glm::vec3 headPosition;
if (getJointPosition(headJointIndex, headPosition)) {

// get head joint rotation.
glm::quat headRotation;
getJointRotation(_rig.indexOfJoint("Head"), headRotation);
const float EYES_FORWARD = 0.25f;
const float EYE_SEPARATION = 0.1f;
float headHeight = glm::distance(neckPosition, headPosition);
firstEyePosition = baseEyePosition + headRotation * glm::vec3(EYE_SEPARATION, 0.0f, EYES_FORWARD) * headHeight;
secondEyePosition = baseEyePosition + headRotation * glm::vec3(-EYE_SEPARATION, 0.0f, EYES_FORWARD) * headHeight;
return true;
} else if (getJointPosition(_rig.indexOfJoint("Head"), headPosition)) {
glm::vec3 baseEyePosition = headPosition;
glm::quat headRotation;
getJointRotation(_rig.indexOfJoint("Head"), headRotation);
const float EYES_FORWARD_HEAD_ONLY = 0.30f;
const float EYE_SEPARATION = 0.1f;
firstEyePosition = baseEyePosition + headRotation * glm::vec3(EYE_SEPARATION, 0.0f, EYES_FORWARD_HEAD_ONLY);
secondEyePosition = baseEyePosition + headRotation * glm::vec3(-EYE_SEPARATION, 0.0f, EYES_FORWARD_HEAD_ONLY);
getJointRotation(headJointIndex, headRotation);

float heightRatio = _rig.getUnscaledEyeHeight() / DEFAULT_AVATAR_EYE_HEIGHT;
glm::vec3 ipdOffset = glm::vec3(DEFAULT_AVATAR_IPD / 2.0f, 0.0f, 0.0f);
firstEyePosition = headPosition + headRotation * heightRatio * (DEFAULT_AVATAR_HEAD_TO_MIDDLE_EYE_OFFSET + ipdOffset);
secondEyePosition = headPosition + headRotation * heightRatio * (DEFAULT_AVATAR_HEAD_TO_MIDDLE_EYE_OFFSET - ipdOffset);
return true;
}
return false;
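Note: the replacement branch derives both eye positions from the head joint alone, scaling a default head-to-eye offset by the rig's eye height. As a formula (a sketch matching the constants above, with x̂ the head's side axis):

\[
h = \frac{\text{unscaledEyeHeight}}{\text{DEFAULT\_AVATAR\_EYE\_HEIGHT}}, \qquad
E_{L,R} = P_{head} + R_{head}\,h\,\Big(O_{head\to eye} \pm \tfrac{\text{IPD}}{2}\,\hat{x}\Big).
\]

Scaling by h keeps the estimated interpupillary spacing and eye offset proportional for unusually large or small avatars, which the old fixed EYE_SEPARATION constant did not.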
@@ -1545,7 +1545,6 @@ float AvatarData::getDataRate(const QString& rateName) const {
* <tr><th>Rate Name</th><th>Description</th></tr>
* </thead>
* <tbody>
* <tr><td><code>"globalPosition"</code></td><td>Global position.</td></tr>
* <tr><td><code>"localPosition"</code></td><td>Local position.</td></tr>
* <tr><td><code>"avatarBoundingBox"</code></td><td>Avatar bounding box.</td></tr>

@@ -1559,7 +1558,6 @@ float AvatarData::getDataRate(const QString& rateName) const {
* <tr><td><code>"faceTracker"</code></td><td>Face tracker data.</td></tr>
* <tr><td><code>"jointData"</code></td><td>Joint data.</td></tr>
* <tr><td><code>"farGrabJointData"</code></td><td>Far grab joint data.</td></tr>
* <tr><td><code>""</code></td><td>When no rate name is specified, the overall update rate is provided.</td></tr>
* </tbody>
* </table>

@@ -1721,7 +1719,6 @@ glm::vec3 AvatarData::getJointTranslation(const QString& name) const {
// on another thread in between the call to getJointIndex and getJointTranslation
// return getJointTranslation(getJointIndex(name));
return readLockWithNamedJointIndex<glm::vec3>(name, [this](int index) {
return _jointData.at(index).translation;
return getJointTranslation(index);
});
}

@@ -1809,8 +1806,8 @@ glm::quat AvatarData::getJointRotation(const QString& name) const {
// Can't do this, not thread safe
// return getJointRotation(getJointIndex(name));

return readLockWithNamedJointIndex<glm::quat>(name, [&](int index) {
return _jointData.at(index).rotation;
return readLockWithNamedJointIndex<glm::quat>(name, [this](int index) {
return getJointRotation(index);
});
}

@@ -2905,6 +2902,20 @@ glm::mat4 AvatarData::getControllerRightHandMatrix() const {
return _controllerRightHandMatrixCache.get();
}

/**jsdoc
* Information about a ray-to-avatar intersection.
* @typedef {object} RayToAvatarIntersectionResult
* @property {boolean} intersects - <code>true</code> if an avatar is intersected, <code>false</code> if it isn't.
* @property {string} avatarID - The ID of the avatar that is intersected.
* @property {number} distance - The distance from the ray origin to the intersection.
* @property {string} face - The name of the box face that is intersected; <code>"UNKNOWN_FACE"</code> if mesh was picked
* against.
* @property {Vec3} intersection - The ray intersection point in world coordinates.
* @property {Vec3} surfaceNormal - The surface normal at the intersection point.
* @property {number} jointIndex - The index of the joint intersected.
* @property {SubmeshIntersection} extraInfo - Extra information on the mesh intersected if mesh was picked against,
* <code>{}</code> if it wasn't.
*/
QScriptValue RayToAvatarIntersectionResultToScriptValue(QScriptEngine* engine, const RayToAvatarIntersectionResult& value) {
QScriptValue obj = engine->newObject();
obj.setProperty("intersects", value.intersects);
@@ -479,7 +479,8 @@ class AvatarData : public QObject, public SpatiallyNestable {
* avatar. <em>Read-only.</em>
* @property {number} sensorToWorldScale - The scale that transforms dimensions in the user's real world to the avatar's
* size in the virtual world. <em>Read-only.</em>
* @property {boolean} hasPriority - is the avatar in a Hero zone? <em>Read-only.</em>
* @property {boolean} hasPriority - <code>true</code> if the avatar is in a "hero" zone, <code>false</code> if it isn't.
* <em>Read-only.</em>
*/
Q_PROPERTY(glm::vec3 position READ getWorldPosition WRITE setPositionViaScript)
Q_PROPERTY(float scale READ getDomainLimitedScale WRITE setTargetScale)

@@ -1751,14 +1752,11 @@ protected:

template <typename T, typename F>
T readLockWithNamedJointIndex(const QString& name, const T& defaultValue, F f) const {
int index = getFauxJointIndex(name);
QReadLocker readLock(&_jointDataLock);

// The first conditional is superfluous, but illustrative
if (index == -1 || index < _jointData.size()) {
int index = getJointIndex(name);
if (index == -1) {
return defaultValue;
}

return f(index);
}

@@ -1769,8 +1767,8 @@ protected:

template <typename F>
void writeLockWithNamedJointIndex(const QString& name, F f) {
int index = getFauxJointIndex(name);
QWriteLocker writeLock(&_jointDataLock);
int index = getJointIndex(name);
if (index == -1) {
return;
}
|
|||
const quint64 MIN_TIME_BETWEEN_MY_AVATAR_DATA_SENDS = USECS_PER_SECOND / CLIENT_TO_AVATAR_MIXER_BROADCAST_FRAMES_PER_SECOND;
|
||||
|
||||
/**jsdoc
|
||||
* <strong>Note:</strong> An <code>AvatarList</code> API is also provided for Interface and client entity scripts: it is a
|
||||
* synonym for the {@link AvatarManager} API.
|
||||
* The <code>AvatarList</code> API provides information about avatars within the current domain.
|
||||
*
|
||||
* <p><strong>Warning:</strong> An API named "<code>AvatarList</code>" is also provided for Interface, client entity, and avatar
|
||||
* scripts, however, it is a synonym for the {@link AvatarManager} API.</p>
|
||||
*
|
||||
* @namespace AvatarList
|
||||
*
|
||||
|
@ -78,23 +80,37 @@ public:
|
|||
// Currently, your own avatar will be included as the null avatar id.
|
||||
|
||||
/**jsdoc
|
||||
* Gets the IDs of all avatars in the domain.
|
||||
* <p><strong>Warning:</strong> If the AC script is acting as an avatar (i.e., <code>Agent.isAvatar == true</code>) the
|
||||
* avatar's ID is NOT included in results.</p>
|
||||
* @function AvatarList.getAvatarIdentifiers
|
||||
* @returns {Uuid[]}
|
||||
* @returns {Uuid[]} The IDs of all avatars in the domain (excluding AC script's avatar).
|
||||
* @example <caption>Report the IDS of all avatars within the domain.</caption>
|
||||
* var avatars = AvatarList.getAvatarIdentifiers();
|
||||
* print("Avatars in the domain: " + JSON.stringify(avatars));
|
||||
*/
|
||||
Q_INVOKABLE QVector<QUuid> getAvatarIdentifiers();
|
||||
|
||||
/**jsdoc
|
||||
* Gets the IDs of all avatars within a specified distance from a point.
|
||||
* <p><strong>Warning:</strong> If the AC script is acting as an avatar (i.e., <code>Agent.isAvatar == true</code>) the
|
||||
* avatar's ID is NOT included in results.</p>
|
||||
* @function AvatarList.getAvatarsInRange
|
||||
* @param {Vec3} position
|
||||
* @param {number} range
|
||||
* @returns {Uuid[]}
|
||||
* @param {Vec3} position - The point about which the search is performed.
|
||||
* @param {number} range - The search radius.
|
||||
* @returns {Uuid[]} The IDs of all avatars within the search distance from the position (excluding AC script's avatar).
|
||||
* @example <caption>Report the IDs of all avatars within 10m of the origin.</caption>
|
||||
* var RANGE = 10;
|
||||
* var avatars = AvatarList.getAvatarsInRange(Vec3.ZERO, RANGE);
|
||||
* print("Avatars near the origin: " + JSON.stringify(avatars));
|
||||
*/
|
||||
Q_INVOKABLE QVector<QUuid> getAvatarsInRange(const glm::vec3& position, float rangeMeters) const;
|
||||
|
||||
/**jsdoc
|
||||
* Gets information about an avatar.
|
||||
* @function AvatarList.getAvatar
|
||||
* @param {Uuid} avatarID
|
||||
* @returns {AvatarData}
|
||||
* @param {Uuid} avatarID - The ID of the avatar.
|
||||
* @returns {AvatarData} Information about the avatar.
|
||||
*/
|
||||
// Null/Default-constructed QUuids will return MyAvatar
|
||||
Q_INVOKABLE virtual ScriptAvatarData* getAvatar(QUuid avatarID) { return new ScriptAvatarData(getAvatarBySessionID(avatarID)); }
|
||||
|
@ -110,34 +126,57 @@ public:
|
|||
signals:
|
||||
|
||||
/**jsdoc
|
||||
* Triggered when an avatar arrives in the domain.
|
||||
* @function AvatarList.avatarAddedEvent
|
||||
* @param {Uuid} sessionUUID
|
||||
* @param {Uuid} sessionUUID - The ID of the avatar that arrived in the domain.
|
||||
* @returns {Signal}
|
||||
* @example <caption>Report when an avatar arrives in the domain.</caption>
|
||||
* AvatarManager.avatarAddedEvent.connect(function (sessionID) {
|
||||
* print("Avatar arrived: " + sessionID);
|
||||
* });
|
||||
*
|
||||
* // Note: If using from the AvatarList API, replace "AvatarManager" with "AvatarList".
|
||||
*/
|
||||
void avatarAddedEvent(const QUuid& sessionUUID);
|
||||
|
||||
/**jsdoc
|
||||
* Triggered when an avatar leaves the domain.
|
||||
* @function AvatarList.avatarRemovedEvent
|
||||
* @param {Uuid} sessionUUID
|
||||
* @param {Uuid} sessionUUID - The ID of the avatar that left the domain.
|
||||
* @returns {Signal}
|
||||
* @example <caption>Report when an avatar leaves the domain.</caption>
|
||||
* AvatarManager.avatarRemovedEvent.connect(function (sessionID) {
|
||||
* print("Avatar left: " + sessionID);
|
||||
* });
|
||||
*
|
||||
* // Note: If using from the AvatarList API, replace "AvatarManager" with "AvatarList".
|
||||
*/
|
||||
void avatarRemovedEvent(const QUuid& sessionUUID);
|
||||
|
||||
/**jsdoc
|
||||
* Triggered when an avatar's session ID changes.
|
||||
* @function AvatarList.avatarSessionChangedEvent
|
||||
* @param {Uuid} sessionUUID
|
||||
* @param {Uuid} oldSessionUUID
|
||||
* @param {Uuid} newSessionUUID - The new session ID.
|
||||
* @param {Uuid} oldSessionUUID - The old session ID.
|
||||
* @returns {Signal}
|
||||
* @example <caption>Report when an avatar's session ID changes.</caption>
|
||||
* AvatarManager.avatarSessionChangedEvent.connect(function (newSessionID, oldSessionID) {
|
||||
* print("Avatar session ID changed from " + oldSessionID + " to " + newSessionID);
|
||||
* });
|
||||
*
|
||||
* // Note: If using from the AvatarList API, replace "AvatarManager" with "AvatarList".
|
||||
*/
|
||||
void avatarSessionChangedEvent(const QUuid& sessionUUID,const QUuid& oldUUID);
|
||||
|
||||
public slots:
|
||||
|
||||
/**jsdoc
|
||||
* Checks whether there is an avatar within a specified distance from a point.
|
||||
* @function AvatarList.isAvatarInRange
|
||||
* @param {string} position
|
||||
* @param {string} range
|
||||
* @returns {boolean}
|
||||
* @param {string} position - The test position.
|
||||
* @param {string} range - The test distance.
|
||||
* @returns {boolean} <code>true</code> if there's an avatar within the specified distance of the point, <code>false</code>
|
||||
* if not.
|
||||
*/
|
||||
bool isAvatarInRange(const glm::vec3 & position, const float range);
|
||||
|
||||
|
@ -145,36 +184,41 @@ protected slots:
|
|||
|
||||
/**jsdoc
|
||||
* @function AvatarList.sessionUUIDChanged
|
||||
* @param {Uuid} sessionUUID
|
||||
* @param {Uuid} oldSessionUUID
|
||||
* @param {Uuid} sessionUUID - New session ID.
|
||||
* @param {Uuid} oldSessionUUID - Old session ID.
|
||||
* @deprecated This function is deprecated and will be removed.
|
||||
*/
|
||||
void sessionUUIDChanged(const QUuid& sessionUUID, const QUuid& oldUUID);
|
||||
|
||||
/**jsdoc
|
||||
* @function AvatarList.processAvatarDataPacket
|
||||
* @param {} message
|
||||
* @param {} sendingNode
|
||||
* @param {object} message - Message.
|
||||
* @param {object} sendingNode - Sending node.
|
||||
* @deprecated This function is deprecated and will be removed.
|
||||
*/
|
||||
void processAvatarDataPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode);
|
||||
|
||||
/**jsdoc
|
||||
* @function AvatarList.processAvatarIdentityPacket
|
||||
* @param {} message
|
||||
* @param {} sendingNode
|
||||
* @param {object} message - Message.
|
||||
* @param {object} sendingNode - Sending node.
|
||||
* @deprecated This function is deprecated and will be removed.
|
||||
*/
|
||||
void processAvatarIdentityPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode);
|
||||
|
||||
/**jsdoc
|
||||
* @function AvatarList.processBulkAvatarTraits
|
||||
* @param {} message
|
||||
* @param {} sendingNode
|
||||
* @param {object} message - Message.
|
||||
* @param {object} sendingNode - Sending node.
|
||||
* @deprecated This function is deprecated and will be removed.
|
||||
*/
|
||||
void processBulkAvatarTraits(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode);
|
||||
|
||||
/**jsdoc
|
||||
* @function AvatarList.processKillAvatar
|
||||
* @param {} message
|
||||
* @param {} sendingNode
|
||||
* @param {object} message - Message.
|
||||
* @param {object} sendingNode - Sending node.
|
||||
* @deprecated This function is deprecated and will be removed.
|
||||
*/
|
||||
void processKillAvatar(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode);
|
||||
|
||||
|
|
|
@@ -16,6 +16,52 @@

#include "AvatarData.h"

/**jsdoc
* Information about an avatar.
* @typedef {object} AvatarData
* @property {Vec3} position - The avatar's position.
* @property {number} scale - The target scale of the avatar without any restrictions on permissible values imposed by the
* domain.
* @property {Vec3} handPosition - A user-defined hand position, in world coordinates. The position moves with the avatar but
* is otherwise not used or changed by Interface.
* @property {number} bodyPitch - The pitch of the avatar's body, in degrees.
* @property {number} bodyYaw - The yaw of the avatar's body, in degrees.
* @property {number} bodyRoll - The roll of the avatar's body, in degrees.
* @property {Quat} orientation - The orientation of the avatar's body.
* @property {Quat} headOrientation - The orientation of the avatar's head.
* @property {number} headPitch - The pitch of the avatar's head relative to the body, in degrees.
* @property {number} headYaw - The yaw of the avatar's head relative to the body, in degrees.
* @property {number} headRoll - The roll of the avatar's head relative to the body, in degrees.
*
* @property {Vec3} velocity - The linear velocity of the avatar.
* @property {Vec3} angularVelocity - The angular velocity of the avatar.
*
* @property {Uuid} sessionUUID - The avatar's session ID.
* @property {string} displayName - The avatar's display name.
* @property {string} sessionDisplayName - The avatar's display name, sanitized and versioned, as defined by the avatar mixer.
* It is unique among all avatars present in the domain at the time.
* @property {boolean} isReplicated - <strong>Deprecated.</strong>
* @property {boolean} lookAtSnappingEnabled - <code>true</code> if the avatar's eyes snap to look at another avatar's eyes
* when the other avatar is in the line of sight and also has <code>lookAtSnappingEnabled == true</code>.
*
* @property {string} skeletonModelURL - The avatar's FST file.
* @property {AttachmentData[]} attachmentData - Information on the avatar's attachments.<br />
* <strong>Deprecated:</strong> Use avatar entities instead.
* @property {string[]} jointNames - The list of joints in the current avatar model.
*
* @property {number} audioLoudness - The instantaneous loudness of the audio input that the avatar is injecting into the
* domain.
* @property {number} audioAverageLoudness - The rolling average loudness of the audio input that the avatar is injecting into
* the domain.
*
* @property {Mat4} sensorToWorldMatrix - The scale, rotation, and translation transform from the user's real world to the
* avatar's size, orientation, and position in the virtual world.
* @property {Mat4} controllerLeftHandMatrix - The rotation and translation of the left hand controller relative to the avatar.
* @property {Mat4} controllerRightHandMatrix - The rotation and translation of the right hand controller relative to the
* avatar.
*
* @property {boolean} hasPriority - <code>true</code> if the avatar is in a "hero" zone, <code>false</code> if it isn't.
*/
class ScriptAvatarData : public QObject {
Q_OBJECT
@@ -144,7 +144,12 @@ void MaterialBaker::processMaterial() {
connect(textureBaker.data(), &TextureBaker::finished, this, &MaterialBaker::handleFinishedTextureBaker);
_textureBakers.insert(textureKey, textureBaker);
textureBaker->moveToThread(_getNextOvenWorkerThreadOperator ? _getNextOvenWorkerThreadOperator() : thread());
QMetaObject::invokeMethod(textureBaker.data(), "bake");
// By default, Qt will invoke this bake immediately if the TextureBaker is on the same worker thread as this MaterialBaker.
// We don't want that, because threads may be waiting for work while this thread is stuck processing a TextureBaker.
// On top of that, _textureBakers isn't fully populated.
// So, use Qt::QueuedConnection.
// TODO: Better thread utilization at the top level, not just the MaterialBaker level
QMetaObject::invokeMethod(textureBaker.data(), "bake", Qt::QueuedConnection);
}
_materialsNeedingRewrite.insert(textureKey, networkMaterial.second);
} else {
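Note: the reason for the explicit Qt::QueuedConnection above can be shown in a standalone example. With the default AutoConnection, invoking a slot on an object living on the current thread runs it synchronously, re-entering before the caller's setup has finished; a queued invocation defers it to the event loop. A minimal sketch (generic Qt, not the project's classes; the functor overload of invokeMethod assumes Qt 5.10+):

#include <QCoreApplication>
#include <QDebug>
#include <QMetaObject>

int main(int argc, char** argv) {
    QCoreApplication app(argc, argv);
    // Queued: runs only once the event loop is reached, after the setup below.
    QMetaObject::invokeMethod(&app, []() {
        qDebug() << "bake() would run here";
        QCoreApplication::quit();
    }, Qt::QueuedConnection);
    qDebug() << "setup finishes first";
    return app.exec(); // prints: "setup finishes first" then "bake() would run here"
}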
@@ -131,7 +131,10 @@ void TextureBaker::handleTextureNetworkReply() {
void TextureBaker::processTexture() {
// the baked textures need to have the source hash added for cache checks in Interface
// so we add that to the processed texture before handling it off to be serialized
auto hashData = QCryptographicHash::hash(_originalTexture, QCryptographicHash::Md5);
QCryptographicHash hasher(QCryptographicHash::Md5);
hasher.addData(_originalTexture);
hasher.addData((const char*)&_textureType, sizeof(_textureType));
auto hashData = hasher.result();
std::string hash = hashData.toHex().toStdString();

TextureMeta meta;

@@ -206,7 +209,7 @@ void TextureBaker::processTexture() {
}

// Uncompressed KTX
if (_textureType == image::TextureUsage::Type::CUBE_TEXTURE) {
if (_textureType == image::TextureUsage::Type::SKY_TEXTURE || _textureType == image::TextureUsage::Type::AMBIENT_TEXTURE) {
buffer->reset();
auto processedTexture = image::processImage(std::move(buffer), _textureURL.toString().toStdString(), image::ColorChannel::NONE,
ABSOLUTE_MAX_TEXTURE_NUM_PIXELS, _textureType, false, gpu::BackendTarget::GL45, _abortProcessing);
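Note: the hash change above folds the texture's usage type into the cache key, so two bakes of the same bytes with different usages (now that SKY and AMBIENT are distinct) no longer collide. A minimal sketch of the incremental-hash pattern (names illustrative, not the project's code):

#include <QByteArray>
#include <QCryptographicHash>

QByteArray cacheKey(const QByteArray& sourceBytes, int textureType) {
    QCryptographicHash hasher(QCryptographicHash::Md5);
    hasher.addData(sourceBytes);                                  // content bytes
    hasher.addData(reinterpret_cast<const char*>(&textureType),  // usage tag
                   sizeof(textureType));
    return hasher.result().toHex();
}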
@@ -465,7 +465,7 @@ void ZoneEntityRenderer::setAmbientURL(const QString& ambientUrl) {
} else {
_pendingAmbientTexture = true;
auto textureCache = DependencyManager::get<TextureCache>();
_ambientTexture = textureCache->getTexture(_ambientTextureURL, image::TextureUsage::CUBE_TEXTURE);
_ambientTexture = textureCache->getTexture(_ambientTextureURL, image::TextureUsage::AMBIENT_TEXTURE);

// keep whatever is assigned on the ambient map/sphere until texture is loaded
}

@@ -506,7 +506,7 @@ void ZoneEntityRenderer::setSkyboxURL(const QString& skyboxUrl) {
} else {
_pendingSkyboxTexture = true;
auto textureCache = DependencyManager::get<TextureCache>();
_skyboxTexture = textureCache->getTexture(_skyboxTextureURL, image::TextureUsage::CUBE_TEXTURE);
_skyboxTexture = textureCache->getTexture(_skyboxTextureURL, image::TextureUsage::SKY_TEXTURE);
}
}
@@ -2,6 +2,7 @@ set(TARGET_NAME image)
setup_hifi_library()
link_hifi_libraries(shared gpu)
target_nvtt()
target_tbb()
target_etc2comp()
target_openexr()

660
libraries/image/src/image/CubeMap.cpp
Normal file
@@ -0,0 +1,660 @@
//
// CubeMap.h
// image/src/image
//
// Created by Olivier Prat on 03/27/2019.
// Copyright 2019 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "CubeMap.h"

#include <cmath>
#include <TBBHelpers.h>

#include "RandomAndNoise.h"
#include "BRDF.h"
#include "ImageLogging.h"

#ifndef M_PI
#define M_PI 3.14159265359
#endif

#include <nvtt/nvtt.h>

using namespace image;

static const glm::vec3 FACE_NORMALS[24] = {
// POSITIVE X
glm::vec3(1.0f, 1.0f, 1.0f),
glm::vec3(1.0f, 1.0f, -1.0f),
glm::vec3(1.0f, -1.0f, 1.0f),
glm::vec3(1.0f, -1.0f, -1.0f),
// NEGATIVE X
glm::vec3(-1.0f, 1.0f, -1.0f),
glm::vec3(-1.0f, 1.0f, 1.0f),
glm::vec3(-1.0f, -1.0f, -1.0f),
glm::vec3(-1.0f, -1.0f, 1.0f),
// POSITIVE Y
glm::vec3(-1.0f, 1.0f, -1.0f),
glm::vec3(1.0f, 1.0f, -1.0f),
glm::vec3(-1.0f, 1.0f, 1.0f),
glm::vec3(1.0f, 1.0f, 1.0f),
// NEGATIVE Y
glm::vec3(-1.0f, -1.0f, 1.0f),
glm::vec3(1.0f, -1.0f, 1.0f),
glm::vec3(-1.0f, -1.0f, -1.0f),
glm::vec3(1.0f, -1.0f, -1.0f),
// POSITIVE Z
glm::vec3(-1.0f, 1.0f, 1.0f),
glm::vec3(1.0f, 1.0f, 1.0f),
glm::vec3(-1.0f, -1.0f, 1.0f),
glm::vec3(1.0f, -1.0f, 1.0f),
// NEGATIVE Z
glm::vec3(1.0f, 1.0f, -1.0f),
glm::vec3(-1.0f, 1.0f, -1.0f),
glm::vec3(1.0f, -1.0f, -1.0f),
glm::vec3(-1.0f, -1.0f, -1.0f)
};

struct CubeFaceMip {

CubeFaceMip(gpu::uint16 level, const CubeMap* cubemap) {
_dims = cubemap->getMipDimensions(level);
_lineStride = cubemap->getMipLineStride(level);
}

CubeFaceMip(const CubeFaceMip& other) : _dims(other._dims), _lineStride(other._lineStride) {

}

gpu::Vec2i _dims;
size_t _lineStride;
};

class CubeMap::ConstMip : public CubeFaceMip {
public:

ConstMip(gpu::uint16 level, const CubeMap* cubemap) :
CubeFaceMip(level, cubemap), _faces(cubemap->_mips[level]) {
}

glm::vec4 fetch(int face, glm::vec2 uv) const {
glm::vec2 coordFrac = uv * glm::vec2(_dims) - 0.5f;
glm::vec2 coords = glm::floor(coordFrac);

coordFrac -= coords;

coords += (float)EDGE_WIDTH;

const auto& pixels = _faces[face];
gpu::Vec2i loCoords(coords);
gpu::Vec2i hiCoords;

hiCoords = glm::clamp(loCoords + 1, gpu::Vec2i(0, 0), _dims - 1 + (int)EDGE_WIDTH);
loCoords = glm::clamp(loCoords, gpu::Vec2i(0, 0), _dims - 1 + (int)EDGE_WIDTH);

const size_t offsetLL = loCoords.x + loCoords.y * _lineStride;
const size_t offsetHL = hiCoords.x + loCoords.y * _lineStride;
const size_t offsetLH = loCoords.x + hiCoords.y * _lineStride;
const size_t offsetHH = hiCoords.x + hiCoords.y * _lineStride;
assert(offsetLL >= 0 && offsetLL < _lineStride * (_dims.y + 2 * EDGE_WIDTH));
assert(offsetHL >= 0 && offsetHL < _lineStride * (_dims.y + 2 * EDGE_WIDTH));
assert(offsetLH >= 0 && offsetLH < _lineStride * (_dims.y + 2 * EDGE_WIDTH));
assert(offsetHH >= 0 && offsetHH < _lineStride * (_dims.y + 2 * EDGE_WIDTH));
glm::vec4 colorLL = pixels[offsetLL];
glm::vec4 colorHL = pixels[offsetHL];
glm::vec4 colorLH = pixels[offsetLH];
glm::vec4 colorHH = pixels[offsetHH];

colorLL += (colorHL - colorLL) * coordFrac.x;
colorLH += (colorHH - colorLH) * coordFrac.x;
return colorLL + (colorLH - colorLL) * coordFrac.y;
}

private:

const Faces& _faces;

};
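Note: the lerp chaining at the end of fetch is the usual bilinear weighting. Expanded, with f the fractional texel coordinates and C_LL ... C_HH the four fetched neighbors:

\[
C(u,v) = (1-f_x)(1-f_y)\,C_{LL} + f_x(1-f_y)\,C_{HL} + (1-f_x)f_y\,C_{LH} + f_x f_y\,C_{HH}.
\]

The EDGE_WIDTH border of duplicated neighbor-face pixels is what lets this filter sample past a face edge without producing a visible seam.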
class CubeMap::Mip : public CubeFaceMip {
|
||||
public:
|
||||
|
||||
explicit Mip(gpu::uint16 level, CubeMap* cubemap) :
|
||||
CubeFaceMip(level, cubemap), _faces(cubemap->_mips[level]) {
|
||||
}
|
||||
|
||||
Mip(const Mip& other) : CubeFaceMip(other), _faces(other._faces) {
|
||||
}
|
||||
|
||||
void applySeams() {
|
||||
if (EDGE_WIDTH == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Copy edge rows and columns from neighbouring faces to fix seam filtering issues
|
||||
seamColumnAndRow(gpu::Texture::CUBE_FACE_TOP_POS_Y, _dims.x, gpu::Texture::CUBE_FACE_RIGHT_POS_X, -1, -1);
|
||||
seamColumnAndRow(gpu::Texture::CUBE_FACE_BOTTOM_NEG_Y, _dims.x, gpu::Texture::CUBE_FACE_RIGHT_POS_X, _dims.y, 1);
|
||||
seamColumnAndColumn(gpu::Texture::CUBE_FACE_FRONT_NEG_Z, -1, gpu::Texture::CUBE_FACE_RIGHT_POS_X, _dims.x, 1);
|
||||
seamColumnAndColumn(gpu::Texture::CUBE_FACE_BACK_POS_Z, _dims.x, gpu::Texture::CUBE_FACE_RIGHT_POS_X, -1, 1);
|
||||
|
||||
seamRowAndRow(gpu::Texture::CUBE_FACE_BACK_POS_Z, -1, gpu::Texture::CUBE_FACE_TOP_POS_Y, _dims.y, 1);
|
||||
seamRowAndRow(gpu::Texture::CUBE_FACE_BACK_POS_Z, _dims.y, gpu::Texture::CUBE_FACE_BOTTOM_NEG_Y, -1, 1);
|
||||
seamColumnAndColumn(gpu::Texture::CUBE_FACE_BACK_POS_Z, -1, gpu::Texture::CUBE_FACE_LEFT_NEG_X, _dims.x, 1);
|
||||
|
||||
seamRowAndRow(gpu::Texture::CUBE_FACE_TOP_POS_Y, -1, gpu::Texture::CUBE_FACE_FRONT_NEG_Z, -1, -1);
|
||||
seamColumnAndRow(gpu::Texture::CUBE_FACE_TOP_POS_Y, -1, gpu::Texture::CUBE_FACE_LEFT_NEG_X, -1, 1);
|
||||
|
||||
seamColumnAndColumn(gpu::Texture::CUBE_FACE_LEFT_NEG_X, -1, gpu::Texture::CUBE_FACE_FRONT_NEG_Z, _dims.x, 1);
|
||||
seamColumnAndRow(gpu::Texture::CUBE_FACE_BOTTOM_NEG_Y, -1, gpu::Texture::CUBE_FACE_LEFT_NEG_X, _dims.y, -1);
|
||||
|
||||
seamRowAndRow(gpu::Texture::CUBE_FACE_FRONT_NEG_Z, _dims.y, gpu::Texture::CUBE_FACE_BOTTOM_NEG_Y, _dims.y, -1);
|
||||
|
||||
// Duplicate corner pixels
|
||||
for (int face = 0; face < 6; face++) {
|
||||
auto& pixels = _faces[face];
|
||||
|
||||
pixels[0] = pixels[1];
|
||||
pixels[_dims.x + 1] = pixels[_dims.x];
|
||||
pixels[(_dims.y + 1)*(_dims.x + 2)] = pixels[(_dims.y + 1)*(_dims.x + 2) + 1];
|
||||
pixels[(_dims.y + 2)*(_dims.x + 2) - 1] = pixels[(_dims.y + 2)*(_dims.x + 2) - 2];
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
Faces& _faces;
|
||||
|
||||
inline static void copy(CubeMap::Face::const_iterator srcFirst, CubeMap::Face::const_iterator srcLast, size_t srcStride, CubeMap::Face::iterator dstBegin, size_t dstStride) {
|
||||
while (srcFirst <= srcLast) {
|
||||
*dstBegin = *srcFirst;
|
||||
srcFirst += srcStride;
|
||||
dstBegin += dstStride;
|
||||
}
|
||||
}
|
||||
|
||||
static std::pair<int, int> getSrcAndDst(int dim, int value) {
|
||||
int src;
|
||||
int dst;
|
||||
|
||||
if (value < 0) {
|
||||
src = 1;
|
||||
dst = 0;
|
||||
} else if (value >= dim) {
|
||||
src = dim;
|
||||
dst = dim + 1;
|
||||
}
|
||||
return std::make_pair(src, dst);
|
||||
}
|
||||
|
||||
void seamColumnAndColumn(int face0, int col0, int face1, int col1, int inc) {
|
||||
auto coords0 = getSrcAndDst(_dims.x, col0);
|
||||
auto coords1 = getSrcAndDst(_dims.x, col1);
|
||||
|
||||
copyColumnToColumn(face0, coords0.first, face1, coords1.second, inc);
|
||||
copyColumnToColumn(face1, coords1.first, face0, coords0.second, inc);
|
||||
}
|
||||
|
||||
void seamColumnAndRow(int face0, int col0, int face1, int row1, int inc) {
|
||||
auto coords0 = getSrcAndDst(_dims.x, col0);
|
||||
auto coords1 = getSrcAndDst(_dims.y, row1);
|
||||
|
||||
copyColumnToRow(face0, coords0.first, face1, coords1.second, inc);
|
||||
copyRowToColumn(face1, coords1.first, face0, coords0.second, inc);
|
||||
}
|
||||
|
||||
void seamRowAndRow(int face0, int row0, int face1, int row1, int inc) {
|
||||
auto coords0 = getSrcAndDst(_dims.y, row0);
|
||||
auto coords1 = getSrcAndDst(_dims.y, row1);
|
||||
|
||||
copyRowToRow(face0, coords0.first, face1, coords1.second, inc);
|
||||
copyRowToRow(face1, coords1.first, face0, coords0.second, inc);
|
||||
}
|
||||
|
||||
void copyColumnToColumn(int srcFace, int srcCol, int dstFace, int dstCol, const int dstInc) {
|
||||
const auto lastOffset = _lineStride * (_dims.y - 1);
|
||||
auto srcFirst = _faces[srcFace].begin() + srcCol + _lineStride;
|
||||
auto srcLast = srcFirst + lastOffset;
|
||||
|
||||
auto dstFirst = _faces[dstFace].begin() + dstCol + _lineStride;
|
||||
auto dstLast = dstFirst + lastOffset;
|
||||
const auto dstStride = _lineStride * dstInc;
|
||||
|
||||
assert(srcFirst < _faces[srcFace].end());
|
||||
assert(srcLast < _faces[srcFace].end());
|
||||
assert(dstFirst < _faces[dstFace].end());
|
||||
assert(dstLast < _faces[dstFace].end());
|
||||
|
||||
if (dstInc < 0) {
|
||||
std::swap(dstFirst, dstLast);
|
||||
}
|
||||
|
||||
copy(srcFirst, srcLast, _lineStride, dstFirst, dstStride);
|
||||
}
|
||||
|
||||
void copyRowToRow(int srcFace, int srcRow, int dstFace, int dstRow, const int dstInc) {
|
||||
const auto lastOffset =(_dims.x - 1);
|
||||
auto srcFirst = _faces[srcFace].begin() + srcRow * _lineStride + 1;
|
||||
auto srcLast = srcFirst + lastOffset;
|
||||
|
||||
auto dstFirst = _faces[dstFace].begin() + dstRow * _lineStride + 1;
|
||||
auto dstLast = dstFirst + lastOffset;
|
||||
|
||||
assert(srcFirst < _faces[srcFace].end());
|
||||
assert(srcLast < _faces[srcFace].end());
|
||||
assert(dstFirst < _faces[dstFace].end());
|
||||
assert(dstLast < _faces[dstFace].end());
|
||||
|
||||
if (dstInc < 0) {
|
||||
std::swap(dstFirst, dstLast);
|
||||
}
|
||||
|
||||
copy(srcFirst, srcLast, 1, dstFirst, dstInc);
|
||||
}
|
||||
|
||||
void copyColumnToRow(int srcFace, int srcCol, int dstFace, int dstRow, int dstInc) {
|
||||
const auto srcLastOffset = _lineStride * (_dims.y - 1);
|
||||
auto srcFirst = _faces[srcFace].begin() + srcCol + _lineStride;
|
||||
auto srcLast = srcFirst + srcLastOffset;
|
||||
|
||||
const auto dstLastOffset = (_dims.x - 1);
|
||||
auto dstFirst = _faces[dstFace].begin() + dstRow * _lineStride + 1;
|
||||
auto dstLast = dstFirst + dstLastOffset;
|
||||
|
||||
assert(srcFirst < _faces[srcFace].end());
|
||||
assert(srcLast < _faces[srcFace].end());
|
||||
assert(dstFirst < _faces[dstFace].end());
|
||||
assert(dstLast < _faces[dstFace].end());
|
||||
|
||||
if (dstInc < 0) {
|
||||
std::swap(dstFirst, dstLast);
|
||||
}
|
||||
|
||||
copy(srcFirst, srcLast, _lineStride, dstFirst, dstInc);
|
||||
}
|
||||
|
||||
void copyRowToColumn(int srcFace, int srcRow, int dstFace, int dstCol, int dstInc) {
|
||||
const auto srcLastOffset = (_dims.x - 1);
|
||||
auto srcFirst = _faces[srcFace].begin() + srcRow * _lineStride + 1;
|
||||
auto srcLast = srcFirst + srcLastOffset;
|
||||
|
||||
const auto dstLastOffset = _lineStride * (_dims.y - 1);
|
||||
auto dstFirst = _faces[dstFace].begin() + dstCol + _lineStride;
|
||||
auto dstLast = dstFirst + dstLastOffset;
|
||||
const auto dstStride = _lineStride * dstInc;
|
||||
|
||||
assert(srcFirst < _faces[srcFace].end());
|
||||
assert(srcLast < _faces[srcFace].end());
|
||||
assert(dstFirst < _faces[dstFace].end());
|
||||
assert(dstLast < _faces[dstFace].end());
|
||||
|
||||
if (dstInc < 0) {
|
||||
std::swap(dstFirst, dstLast);
|
||||
}
|
||||
|
||||
copy(srcFirst, srcLast, 1, dstFirst, dstStride);
|
||||
}
|
||||
};

static void copySurface(const nvtt::Surface& source, glm::vec4* dest, size_t dstLineStride) {
    const float* srcRedIt = source.channel(0);
    const float* srcGreenIt = source.channel(1);
    const float* srcBlueIt = source.channel(2);
    const float* srcAlphaIt = source.channel(3);

    for (int y = 0; y < source.height(); y++) {
        glm::vec4* dstColIt = dest;
        for (int x = 0; x < source.width(); x++) {
            *dstColIt = glm::vec4(*srcRedIt, *srcGreenIt, *srcBlueIt, *srcAlphaIt);
            dstColIt++;
            srcRedIt++;
            srcGreenIt++;
            srcBlueIt++;
            srcAlphaIt++;
        }
        dest += dstLineStride;
    }
}

CubeMap::CubeMap(int width, int height, int mipCount) {
    reset(width, height, mipCount);
}

CubeMap::CubeMap(const std::vector<Image>& faces, int mipCount, const std::atomic<bool>& abortProcessing) {
    reset(faces.front().getWidth(), faces.front().getHeight(), mipCount);

    int face;

    nvtt::Surface surface;
    surface.setAlphaMode(nvtt::AlphaMode_None);
    surface.setWrapMode(nvtt::WrapMode_Mirror);

    // Compute mips
    for (face = 0; face < 6; face++) {
        Image faceImage = faces[face].getConvertedToFormat(Image::Format_RGBAF);

        surface.setImage(nvtt::InputFormat_RGBA_32F, _width, _height, 1, faceImage.editBits());

        auto mipLevel = 0;
        copySurface(surface, editFace(0, face), getMipLineStride(0));

        while (surface.canMakeNextMipmap() && !abortProcessing.load()) {
            surface.buildNextMipmap(nvtt::MipmapFilter_Box);
            mipLevel++;

            copySurface(surface, editFace(mipLevel, face), getMipLineStride(mipLevel));
        }
    }

    if (abortProcessing.load()) {
        return;
    }

    for (gpu::uint16 mipLevel = 0; mipLevel < mipCount; ++mipLevel) {
        Mip mip(mipLevel, this);
        mip.applySeams();
    }
}

void CubeMap::applyGamma(float value) {
    for (auto& mip : _mips) {
        for (auto& face : mip) {
            for (auto& pixel : face) {
                pixel.r = std::pow(pixel.r, value);
                pixel.g = std::pow(pixel.g, value);
                pixel.b = std::pow(pixel.b, value);
            }
        }
    }
}

void CubeMap::copyFace(int width, int height, const glm::vec4* source, size_t srcLineStride, glm::vec4* dest, size_t dstLineStride) {
    for (int y = 0; y < height; y++) {
        std::copy(source, source + width, dest);
        source += srcLineStride;
        dest += dstLineStride;
    }
}

Image CubeMap::getFaceImage(gpu::uint16 mipLevel, int face) const {
    auto mipDims = getMipDimensions(mipLevel);
    Image faceImage(mipDims.x, mipDims.y, Image::Format_RGBAF);
    copyFace(mipDims.x, mipDims.y, getFace(mipLevel, face), getMipLineStride(mipLevel), (glm::vec4*)faceImage.editBits(), faceImage.getBytesPerLineCount() / sizeof(glm::vec4));
    return faceImage;
}

void CubeMap::reset(int width, int height, int mipCount) {
    assert(mipCount > 0 && width > 0 && height > 0);
    _width = width;
    _height = height;
    _mips.resize(mipCount);
    for (auto mipLevel = 0; mipLevel < mipCount; mipLevel++) {
        auto mipDimensions = getMipDimensions(mipLevel);
        // Add extra pixels on edges to perform edge seam fixup (we will duplicate pixels from
        // neighbouring faces)
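        // With EDGE_WIDTH == 1, a WxH mip face is therefore stored as
        // (W + 2) x (H + 2) pixels; Mip::applySeams() later fills that border
        // with texels duplicated from the adjacent faces so filtering can
        // safely read across cube edges.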
        auto mipPixelCount = (mipDimensions.x + 2 * EDGE_WIDTH) * (mipDimensions.y + 2 * EDGE_WIDTH);

        for (auto& face : _mips[mipLevel]) {
            face.resize(mipPixelCount);
        }
    }
}

void CubeMap::copyTo(CubeMap& other) const {
    other._width = _width;
    other._height = _height;
    other._mips = _mips;
}

void CubeMap::getFaceUV(const glm::vec3& dir, int* index, glm::vec2* uv) {
    // Taken from https://en.wikipedia.org/wiki/Cube_mapping
    float absX = std::abs(dir.x);
    float absY = std::abs(dir.y);
    float absZ = std::abs(dir.z);

    auto isXPositive = dir.x > 0;
    auto isYPositive = dir.y > 0;
    auto isZPositive = dir.z > 0;

    float maxAxis = 1.0f;
    float uc = 0.0f;
    float vc = 0.0f;

    // POSITIVE X
    if (isXPositive && absX >= absY && absX >= absZ) {
        // u (0 to 1) goes from +z to -z
        // v (0 to 1) goes from -y to +y
        maxAxis = absX;
        uc = -dir.z;
        vc = -dir.y;
        *index = 0;
    }
    // NEGATIVE X
    else if (!isXPositive && absX >= absY && absX >= absZ) {
        // u (0 to 1) goes from -z to +z
        // v (0 to 1) goes from -y to +y
        maxAxis = absX;
        uc = dir.z;
        vc = -dir.y;
        *index = 1;
    }
    // POSITIVE Y
    else if (isYPositive && absY >= absX && absY >= absZ) {
        // u (0 to 1) goes from -x to +x
        // v (0 to 1) goes from +z to -z
        maxAxis = absY;
        uc = dir.x;
        vc = dir.z;
        *index = 2;
    }
    // NEGATIVE Y
    else if (!isYPositive && absY >= absX && absY >= absZ) {
        // u (0 to 1) goes from -x to +x
        // v (0 to 1) goes from -z to +z
        maxAxis = absY;
        uc = dir.x;
        vc = -dir.z;
        *index = 3;
    }
    // POSITIVE Z
    else if (isZPositive && absZ >= absX && absZ >= absY) {
        // u (0 to 1) goes from -x to +x
        // v (0 to 1) goes from -y to +y
        maxAxis = absZ;
        uc = dir.x;
        vc = -dir.y;
        *index = 4;
    }
    // NEGATIVE Z
    else if (!isZPositive && absZ >= absX && absZ >= absY) {
        // u (0 to 1) goes from +x to -x
        // v (0 to 1) goes from -y to +y
        maxAxis = absZ;
        uc = -dir.x;
        vc = -dir.y;
        *index = 5;
    }
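
    // Worked example (illustrative, not from the original source): for
    // dir = (1.0, 0.5, -0.25) the POSITIVE X branch is taken (absX is largest
    // and x > 0), so maxAxis = 1.0, uc = -dir.z = 0.25, vc = -dir.y = -0.5,
    // and the remapping below yields uv = (0.625, 0.25).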

    // Convert range from -1 to 1 to 0 to 1
    uv->x = 0.5f * (uc / maxAxis + 1.0f);
    uv->y = 0.5f * (vc / maxAxis + 1.0f);
}

glm::vec4 CubeMap::fetchLod(const glm::vec3& dir, float lod) const {
    lod = glm::clamp<float>(lod, 0.0f, _mips.size() - 1);

    gpu::uint16 loLevel = (gpu::uint16)std::floor(lod);
    gpu::uint16 hiLevel = (gpu::uint16)std::ceil(lod);
    float lodFrac = lod - (float)loLevel;
    ConstMip loMip(loLevel, this);
    ConstMip hiMip(hiLevel, this);
    int face;
    glm::vec2 uv;
    glm::vec4 loColor;
    glm::vec4 hiColor;

    getFaceUV(dir, &face, &uv);

    loColor = loMip.fetch(face, uv);
    hiColor = hiMip.fetch(face, uv);
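
    // Standard trilinear filtering across LODs: two bilinear fetches from the
    // nearest mip levels, blended by the fractional part of the requested lod.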
    return loColor + (hiColor - loColor) * lodFrac;
}

struct CubeMap::GGXSamples {
    float invTotalWeight;
    std::vector<glm::vec4> points;
};

// All the GGX convolution code is inspired from:
// https://placeholderart.wordpress.com/2015/07/28/implementation-notes-runtime-environment-map-filtering-for-image-based-lighting/
// Computation is done in tangent space so normal is always (0,0,1) which simplifies a lot of things
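//
// As a reminder (added for context, not part of the original source), the GGX
// normal distribution being importance-sampled here is
//
//     D(h) = a^2 / (pi * ((n.h)^2 * (a^2 - 1) + 1)^2)
//
// with a the GGX alpha derived from the roughness. ggx::sample(xi, roughness)
// is assumed to draw half-vectors h distributed according to D, and each light
// direction L below is the reflection of the view vector (which equals the
// normal (0,0,1) in tangent space) about h.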

void CubeMap::generateGGXSamples(GGXSamples& data, float roughness, const int resolution) {
    glm::vec2 xi;
    glm::vec3 L;
    glm::vec3 H;
    const float saTexel = (float)(4.0 * M_PI / (6.0 * resolution * resolution));
    const float mipBias = 3.0f;
    const auto sampleCount = data.points.size();
    const auto hammersleySequenceLength = data.points.size();
    size_t sampleIndex = 0;
    size_t hammersleySampleIndex = 0;
    float NdotL;

    data.invTotalWeight = 0.0f;

    // Do some computation in tangent space
    while (sampleIndex < sampleCount) {
        if (hammersleySampleIndex < hammersleySequenceLength) {
            xi = hammersley::evaluate((int)hammersleySampleIndex, (int)hammersleySequenceLength);
            H = ggx::sample(xi, roughness);
            L = H * (2.0f * H.z) - glm::vec3(0.0f, 0.0f, 1.0f);
            NdotL = L.z;
            hammersleySampleIndex++;
        } else {
            NdotL = -1.0f;
        }

        while (NdotL <= 0.0f) {
            // Create a purely random sample
            xi.x = rand() / float(RAND_MAX);
            xi.y = rand() / float(RAND_MAX);
            H = ggx::sample(xi, roughness);
            L = H * (2.0f * H.z) - glm::vec3(0.0f, 0.0f, 1.0f);
            NdotL = L.z;
        }
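
        // Pre-filtered importance sampling: the solid angle covered by this
        // sample (derived from its pdf) is compared with the solid angle of a
        // single source texel (saTexel) to choose a source mip level, so that
        // sparse samples read pre-averaged texels instead of aliasing.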
        float NdotH = std::max(0.0f, H.z);
        float HdotV = NdotH;
        float D = ggx::evaluate(NdotH, roughness);
        float pdf = (D * NdotH / (4.0f * HdotV)) + 0.0001f;
        float saSample = 1.0f / (float(sampleCount) * pdf + 0.0001f);
        float mipLevel = std::max(0.5f * std::log2(saSample / saTexel) + mipBias, 0.0f);

        auto& sample = data.points[sampleIndex];
        sample.x = L.x;
        sample.y = L.y;
        sample.z = L.z;
        sample.w = mipLevel;

        data.invTotalWeight += NdotL;

        sampleIndex++;
    }
    data.invTotalWeight = 1.0f / data.invTotalWeight;
}

void CubeMap::convolveForGGX(CubeMap& output, const std::atomic<bool>& abortProcessing) const {
    // This should match the value in the getMipLevelFromRoughness function (LightAmbient.slh)
    static const float ROUGHNESS_1_MIP_RESOLUTION = 1.5f;
    static const size_t MAX_SAMPLE_COUNT = 4000;

    const auto mipCount = getMipCount();
    GGXSamples params;

    params.points.reserve(MAX_SAMPLE_COUNT);

    for (gpu::uint16 mipLevel = 0; mipLevel < mipCount; ++mipLevel) {
        // This is the inverse of the code found in LightAmbient.slh in getMipLevelFromRoughness
        float levelAlpha = float(mipLevel) / (mipCount - ROUGHNESS_1_MIP_RESOLUTION);
        float mipRoughness = levelAlpha * (1.0f + 2.0f * levelAlpha) / 3.0f;
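
        // Inverting mipRoughness = a * (1 + 2a) / 3, with a = mipLevel / (mipCount - 1.5),
        // gives a = (sqrt(24 * roughness + 1) - 1) / 4, which is what the shader-side
        // getMipLevelFromRoughness is expected to compute when picking a mip.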
        mipRoughness = std::max(1e-3f, mipRoughness);
        mipRoughness = std::min(1.0f, mipRoughness);

        size_t mipTotalPixelCount = getMipWidth(mipLevel) * getMipHeight(mipLevel) * 6;
        size_t sampleCount = 1U + size_t(4000 * mipRoughness * mipRoughness);

        sampleCount = std::min(sampleCount, 2 * mipTotalPixelCount);
        sampleCount = std::min(MAX_SAMPLE_COUNT, sampleCount);

        params.points.resize(sampleCount);
        generateGGXSamples(params, mipRoughness, _width);

        for (int face = 0; face < 6; face++) {
            convolveMipFaceForGGX(params, output, mipLevel, face, abortProcessing);
            if (abortProcessing.load()) {
                return;
            }
        }
    }
}

void CubeMap::convolveMipFaceForGGX(const GGXSamples& samples, CubeMap& output, gpu::uint16 mipLevel, int face, const std::atomic<bool>& abortProcessing) const {
    const glm::vec3* faceNormals = FACE_NORMALS + face * 4;
    const glm::vec3 deltaYNormalLo = faceNormals[2] - faceNormals[0];
    const glm::vec3 deltaYNormalHi = faceNormals[3] - faceNormals[1];
    const auto mipDimensions = output.getMipDimensions(mipLevel);
    const auto outputLineStride = output.getMipLineStride(mipLevel);
    auto outputFacePixels = output.editFace(mipLevel, face);

    tbb::parallel_for(tbb::blocked_range2d<int, int>(0, mipDimensions.y, 32, 0, mipDimensions.x, 32), [&](const tbb::blocked_range2d<int, int>& range) {
        auto rowRange = range.rows();
        auto colRange = range.cols();

        for (auto y = rowRange.begin(); y < rowRange.end(); y++) {
            if (abortProcessing.load()) {
                break;
            }

            const float yAlpha = (y + 0.5f) / mipDimensions.y;
            const glm::vec3 normalXLo = faceNormals[0] + deltaYNormalLo * yAlpha;
            const glm::vec3 normalXHi = faceNormals[1] + deltaYNormalHi * yAlpha;
            const glm::vec3 deltaXNormal = normalXHi - normalXLo;

            for (auto x = colRange.begin(); x < colRange.end(); x++) {
                const float xAlpha = (x + 0.5f) / mipDimensions.x;
                // Interpolate normal for this pixel
                const glm::vec3 normal = glm::normalize(normalXLo + deltaXNormal * xAlpha);

                outputFacePixels[x + y * outputLineStride] = computeConvolution(normal, samples);
            }
        }
    });
}

glm::vec4 CubeMap::computeConvolution(const glm::vec3& N, const GGXSamples& samples) const {
    // from tangent-space vector to world-space
    glm::vec3 bitangent = std::abs(N.z) < 0.999f ? glm::vec3(0.0f, 0.0f, 1.0f) : glm::vec3(1.0f, 0.0f, 0.0f);
    glm::vec3 tangent = glm::normalize(glm::cross(bitangent, N));
    bitangent = glm::cross(N, tangent);
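    // (tangent, bitangent, N) now form an orthonormal basis; the tangent-space
    // sample directions generated around +Z are rotated into world space below
    // so the specular lobe ends up centred on the interpolated normal.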

    const size_t sampleCount = samples.points.size();
    glm::vec4 prefilteredColor = glm::vec4(0.0f);

    for (size_t i = 0; i < sampleCount; ++i) {
        const auto& sample = samples.points[i];
        glm::vec3 L(sample.x, sample.y, sample.z);
        float NdotL = L.z;
        float mipLevel = sample.w;
        // Now back to world space
        L = tangent * L.x + bitangent * L.y + N * L.z;
        prefilteredColor += fetchLod(L, mipLevel) * NdotL;
    }
    prefilteredColor = prefilteredColor * samples.invTotalWeight;
    prefilteredColor.a = 1.0f;
    return prefilteredColor;
}

92
libraries/image/src/image/CubeMap.h
Normal file

@@ -0,0 +1,92 @@
//
//  CubeMap.h
//  image/src/image
//
//  Created by Olivier Prat on 03/27/2019.
//  Copyright 2019 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_image_CubeMap_h
#define hifi_image_CubeMap_h

#include <gpu/Texture.h>
#include <glm/vec4.hpp>
#include <vector>
#include <array>
#include <atomic>

#include "Image.h"

namespace image {

    class CubeMap {

        enum {
            EDGE_WIDTH = 1
        };

    public:

        CubeMap(int width, int height, int mipCount);
        CubeMap(const std::vector<Image>& faces, int mipCount, const std::atomic<bool>& abortProcessing = false);

        void reset(int width, int height, int mipCount);
        void copyTo(CubeMap& other) const;

        void applyGamma(float value);

        gpu::uint16 getMipCount() const { return (gpu::uint16)_mips.size(); }
        int getMipWidth(gpu::uint16 mipLevel) const {
            return std::max(1, _width >> mipLevel);
        }
        int getMipHeight(gpu::uint16 mipLevel) const {
            return std::max(1, _height >> mipLevel);
        }
        gpu::Vec2i getMipDimensions(gpu::uint16 mipLevel) const {
            return gpu::Vec2i(getMipWidth(mipLevel), getMipHeight(mipLevel));
        }

        size_t getMipLineStride(gpu::uint16 mipLevel) const {
            return getMipWidth(mipLevel) + 2 * EDGE_WIDTH;
        }

        glm::vec4* editFace(gpu::uint16 mipLevel, int face) {
            return _mips[mipLevel][face].data() + (getMipLineStride(mipLevel) + 1) * EDGE_WIDTH;
        }

        const glm::vec4* getFace(gpu::uint16 mipLevel, int face) const {
            return _mips[mipLevel][face].data() + (getMipLineStride(mipLevel) + 1) * EDGE_WIDTH;
        }

        Image getFaceImage(gpu::uint16 mipLevel, int face) const;

        void convolveForGGX(CubeMap& output, const std::atomic<bool>& abortProcessing) const;
        glm::vec4 fetchLod(const glm::vec3& dir, float lod) const;

    private:

        struct GGXSamples;
        class Mip;
        class ConstMip;

        using Face = std::vector<glm::vec4>;
        using Faces = std::array<Face, 6>;

        int _width;
        int _height;
        std::vector<Faces> _mips;

        static void getFaceUV(const glm::vec3& dir, int* index, glm::vec2* uv);
        static void generateGGXSamples(GGXSamples& data, float roughness, const int resolution);
        static void copyFace(int width, int height, const glm::vec4* source, size_t srcLineStride, glm::vec4* dest, size_t dstLineStride);
        void convolveMipFaceForGGX(const GGXSamples& samples, CubeMap& output, gpu::uint16 mipLevel, int face, const std::atomic<bool>& abortProcessing) const;
        glm::vec4 computeConvolution(const glm::vec3& normal, const GGXSamples& samples) const;

    };

}

#endif // hifi_image_CubeMap_h

@@ -6,28 +6,91 @@

using namespace image;

Image::Image(int width, int height, Format format) :
    _dims(width, height),
    _format(format) {
    if (_format == Format_RGBAF) {
        _floatData.resize(width*height);
    } else {
        _packedData = QImage(width, height, (QImage::Format)format);
    }
}

size_t Image::getByteCount() const {
    if (_format == Format_RGBAF) {
        return sizeof(FloatPixels::value_type) * _floatData.size();
    } else {
        return _packedData.byteCount();
    }
}

size_t Image::getBytesPerLineCount() const {
    if (_format == Format_RGBAF) {
        return sizeof(FloatPixels::value_type) * _dims.x;
    } else {
        return _packedData.bytesPerLine();
    }
}

glm::uint8* Image::editScanLine(int y) {
    if (_format == Format_RGBAF) {
        return reinterpret_cast<glm::uint8*>(_floatData.data() + y * _dims.x);
    } else {
        return _packedData.scanLine(y);
    }
}

const glm::uint8* Image::getScanLine(int y) const {
    if (_format == Format_RGBAF) {
        return reinterpret_cast<const glm::uint8*>(_floatData.data() + y * _dims.x);
    } else {
        return _packedData.scanLine(y);
    }
}

glm::uint8* Image::editBits() {
    if (_format == Format_RGBAF) {
        return reinterpret_cast<glm::uint8*>(_floatData.data());
    } else {
        return _packedData.bits();
    }
}

const glm::uint8* Image::getBits() const {
    if (_format == Format_RGBAF) {
        return reinterpret_cast<const glm::uint8*>(_floatData.data());
    } else {
        return _packedData.bits();
    }
}

Image Image::getScaled(glm::uvec2 dstSize, AspectRatioMode ratioMode, TransformationMode transformMode) const {
    if ((Image::Format)_data.format() == Image::Format_PACKED_FLOAT) {
        // Start by converting to full float
        glm::vec4* floatPixels = new glm::vec4[getWidth()*getHeight()];
        auto unpackFunc = getHDRUnpackingFunction();
        auto floatDataIt = floatPixels;
        for (glm::uint32 lineNb = 0; lineNb < getHeight(); lineNb++) {
            const glm::uint32* srcPixelIt = reinterpret_cast<const glm::uint32*>(getScanLine((int)lineNb));
            const glm::uint32* srcPixelEnd = srcPixelIt + getWidth();

            while (srcPixelIt < srcPixelEnd) {
                *floatDataIt = glm::vec4(unpackFunc(*srcPixelIt), 1.0f);
                ++srcPixelIt;
                ++floatDataIt;
            }
        }

        // Perform filtered resize with NVTT
        static_assert(sizeof(glm::vec4) == 4 * sizeof(float), "Assuming glm::vec4 holds 4 floats");
    if (_format == Format_PACKED_FLOAT || _format == Format_RGBAF) {
        nvtt::Surface surface;
        surface.setImage(nvtt::InputFormat_RGBA_32F, getWidth(), getHeight(), 1, floatPixels);
        delete[] floatPixels;

        if (_format == Format_RGBAF) {
            surface.setImage(nvtt::InputFormat_RGBA_32F, getWidth(), getHeight(), 1, _floatData.data());
        } else {
            // Start by converting to full float
            glm::vec4* floatPixels = new glm::vec4[getWidth()*getHeight()];
            auto unpackFunc = getHDRUnpackingFunction();
            auto floatDataIt = floatPixels;
            for (glm::uint32 lineNb = 0; lineNb < getHeight(); lineNb++) {
                const glm::uint32* srcPixelIt = reinterpret_cast<const glm::uint32*>(getScanLine((int)lineNb));
                const glm::uint32* srcPixelEnd = srcPixelIt + getWidth();

                while (srcPixelIt < srcPixelEnd) {
                    *floatDataIt = glm::vec4(unpackFunc(*srcPixelIt), 1.0f);
                    ++srcPixelIt;
                    ++floatDataIt;
                }
            }

            // Perform filtered resize with NVTT
            static_assert(sizeof(glm::vec4) == 4 * sizeof(float), "Assuming glm::vec4 holds 4 floats");
            surface.setImage(nvtt::InputFormat_RGBA_32F, getWidth(), getHeight(), 1, floatPixels);
            delete[] floatPixels;
        }

        nvtt::ResizeFilter filter = nvtt::ResizeFilter_Kaiser;
        if (transformMode == Qt::TransformationMode::FastTransformation) {

@@ -35,44 +98,148 @@ Image Image::getScaled(glm::uvec2 dstSize, AspectRatioMode ratioMode, Transforma
        }
        surface.resize(dstSize.x, dstSize.y, 1, filter);

        // And convert back to original format
        QImage resizedImage((int)dstSize.x, (int)dstSize.y, (QImage::Format)Image::Format_PACKED_FLOAT);

        auto packFunc = getHDRPackingFunction();
        auto srcRedIt = reinterpret_cast<const float*>(surface.channel(0));
        auto srcGreenIt = reinterpret_cast<const float*>(surface.channel(1));
        auto srcBlueIt = reinterpret_cast<const float*>(surface.channel(2));
        for (glm::uint32 lineNb = 0; lineNb < dstSize.y; lineNb++) {
            glm::uint32* dstPixelIt = reinterpret_cast<glm::uint32*>(resizedImage.scanLine((int)lineNb));
            glm::uint32* dstPixelEnd = dstPixelIt + dstSize.x;
        auto srcAlphaIt = reinterpret_cast<const float*>(surface.channel(3));

        if (_format == Format_RGBAF) {
            Image output(_dims.x, _dims.y, _format);
            auto dstPixelIt = output._floatData.begin();
            auto dstPixelEnd = output._floatData.end();

            while (dstPixelIt < dstPixelEnd) {
                *dstPixelIt = packFunc(glm::vec3(*srcRedIt, *srcGreenIt, *srcBlueIt));
                *dstPixelIt = glm::vec4(*srcRedIt, *srcGreenIt, *srcBlueIt, *srcAlphaIt);
                ++srcRedIt;
                ++srcGreenIt;
                ++srcBlueIt;
                ++srcAlphaIt;

                ++dstPixelIt;
            }

            return output;
        } else {
            // And convert back to original format
            QImage resizedImage((int)dstSize.x, (int)dstSize.y, (QImage::Format)Image::Format_PACKED_FLOAT);

            auto packFunc = getHDRPackingFunction();
            for (glm::uint32 lineNb = 0; lineNb < dstSize.y; lineNb++) {
                glm::uint32* dstPixelIt = reinterpret_cast<glm::uint32*>(resizedImage.scanLine((int)lineNb));
                glm::uint32* dstPixelEnd = dstPixelIt + dstSize.x;

                while (dstPixelIt < dstPixelEnd) {
                    *dstPixelIt = packFunc(glm::vec3(*srcRedIt, *srcGreenIt, *srcBlueIt));
                    ++srcRedIt;
                    ++srcGreenIt;
                    ++srcBlueIt;
                    ++dstPixelIt;
                }
            }
            return resizedImage;
        }
        return resizedImage;
    } else {
        return _data.scaled(fromGlm(dstSize), ratioMode, transformMode);
        return _packedData.scaled(fromGlm(dstSize), ratioMode, transformMode);
    }
}

Image Image::getConvertedToFormat(Format newFormat) const {
    assert(getFormat() != Format_PACKED_FLOAT);
    return _data.convertToFormat((QImage::Format)newFormat);
    const float MAX_COLOR_VALUE = 255.0f;

    if (newFormat == _format) {
        return *this;
    } else if ((_format != Format_R11G11B10F && _format != Format_RGBAF) && (newFormat != Format_R11G11B10F && newFormat != Format_RGBAF)) {
        return _packedData.convertToFormat((QImage::Format)newFormat);
    } else if (_format == Format_PACKED_FLOAT) {
        Image newImage(_dims.x, _dims.y, newFormat);

        switch (newFormat) {
            case Format_RGBAF:
                convertToFloatFromPacked(getBits(), _dims.x, _dims.y, getBytesPerLineCount(), gpu::Element::COLOR_R11G11B10, newImage._floatData.data(), _dims.x);
                break;

            default:
            {
                auto unpackFunc = getHDRUnpackingFunction();
                const glm::uint32* srcIt = reinterpret_cast<const glm::uint32*>(getBits());

                for (int y = 0; y < _dims.y; y++) {
                    for (int x = 0; x < _dims.x; x++) {
                        auto color = glm::clamp(unpackFunc(*srcIt) * MAX_COLOR_VALUE, 0.0f, 255.0f);
                        newImage.setPackedPixel(x, y, qRgb(color.r, color.g, color.b));
                        srcIt++;
                    }
                }
                break;
            }
        }
        return newImage;
    } else if (_format == Format_RGBAF) {
        Image newImage(_dims.x, _dims.y, newFormat);

        switch (newFormat) {
            case Format_R11G11B10F:
                convertToPackedFromFloat(newImage.editBits(), _dims.x, _dims.y, getBytesPerLineCount(), gpu::Element::COLOR_R11G11B10, _floatData.data(), _dims.x);
                break;

            default:
            {
                FloatPixels::const_iterator srcIt = _floatData.begin();

                for (int y = 0; y < _dims.y; y++) {
                    for (int x = 0; x < _dims.x; x++) {
                        auto color = glm::clamp((*srcIt) * MAX_COLOR_VALUE, 0.0f, 255.0f);
                        newImage.setPackedPixel(x, y, qRgba(color.r, color.g, color.b, color.a));
                        srcIt++;
                    }
                }
                break;
            }
        }
        return newImage;
    } else {
        Image newImage(_dims.x, _dims.y, newFormat);
        assert(newImage.hasFloatFormat());

        if (newFormat == Format_RGBAF) {
            FloatPixels::iterator dstIt = newImage._floatData.begin();

            for (int y = 0; y < _dims.y; y++) {
                auto line = (const QRgb*)getScanLine(y);
                for (int x = 0; x < _dims.x; x++) {
                    QRgb pixel = line[x];
                    *dstIt = glm::vec4(qRed(pixel), qGreen(pixel), qBlue(pixel), qAlpha(pixel)) / MAX_COLOR_VALUE;
                    dstIt++;
                }
            }
        } else {
            auto packFunc = getHDRPackingFunction();
            glm::uint32* dstIt = reinterpret_cast<glm::uint32*>(newImage.editBits());

            for (int y = 0; y < _dims.y; y++) {
                auto line = (const QRgb*)getScanLine(y);
                for (int x = 0; x < _dims.x; x++) {
                    QRgb pixel = line[x];
                    *dstIt = packFunc(glm::vec3(qRed(pixel), qGreen(pixel), qBlue(pixel)) / MAX_COLOR_VALUE);
                    dstIt++;
                }
            }
        }
        return newImage;
    }
}

void Image::invertPixels() {
    _data.invertPixels(QImage::InvertRgba);
    assert(_format != Format_PACKED_FLOAT && _format != Format_RGBAF);
    _packedData.invertPixels(QImage::InvertRgba);
}

Image Image::getSubImage(QRect rect) const {
    return _data.copy(rect);
    assert(_format != Format_RGBAF);
    return _packedData.copy(rect);
}

Image Image::getMirrored(bool horizontal, bool vertical) const {
    return _data.mirrored(horizontal, vertical);
    assert(_format != Format_RGBAF);
    return _packedData.mirrored(horizontal, vertical);
}

@@ -48,37 +48,69 @@ namespace image {
        Format_RGBA8888_Premultiplied = QImage::Format_RGBA8888_Premultiplied,
        Format_Grayscale8 = QImage::Format_Grayscale8,
        Format_R11G11B10F = QImage::Format_RGB30,
        Format_PACKED_FLOAT = Format_R11G11B10F
        Format_PACKED_FLOAT = Format_R11G11B10F,
        // RGBA 32 bit single precision float per component
        Format_RGBAF = 100
    };

    using AspectRatioMode = Qt::AspectRatioMode;
    using TransformationMode = Qt::TransformationMode;

    Image() {}
    Image(int width, int height, Format format) : _data(width, height, (QImage::Format)format) {}
    Image(const QImage& data) : _data(data) {}
    void operator=(const QImage& image) {
        _data = image;
    Image() : _dims(0,0) {}
    Image(int width, int height, Format format);
    Image(const QImage& data) : _packedData(data), _dims(data.width(), data.height()), _format((Format)data.format()) {}

    void operator=(const QImage& other) {
        _packedData = other;
        _floatData.clear();
        _dims.x = other.width();
        _dims.y = other.height();
        _format = (Format)other.format();
    }

    bool isNull() const { return _data.isNull(); }

    Format getFormat() const { return (Format)_data.format(); }
    bool hasAlphaChannel() const { return _data.hasAlphaChannel(); }

    glm::uint32 getWidth() const { return (glm::uint32)_data.width(); }
    glm::uint32 getHeight() const { return (glm::uint32)_data.height(); }
    glm::uvec2 getSize() const { return toGlm(_data.size()); }
    size_t getByteCount() const { return _data.byteCount(); }

    QRgb getPixel(int x, int y) const { return _data.pixel(x, y); }
    void setPixel(int x, int y, QRgb value) {
        _data.setPixel(x, y, value);
    void operator=(const Image& other) {
        if (&other != this) {
            _packedData = other._packedData;
            _floatData = other._floatData;
            _dims = other._dims;
            _format = other._format;
        }
    }

    glm::uint8* editScanLine(int y) { return _data.scanLine(y); }
    const glm::uint8* getScanLine(int y) const { return _data.scanLine(y); }
    const glm::uint8* getBits() const { return _data.constBits(); }
    bool isNull() const { return _packedData.isNull() && _floatData.empty(); }

    Format getFormat() const { return _format; }
    bool hasAlphaChannel() const { return _packedData.hasAlphaChannel() || _format == Format_RGBAF; }
    bool hasFloatFormat() const { return _format == Format_R11G11B10F || _format == Format_RGBAF; }

    glm::uint32 getWidth() const { return (glm::uint32)_dims.x; }
    glm::uint32 getHeight() const { return (glm::uint32)_dims.y; }
    glm::uvec2 getSize() const { return glm::uvec2(_dims); }
    size_t getByteCount() const;
    size_t getBytesPerLineCount() const;

    QRgb getPackedPixel(int x, int y) const {
        assert(_format != Format_RGBAF);
        return _packedData.pixel(x, y);
    }
    void setPackedPixel(int x, int y, QRgb value) {
        assert(_format != Format_RGBAF);
        _packedData.setPixel(x, y, value);
    }

    glm::vec4 getFloatPixel(int x, int y) const {
        assert(_format == Format_RGBAF);
        return _floatData[x + y*_dims.x];
    }
    void setFloatPixel(int x, int y, const glm::vec4& value) {
        assert(_format == Format_RGBAF);
        _floatData[x + y * _dims.x] = value;
    }

    glm::uint8* editScanLine(int y);
    const glm::uint8* getScanLine(int y) const;
    glm::uint8* editBits();
    const glm::uint8* getBits() const;

    Image getScaled(glm::uvec2 newSize, AspectRatioMode ratioMode, TransformationMode transformationMode = Qt::SmoothTransformation) const;
    Image getConvertedToFormat(Format newFormat) const;

@@ -90,7 +122,13 @@ namespace image {

    private:

        QImage _data;
        using FloatPixels = std::vector<glm::vec4>;

        // For QImage supported formats
        QImage _packedData;
        FloatPixels _floatData;
        glm::ivec2 _dims;
        Format _format;
    };

} // namespace image


@@ -29,10 +29,10 @@
#include "OpenEXRReader.h"
#endif
#include "ImageLogging.h"
#include "CubeMap.h"

using namespace gpu;

#define CPU_MIPMAPS 1
#include <nvtt/nvtt.h>

#undef _CRT_SECURE_NO_WARNINGS

@@ -111,11 +111,13 @@ TextureUsage::TextureLoader TextureUsage::getTextureLoaderForType(Type type, con
            return image::TextureUsage::createEmissiveTextureFromImage;
        case LIGHTMAP_TEXTURE:
            return image::TextureUsage::createLightmapTextureFromImage;
        case CUBE_TEXTURE:
        case SKY_TEXTURE:
            return image::TextureUsage::createCubeTextureFromImage;
        case AMBIENT_TEXTURE:
            if (options.value("generateIrradiance", true).toBool()) {
                return image::TextureUsage::createCubeTextureFromImage;
                return image::TextureUsage::createAmbientCubeTextureAndIrradianceFromImage;
            } else {
                return image::TextureUsage::createCubeTextureFromImageWithoutIrradiance;
                return image::TextureUsage::createAmbientCubeTextureFromImage;
            }
        case BUMP_TEXTURE:
            return image::TextureUsage::createNormalTextureFromBumpImage;

@@ -186,14 +188,24 @@ gpu::TexturePointer TextureUsage::createMetallicTextureFromImage(Image&& srcImag
    return process2DTextureGrayscaleFromImage(std::move(srcImage), srcImageName, compress, target, false, abortProcessing);
}

gpu::TexturePointer TextureUsage::createCubeTextureFromImage(Image&& srcImage, const std::string& srcImageName,
gpu::TexturePointer TextureUsage::createCubeTextureAndIrradianceFromImage(Image&& srcImage, const std::string& srcImageName,
                                                             bool compress, BackendTarget target, const std::atomic<bool>& abortProcessing) {
    return processCubeTextureColorFromImage(std::move(srcImage), srcImageName, compress, target, true, abortProcessing);
    return processCubeTextureColorFromImage(std::move(srcImage), srcImageName, compress, target, CUBE_GENERATE_IRRADIANCE, abortProcessing);
}

gpu::TexturePointer TextureUsage::createCubeTextureFromImageWithoutIrradiance(Image&& srcImage, const std::string& srcImageName,
                                                                              bool compress, BackendTarget target, const std::atomic<bool>& abortProcessing) {
    return processCubeTextureColorFromImage(std::move(srcImage), srcImageName, compress, target, false, abortProcessing);
gpu::TexturePointer TextureUsage::createCubeTextureFromImage(Image&& srcImage, const std::string& srcImageName,
                                                             bool compress, BackendTarget target, const std::atomic<bool>& abortProcessing) {
    return processCubeTextureColorFromImage(std::move(srcImage), srcImageName, compress, target, CUBE_DEFAULT, abortProcessing);
}

gpu::TexturePointer TextureUsage::createAmbientCubeTextureFromImage(Image&& image, const std::string& srcImageName,
                                                                    bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing) {
    return processCubeTextureColorFromImage(std::move(image), srcImageName, compress, target, CUBE_GGX_CONVOLVE, abortProcessing);
}

gpu::TexturePointer TextureUsage::createAmbientCubeTextureAndIrradianceFromImage(Image&& image, const std::string& srcImageName,
                                                                                 bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing) {
    return processCubeTextureColorFromImage(std::move(image), srcImageName, compress, target, CUBE_GENERATE_IRRADIANCE | CUBE_GGX_CONVOLVE, abortProcessing);
}

static float denormalize(float value, const float minValue) {

@@ -215,11 +227,17 @@ static uint32 packR11G11B10F(const glm::vec3& color) {
    return glm::packF2x11_1x10(ucolor);
}

static uint32 packUnorm4x8(const glm::vec3& color) {
    return glm::packUnorm4x8(glm::vec4(color, 1.0f));
}
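
// For reference (added context, not in the original source): R11G11B10F packs
// three unsigned floats as 5-bit exponents with 6/6/5-bit mantissas and no sign
// bit, while RGB9E5 stores three 9-bit mantissas under a single shared 5-bit
// exponent; both fit one 32-bit word per pixel, which is why they share these
// packing helpers.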

static std::function<uint32(const glm::vec3&)> getHDRPackingFunction(const gpu::Element& format) {
    if (format == gpu::Element::COLOR_RGB9E5) {
        return glm::packF3x9_E1x5;
    } else if (format == gpu::Element::COLOR_R11G11B10) {
        return packR11G11B10F;
    } else if (format == gpu::Element::COLOR_RGBA_32 || format == gpu::Element::COLOR_SRGBA_32 || format == gpu::Element::COLOR_BGRA_32 || format == gpu::Element::COLOR_SBGRA_32) {
        return packUnorm4x8;
    } else {
        qCWarning(imagelogging) << "Unknown handler format";
        Q_UNREACHABLE();

@@ -231,18 +249,24 @@ std::function<uint32(const glm::vec3&)> getHDRPackingFunction() {
    return getHDRPackingFunction(GPU_CUBEMAP_HDR_FORMAT);
}

std::function<glm::vec3(gpu::uint32)> getHDRUnpackingFunction() {
    if (GPU_CUBEMAP_HDR_FORMAT == gpu::Element::COLOR_RGB9E5) {
std::function<glm::vec3(gpu::uint32)> getHDRUnpackingFunction(const gpu::Element& format) {
    if (format == gpu::Element::COLOR_RGB9E5) {
        return glm::unpackF3x9_E1x5;
    } else if (GPU_CUBEMAP_HDR_FORMAT == gpu::Element::COLOR_R11G11B10) {
    } else if (format == gpu::Element::COLOR_R11G11B10) {
        return glm::unpackF2x11_1x10;
    } else if (format == gpu::Element::COLOR_RGBA_32 || format == gpu::Element::COLOR_SRGBA_32 || format == gpu::Element::COLOR_BGRA_32 || format == gpu::Element::COLOR_SBGRA_32) {
        return glm::unpackUnorm4x8;
    } else {
        qCWarning(imagelogging) << "Unknown HDR encoding format in Image";
        qCWarning(imagelogging) << "Unknown handler format";
        Q_UNREACHABLE();
        return nullptr;
    }
}

std::function<glm::vec3(gpu::uint32)> getHDRUnpackingFunction() {
    return getHDRUnpackingFunction(GPU_CUBEMAP_HDR_FORMAT);
}

Image processRawImageData(QIODevice& content, const std::string& filename) {
    // Help the Image loader by extracting the image file format from the url filename ext.
    // Some tga are not created properly without it.

@@ -490,13 +514,15 @@ struct MyErrorHandler : public nvtt::ErrorHandler {
    }
};

#if defined(NVTT_API)
class SequentialTaskDispatcher : public nvtt::TaskDispatcher {
public:
    SequentialTaskDispatcher(const std::atomic<bool>& abortProcessing) : _abortProcessing(abortProcessing) {};
    SequentialTaskDispatcher(const std::atomic<bool>& abortProcessing = false) : _abortProcessing(abortProcessing) {
    }

    const std::atomic<bool>& _abortProcessing;

    virtual void dispatch(nvtt::Task* task, void* context, int count) override {
    void dispatch(nvtt::Task* task, void* context, int count) override {
        for (int i = 0; i < count; i++) {
            if (!_abortProcessing.load()) {
                task(context, i);

@@ -506,108 +532,137 @@ public:
            }
        }
    };
#endif

void generateHDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic<bool>& abortProcessing, int face) {
    // Take a local copy to force move construction
    // https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#f18-for-consume-parameters-pass-by-x-and-stdmove-the-parameter
    Image localCopy = std::move(image);
void convertToFloatFromPacked(const unsigned char* source, int width, int height, size_t srcLineByteStride, gpu::Element sourceFormat,
                              glm::vec4* output, size_t outputLinePixelStride) {
    glm::vec4* outputIt;
    auto unpackFunc = getHDRUnpackingFunction(sourceFormat);

    assert(localCopy.getFormat() == Image::Format_PACKED_FLOAT);

    const int width = localCopy.getWidth(), height = localCopy.getHeight();
    std::vector<glm::vec4> data;
    std::vector<glm::vec4>::iterator dataIt;
    auto mipFormat = texture->getStoredMipFormat();
    std::function<glm::vec3(uint32)> unpackFunc = getHDRUnpackingFunction();

    nvtt::InputFormat inputFormat = nvtt::InputFormat_RGBA_32F;
    nvtt::WrapMode wrapMode = nvtt::WrapMode_Mirror;
    nvtt::AlphaMode alphaMode = nvtt::AlphaMode_None;

    nvtt::CompressionOptions compressionOptions;
    compressionOptions.setQuality(nvtt::Quality_Production);

    // TODO: gles: generate ETC mips instead?
    if (mipFormat == gpu::Element::COLOR_COMPRESSED_BCX_HDR_RGB) {
        compressionOptions.setFormat(nvtt::Format_BC6);
    } else if (mipFormat == gpu::Element::COLOR_RGB9E5) {
        compressionOptions.setFormat(nvtt::Format_RGB);
        compressionOptions.setPixelType(nvtt::PixelType_Float);
        compressionOptions.setPixelFormat(32, 32, 32, 0);
    } else if (mipFormat == gpu::Element::COLOR_R11G11B10) {
        compressionOptions.setFormat(nvtt::Format_RGB);
        compressionOptions.setPixelType(nvtt::PixelType_Float);
        compressionOptions.setPixelFormat(32, 32, 32, 0);
    } else {
        qCWarning(imagelogging) << "Unknown mip format";
        Q_UNREACHABLE();
        return;
    }

    data.resize(width * height);
    dataIt = data.begin();
    outputLinePixelStride -= width;
    outputIt = output;
    for (auto lineNb = 0; lineNb < height; lineNb++) {
        const uint32* srcPixelIt = reinterpret_cast<const uint32*>(localCopy.getScanLine(lineNb));
        const uint32* srcPixelIt = reinterpret_cast<const uint32*>(source + lineNb * srcLineByteStride);
        const uint32* srcPixelEnd = srcPixelIt + width;

        while (srcPixelIt < srcPixelEnd) {
            *dataIt = glm::vec4(unpackFunc(*srcPixelIt), 1.0f);
            *outputIt = glm::vec4(unpackFunc(*srcPixelIt), 1.0f);
            ++srcPixelIt;
            ++dataIt;
            ++outputIt;
        }
        outputIt += outputLinePixelStride;
    }
    assert(dataIt == data.end());
}

    // We're done with the localCopy, free up the memory to avoid bloating the heap
    localCopy = Image(); // Image doesn't have a clear function, so override it with an empty one.
void convertToPackedFromFloat(unsigned char* output, int width, int height, size_t outputLineByteStride, gpu::Element outputFormat,
                              const glm::vec4* source, size_t srcLinePixelStride) {
    const glm::vec4* sourceIt;
    auto packFunc = getHDRPackingFunction(outputFormat);

    srcLinePixelStride -= width;
    sourceIt = source;
    for (auto lineNb = 0; lineNb < height; lineNb++) {
        uint32* outPixelIt = reinterpret_cast<uint32*>(output + lineNb * outputLineByteStride);
        uint32* outPixelEnd = outPixelIt + width;

        while (outPixelIt < outPixelEnd) {
            *outPixelIt = packFunc(*sourceIt);
            ++outPixelIt;
            ++sourceIt;
        }
        sourceIt += srcLinePixelStride;
    }
}
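
// Minimal usage sketch (illustrative only, not part of the original source):
// round-tripping one line of RGBA floats through the R11G11B10 packed form.
//
//     std::vector<glm::vec4> pixels(width, glm::vec4(0.5f, 1.0f, 2.0f, 1.0f));
//     std::vector<glm::uint32> packed(width);
//     convertToPackedFromFloat((unsigned char*)packed.data(), width, 1,
//                              width * sizeof(glm::uint32),
//                              gpu::Element::COLOR_R11G11B10, pixels.data(), width);
//     convertToFloatFromPacked((const unsigned char*)packed.data(), width, 1,
//                              width * sizeof(glm::uint32),
//                              gpu::Element::COLOR_R11G11B10, pixels.data(), width);
//
// Note that alpha is not stored by the packed format and comes back as 1.0f.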

nvtt::OutputHandler* getNVTTCompressionOutputHandler(gpu::Texture* outputTexture, int face, nvtt::CompressionOptions& compressionOptions) {
    auto outputFormat = outputTexture->getStoredMipFormat();
    bool useNVTT = false;

    compressionOptions.setQuality(nvtt::Quality_Production);

    if (outputFormat == gpu::Element::COLOR_COMPRESSED_BCX_HDR_RGB) {
        useNVTT = true;
        compressionOptions.setFormat(nvtt::Format_BC6);
    } else if (outputFormat == gpu::Element::COLOR_RGB9E5) {
        compressionOptions.setFormat(nvtt::Format_RGB);
        compressionOptions.setPixelType(nvtt::PixelType_Float);
        compressionOptions.setPixelFormat(32, 32, 32, 0);
    } else if (outputFormat == gpu::Element::COLOR_R11G11B10) {
        compressionOptions.setFormat(nvtt::Format_RGB);
        compressionOptions.setPixelType(nvtt::PixelType_Float);
        compressionOptions.setPixelFormat(32, 32, 32, 0);
    } else if (outputFormat == gpu::Element::COLOR_SRGBA_32) {
        useNVTT = true;
        compressionOptions.setFormat(nvtt::Format_RGB);
        compressionOptions.setPixelType(nvtt::PixelType_UnsignedNorm);
        compressionOptions.setPixelFormat(8, 8, 8, 0);
    } else {
        qCWarning(imagelogging) << "Unknown mip format";
        Q_UNREACHABLE();
        return nullptr;
    }

    if (!useNVTT) {
        // Don't use NVTT (at least version 2.1) as it outputs wrong RGB9E5 and R11G11B10F values from floats
        return new PackedFloatOutputHandler(outputTexture, face, outputFormat);
    } else {
        return new OutputHandler(outputTexture, face);
    }
}

void convertImageToHDRTexture(gpu::Texture* texture, Image&& image, BackendTarget target, int baseMipLevel, bool buildMips, const std::atomic<bool>& abortProcessing, int face) {
    assert(image.hasFloatFormat());

    Image localCopy = image.getConvertedToFormat(Image::Format_RGBAF);

    const int width = localCopy.getWidth();
    const int height = localCopy.getHeight();

    nvtt::OutputOptions outputOptions;
    outputOptions.setOutputHeader(false);
    std::unique_ptr<nvtt::OutputHandler> outputHandler;

    nvtt::CompressionOptions compressionOptions;
    std::unique_ptr<nvtt::OutputHandler> outputHandler{ getNVTTCompressionOutputHandler(texture, face, compressionOptions) };

    MyErrorHandler errorHandler;
    outputOptions.setErrorHandler(&errorHandler);
    nvtt::Context context;
    int mipLevel = 0;

    if (mipFormat == gpu::Element::COLOR_RGB9E5 || mipFormat == gpu::Element::COLOR_R11G11B10) {
        // Don't use NVTT (at least version 2.1) as it outputs wrong RGB9E5 and R11G11B10F values from floats
        outputHandler.reset(new PackedFloatOutputHandler(texture, face, mipFormat));
    } else {
        outputHandler.reset(new OutputHandler(texture, face));
    }
    int mipLevel = baseMipLevel;

    outputOptions.setOutputHandler(outputHandler.get());

    nvtt::Surface surface;
    surface.setImage(inputFormat, width, height, 1, &(*data.begin()));
    surface.setAlphaMode(alphaMode);
    surface.setWrapMode(wrapMode);
    surface.setImage(nvtt::InputFormat_RGBA_32F, width, height, 1, localCopy.getBits());
    surface.setAlphaMode(nvtt::AlphaMode_None);
    surface.setWrapMode(nvtt::WrapMode_Mirror);

    SequentialTaskDispatcher dispatcher(abortProcessing);
    nvtt::Compressor compressor;
    context.setTaskDispatcher(&dispatcher);

    context.compress(surface, face, mipLevel++, compressionOptions, outputOptions);
    while (surface.canMakeNextMipmap() && !abortProcessing.load()) {
        surface.buildNextMipmap(nvtt::MipmapFilter_Box);
        context.compress(surface, face, mipLevel++, compressionOptions, outputOptions);
    if (buildMips) {
        while (surface.canMakeNextMipmap() && !abortProcessing.load()) {
            surface.buildNextMipmap(nvtt::MipmapFilter_Box);
            context.compress(surface, face, mipLevel++, compressionOptions, outputOptions);
        }
    }
}

void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic<bool>& abortProcessing, int face) {
void convertImageToLDRTexture(gpu::Texture* texture, Image&& image, BackendTarget target, int baseMipLevel, bool buildMips, const std::atomic<bool>& abortProcessing, int face) {
    // Take a local copy to force move construction
    // https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#f18-for-consume-parameters-pass-by-x-and-stdmove-the-parameter
    Image localCopy = std::move(image);

    assert(localCopy.getFormat() != Image::Format_PACKED_FLOAT);
    if (localCopy.getFormat() != Image::Format_ARGB32) {
        localCopy = localCopy.getConvertedToFormat(Image::Format_ARGB32);
    }

    const int width = localCopy.getWidth(), height = localCopy.getHeight();
    auto mipFormat = texture->getStoredMipFormat();
    int mipLevel = baseMipLevel;

    if (target != BackendTarget::GLES32) {
        if (localCopy.getFormat() != Image::Format_ARGB32) {
            localCopy = localCopy.getConvertedToFormat(Image::Format_ARGB32);
        }

        const void* data = static_cast<const void*>(localCopy.getBits());
        nvtt::TextureType textureType = nvtt::TextureType_2D;
        nvtt::InputFormat inputFormat = nvtt::InputFormat_BGRA_8UB;

@@ -618,23 +673,22 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target,
        float inputGamma = 2.2f;
        float outputGamma = 2.2f;

        nvtt::InputOptions inputOptions;
        inputOptions.setTextureLayout(textureType, width, height);
        nvtt::Surface surface;
        surface.setImage(inputFormat, width, height, 1, data);
        surface.setAlphaMode(alphaMode);
        surface.setWrapMode(wrapMode);

        inputOptions.setMipmapData(data, width, height);
        // setMipmapData copies the memory, so free up the memory afterward to avoid bloating the heap
        // Surface copies the memory, so free up the memory afterward to avoid bloating the heap
        data = nullptr;
        localCopy = Image(); // Image doesn't have a clear function, so override it with an empty one.

        nvtt::InputOptions inputOptions;
        inputOptions.setTextureLayout(textureType, width, height);

        inputOptions.setFormat(inputFormat);
        inputOptions.setGamma(inputGamma, outputGamma);
        inputOptions.setAlphaMode(alphaMode);
        inputOptions.setWrapMode(wrapMode);
        inputOptions.setRoundMode(roundMode);

        inputOptions.setMipmapGeneration(true);
        inputOptions.setMipmapFilter(nvtt::MipmapFilter_Box);

        nvtt::CompressionOptions compressionOptions;
        compressionOptions.setQuality(nvtt::Quality_Production);

@@ -718,11 +772,22 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target,
        outputOptions.setErrorHandler(&errorHandler);

        SequentialTaskDispatcher dispatcher(abortProcessing);
        nvtt::Compressor compressor;
        compressor.setTaskDispatcher(&dispatcher);
        compressor.process(inputOptions, compressionOptions, outputOptions);
        nvtt::Compressor context;

        context.compress(surface, face, mipLevel++, compressionOptions, outputOptions);
        if (buildMips) {
            while (surface.canMakeNextMipmap() && !abortProcessing.load()) {
                surface.buildNextMipmap(nvtt::MipmapFilter_Box);
                context.compress(surface, face, mipLevel++, compressionOptions, outputOptions);
            }
        }
    } else {
        int numMips = 1 + (int)log2(std::max(width, height));
        int numMips = 1;

        if (buildMips) {
            numMips += (int)log2(std::max(width, height)) - baseMipLevel;
        }
        assert(numMips > 0);
        Etc::RawImage *mipMaps = new Etc::RawImage[numMips];
        Etc::Image::Format etcFormat = Etc::Image::Format::DEFAULT;

@@ -756,23 +821,13 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target,
        const float effort = 1.0f;
        const int numEncodeThreads = 4;
        int encodingTime;
        const float MAX_COLOR = 255.0f;

        std::vector<vec4> floatData;
        floatData.resize(width * height);
        for (int y = 0; y < height; y++) {
            QRgb *line = (QRgb *)localCopy.editScanLine(y);
            for (int x = 0; x < width; x++) {
                QRgb &pixel = line[x];
                floatData[x + y * width] = vec4(qRed(pixel), qGreen(pixel), qBlue(pixel), qAlpha(pixel)) / MAX_COLOR;
            }
        if (localCopy.getFormat() != Image::Format_RGBAF) {
            localCopy = localCopy.getConvertedToFormat(Image::Format_RGBAF);
        }

        // free up the memory afterward to avoid bloating the heap
        localCopy = Image(); // Image doesn't have a clear function, so override it with an empty one.

        Etc::EncodeMipmaps(
            (float *)floatData.data(), width, height,
            (float *)localCopy.editBits(), width, height,
            etcFormat, errorMetric, effort,
            numEncodeThreads, numEncodeThreads,
            numMips, Etc::FILTER_WRAP_NONE,

@@ -782,9 +837,9 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target,
        for (int i = 0; i < numMips; i++) {
            if (mipMaps[i].paucEncodingBits.get()) {
                if (face >= 0) {
                    texture->assignStoredMipFace(i, face, mipMaps[i].uiEncodingBitsBytes, static_cast<const gpu::Byte*>(mipMaps[i].paucEncodingBits.get()));
                    texture->assignStoredMipFace(i+baseMipLevel, face, mipMaps[i].uiEncodingBitsBytes, static_cast<const gpu::Byte*>(mipMaps[i].paucEncodingBits.get()));
                } else {
                    texture->assignStoredMip(i, mipMaps[i].uiEncodingBitsBytes, static_cast<const gpu::Byte*>(mipMaps[i].paucEncodingBits.get()));
                    texture->assignStoredMip(i + baseMipLevel, mipMaps[i].uiEncodingBitsBytes, static_cast<const gpu::Byte*>(mipMaps[i].paucEncodingBits.get()));
                }
            }
        }

@@ -795,22 +850,27 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target,

#endif

void generateMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic<bool>& abortProcessing = false, int face = -1) {
#if CPU_MIPMAPS
    PROFILE_RANGE(resource_parse, "generateMips");
void convertImageToTexture(gpu::Texture* texture, Image& image, BackendTarget target, int face, int baseMipLevel, bool buildMips, const std::atomic<bool>& abortProcessing) {
    PROFILE_RANGE(resource_parse, "convertToTextureWithMips");

    if (target == BackendTarget::GLES32) {
        generateLDRMips(texture, std::move(image), target, abortProcessing, face);
        convertImageToLDRTexture(texture, std::move(image), target, baseMipLevel, buildMips, abortProcessing, face);
    } else {
        if (image.getFormat() == Image::Format_PACKED_FLOAT) {
            generateHDRMips(texture, std::move(image), target, abortProcessing, face);
        if (image.hasFloatFormat()) {
            convertImageToHDRTexture(texture, std::move(image), target, baseMipLevel, buildMips, abortProcessing, face);
        } else {
            generateLDRMips(texture, std::move(image), target, abortProcessing, face);
            convertImageToLDRTexture(texture, std::move(image), target, baseMipLevel, buildMips, abortProcessing, face);
        }
    }
#else
    texture->setAutoGenerateMips(true);
#endif
}

void convertToTextureWithMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic<bool>& abortProcessing, int face) {
    convertImageToTexture(texture, image, target, face, 0, true, abortProcessing);
}

void convertToTexture(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic<bool>& abortProcessing, int face, int mipLevel) {
    PROFILE_RANGE(resource_parse, "convertToTexture");
    convertImageToTexture(texture, image, target, face, mipLevel, false, abortProcessing);
}

void processTextureAlpha(const Image& srcImage, bool& validAlpha, bool& alphaAsMask) {

@@ -900,7 +960,7 @@ gpu::TexturePointer TextureUsage::process2DTextureColorFromImage(Image&& srcImag
        theTexture->setUsage(usage.build());
        theTexture->setStoredMipFormat(formatMip);
        theTexture->assignStoredMip(0, image.getByteCount(), image.getBits());
        generateMips(theTexture.get(), std::move(image), target, abortProcessing);
        convertToTextureWithMips(theTexture.get(), std::move(image), target, abortProcessing);
    }

    return theTexture;

@@ -944,14 +1004,14 @@ Image processBumpMap(Image&& image) {
            const int jPrevClamped = clampPixelCoordinate(j - 1, height - 1);

            // surrounding pixels
            const QRgb topLeft = localCopy.getPixel(iPrevClamped, jPrevClamped);
            const QRgb top = localCopy.getPixel(iPrevClamped, j);
            const QRgb topRight = localCopy.getPixel(iPrevClamped, jNextClamped);
            const QRgb right = localCopy.getPixel(i, jNextClamped);
            const QRgb bottomRight = localCopy.getPixel(iNextClamped, jNextClamped);
            const QRgb bottom = localCopy.getPixel(iNextClamped, j);
            const QRgb bottomLeft = localCopy.getPixel(iNextClamped, jPrevClamped);
            const QRgb left = localCopy.getPixel(i, jPrevClamped);
            const QRgb topLeft = localCopy.getPackedPixel(iPrevClamped, jPrevClamped);
            const QRgb top = localCopy.getPackedPixel(iPrevClamped, j);
            const QRgb topRight = localCopy.getPackedPixel(iPrevClamped, jNextClamped);
            const QRgb right = localCopy.getPackedPixel(i, jNextClamped);
            const QRgb bottomRight = localCopy.getPackedPixel(iNextClamped, jNextClamped);
            const QRgb bottom = localCopy.getPackedPixel(iNextClamped, j);
            const QRgb bottomLeft = localCopy.getPackedPixel(iNextClamped, jPrevClamped);
            const QRgb left = localCopy.getPackedPixel(i, jPrevClamped);

            // take their gray intensities
            // since it's a grayscale image, the value of each component RGB is the same

@@ -974,12 +1034,13 @@ Image processBumpMap(Image&& image) {

            // convert to rgb from the value obtained computing the filter
            QRgb qRgbValue = qRgba(mapComponent(v.z), mapComponent(v.y), mapComponent(v.x), 1.0);
            result.setPixel(i, j, qRgbValue);
            result.setPackedPixel(i, j, qRgbValue);
        }
    }

    return result;
}

gpu::TexturePointer TextureUsage::process2DTextureNormalMapFromImage(Image&& srcImage, const std::string& srcImageName,
                                                                     bool compress, BackendTarget target, bool isBumpMap,
                                                                     const std::atomic<bool>& abortProcessing) {

@@ -1014,7 +1075,7 @@ gpu::TexturePointer TextureUsage::process2DTextureNormalMapFromImage(Image&& src
        theTexture->setSource(srcImageName);
        theTexture->setStoredMipFormat(formatMip);
        theTexture->assignStoredMip(0, image.getByteCount(), image.getBits());
        generateMips(theTexture.get(), std::move(image), target, abortProcessing);
        convertToTextureWithMips(theTexture.get(), std::move(image), target, abortProcessing);
    }

    return theTexture;

@@ -1054,7 +1115,7 @@ gpu::TexturePointer TextureUsage::process2DTextureGrayscaleFromImage(Image&& src
        theTexture->setSource(srcImageName);
        theTexture->setStoredMipFormat(formatMip);
        theTexture->assignStoredMip(0, image.getByteCount(), image.getBits());
        generateMips(theTexture.get(), std::move(image), target, abortProcessing);
        convertToTextureWithMips(theTexture.get(), std::move(image), target, abortProcessing);
    }

    return theTexture;

@@ -1416,8 +1477,41 @@ Image convertToHDRFormat(Image&& srcImage, gpu::Element format) {
    return hdrImage;
}

static bool isLinearTextureFormat(gpu::Element format) {
    return !((format == gpu::Element::COLOR_SRGBA_32)
        || (format == gpu::Element::COLOR_SBGRA_32)
        || (format == gpu::Element::COLOR_SR_8)
        || (format == gpu::Element::COLOR_COMPRESSED_BCX_SRGB)
        || (format == gpu::Element::COLOR_COMPRESSED_BCX_SRGBA_MASK)
        || (format == gpu::Element::COLOR_COMPRESSED_BCX_SRGBA)
        || (format == gpu::Element::COLOR_COMPRESSED_BCX_SRGBA_HIGH)
        || (format == gpu::Element::COLOR_COMPRESSED_ETC2_SRGB)
        || (format == gpu::Element::COLOR_COMPRESSED_ETC2_SRGBA)
        || (format == gpu::Element::COLOR_COMPRESSED_ETC2_SRGB_PUNCHTHROUGH_ALPHA));
}

void convolveForGGX(const std::vector<Image>& faces, gpu::Texture* texture, BackendTarget target, const std::atomic<bool>& abortProcessing = false) {
    PROFILE_RANGE(resource_parse, "convolveForGGX");
    CubeMap source(faces, texture->getNumMips(), abortProcessing);
    CubeMap output(texture->getWidth(), texture->getHeight(), texture->getNumMips());
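
    // The convolution must average radiance in linear space: gamma-decode 8-bit
    // LDR sources first, then gamma re-encode at the end when the target texel
    // format is an sRGB one (the GPU will decode it again at sampling time).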
|
||||
|
||||
if (!faces.front().hasFloatFormat()) {
|
||||
source.applyGamma(2.2f);
|
||||
}
|
||||
source.convolveForGGX(output, abortProcessing);
|
||||
if (!isLinearTextureFormat(texture->getTexelFormat())) {
|
||||
output.applyGamma(1.0f/2.2f);
|
||||
}
|
||||
|
||||
for (int face = 0; face < 6; face++) {
|
||||
for (gpu::uint16 mipLevel = 0; mipLevel < output.getMipCount(); mipLevel++) {
|
||||
convertToTexture(texture, output.getFaceImage(mipLevel, face), target, abortProcessing, face, mipLevel);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
gpu::TexturePointer TextureUsage::processCubeTextureColorFromImage(Image&& srcImage, const std::string& srcImageName,
|
||||
bool compress, BackendTarget target, bool generateIrradiance,
|
||||
bool compress, BackendTarget target, int options,
|
||||
const std::atomic<bool>& abortProcessing) {
|
||||
PROFILE_RANGE(resource_parse, "processCubeTextureColorFromImage");
|
||||
|
||||
|
@ -1491,7 +1585,7 @@ gpu::TexturePointer TextureUsage::processCubeTextureColorFromImage(Image&& srcIm
|
|||
theTexture->setStoredMipFormat(formatMip);
|
||||
|
||||
// Generate irradiance while we are at it
|
||||
if (generateIrradiance) {
|
||||
if (options & CUBE_GENERATE_IRRADIANCE) {
|
||||
PROFILE_RANGE(resource_parse, "generateIrradiance");
|
||||
gpu::Element irradianceFormat;
|
||||
// TODO: we could locally compress the irradiance texture on Android, but we don't need to
|
||||
|
@ -1513,9 +1607,16 @@ gpu::TexturePointer TextureUsage::processCubeTextureColorFromImage(Image&& srcIm
|
|||
auto irradiance = irradianceTexture->getIrradiance();
|
||||
theTexture->overrideIrradiance(irradiance);
|
||||
}
|
||||
|
||||
for (uint8 face = 0; face < faces.size(); ++face) {
|
||||
generateMips(theTexture.get(), std::move(faces[face]), target, abortProcessing, face);
|
||||
|
||||
if (options & CUBE_GGX_CONVOLVE) {
|
||||
// Performs and convolution AND mip map generation
|
||||
convolveForGGX(faces, theTexture.get(), target, abortProcessing);
|
||||
} else {
|
||||
// Create mip maps and compress to final format in one go
|
||||
for (uint8 face = 0; face < faces.size(); ++face) {
|
||||
// Force building the mip maps right now on CPU if we are convolving for GGX later on
|
||||
convertToTextureWithMips(theTexture.get(), std::move(faces[face]), target, abortProcessing, face);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
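One subtlety in convolveForGGX above: the filtering must happen in linear light. Averaging gamma-encoded black (0.0) and white (1.0) gives 0.5 encoded, which decodes to about 0.5^2.2 ≈ 0.22 in linear terms instead of the physically correct 0.5. A condensed sketch of the decode/filter/re-encode round trip, using only the CubeMap calls visible above (the wrapper function and its bool parameters are hypothetical):

```cpp
// Convolve in linear light, whatever the storage encodings are.
void convolveRoundTrip(CubeMap& source, CubeMap& output,
                       bool sourceIsFloat, bool destFormatIsLinear,
                       const std::atomic<bool>& abortProcessing) {
    if (!sourceIsFloat) {
        source.applyGamma(2.2f);                     // decode 8-bit LDR faces to linear
    }
    source.convolveForGGX(output, abortProcessing);  // GGX prefilter, mip by mip
    if (!destFormatIsLinear) {
        output.applyGamma(1.0f / 2.2f);              // re-encode for sRGB storage
    }
}
```

HDR float faces skip the decode because they are already linear, which is exactly the `hasFloatFormat()` test above.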
@@ -17,11 +17,16 @@
 #include <gpu/Texture.h>

 #include "Image.h"
+#include <nvtt/nvtt.h>

 namespace image {

 std::function<gpu::uint32(const glm::vec3&)> getHDRPackingFunction();
 std::function<glm::vec3(gpu::uint32)> getHDRUnpackingFunction();
+void convertToFloatFromPacked(const unsigned char* source, int width, int height, size_t srcLineByteStride, gpu::Element sourceFormat,
+                              glm::vec4* output, size_t outputLinePixelStride);
+void convertToPackedFromFloat(unsigned char* output, int width, int height, size_t outputLineByteStride, gpu::Element outputFormat,
+                              const glm::vec4* source, size_t srcLinePixelStride);

 namespace TextureUsage {

@@ -62,7 +67,8 @@ enum Type {
     ROUGHNESS_TEXTURE,
     GLOSS_TEXTURE,
     EMISSIVE_TEXTURE,
-    CUBE_TEXTURE,
+    SKY_TEXTURE,
+    AMBIENT_TEXTURE,
     OCCLUSION_TEXTURE,
     SCATTERING_TEXTURE = OCCLUSION_TEXTURE,
     LIGHTMAP_TEXTURE,

@@ -92,8 +98,12 @@ gpu::TexturePointer createMetallicTextureFromImage(Image&& image, const std::str
                                                    bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
 gpu::TexturePointer createCubeTextureFromImage(Image&& image, const std::string& srcImageName,
                                                bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
-gpu::TexturePointer createCubeTextureFromImageWithoutIrradiance(Image&& image, const std::string& srcImageName,
-                                                                bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
+gpu::TexturePointer createCubeTextureAndIrradianceFromImage(Image&& image, const std::string& srcImageName,
+                                                            bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
+gpu::TexturePointer createAmbientCubeTextureFromImage(Image&& image, const std::string& srcImageName,
+                                                      bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
+gpu::TexturePointer createAmbientCubeTextureAndIrradianceFromImage(Image&& image, const std::string& srcImageName,
+                                                                   bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
 gpu::TexturePointer createLightmapTextureFromImage(Image&& image, const std::string& srcImageName,
                                                    bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing);
 gpu::TexturePointer process2DTextureColorFromImage(Image&& srcImage, const std::string& srcImageName, bool compress,

@@ -102,9 +112,14 @@ gpu::TexturePointer process2DTextureNormalMapFromImage(Image&& srcImage, const s
                                                        gpu::BackendTarget target, bool isBumpMap, const std::atomic<bool>& abortProcessing);
 gpu::TexturePointer process2DTextureGrayscaleFromImage(Image&& srcImage, const std::string& srcImageName, bool compress,
                                                        gpu::BackendTarget target, bool isInvertedPixels, const std::atomic<bool>& abortProcessing);
-gpu::TexturePointer processCubeTextureColorFromImage(Image&& srcImage, const std::string& srcImageName, bool compress,
-                                                     gpu::BackendTarget target, bool generateIrradiance, const std::atomic<bool>& abortProcessing);
+
+enum CubeTextureOptions {
+    CUBE_DEFAULT = 0x0,
+    CUBE_GENERATE_IRRADIANCE = 0x1,
+    CUBE_GGX_CONVOLVE = 0x2
+};
+gpu::TexturePointer processCubeTextureColorFromImage(Image&& srcImage, const std::string& srcImageName, bool compress,
+                                                     gpu::BackendTarget target, int option, const std::atomic<bool>& abortProcessing);
 } // namespace TextureUsage

 const QStringList getSupportedFormats();

@@ -113,6 +128,9 @@ gpu::TexturePointer processImage(std::shared_ptr<QIODevice> content, const std::
                                  int maxNumPixels, TextureUsage::Type textureType,
                                  bool compress, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing = false);

+void convertToTextureWithMips(gpu::Texture* texture, Image&& image, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing = false, int face = -1);
+void convertToTexture(gpu::Texture* texture, Image&& image, gpu::BackendTarget target, const std::atomic<bool>& abortProcessing = false, int face = -1, int mipLevel = 0);

 } // namespace image

 #endif // hifi_image_TextureProcessing_h
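With CubeTextureOptions now a bitmask, callers combine behaviours instead of passing a lone bool. Hypothetical call sites (the flag combinations, helper names, and arguments are assumptions; only the enum and the signature come from the header above):

```cpp
#include "TextureProcessing.h"

// A skybox wants the GGX prefilter for glossy reflections:
gpu::TexturePointer bakeSky(image::Image&& skyImage, gpu::BackendTarget target,
                            const std::atomic<bool>& abortProcessing) {
    return image::TextureUsage::processCubeTextureColorFromImage(
        std::move(skyImage), "sky", /*compress=*/true, target,
        image::TextureUsage::CUBE_GGX_CONVOLVE, abortProcessing);
}

// An ambient map additionally wants spherical-harmonics irradiance:
gpu::TexturePointer bakeAmbient(image::Image&& ambientImage, gpu::BackendTarget target,
                                const std::atomic<bool>& abortProcessing) {
    return image::TextureUsage::processCubeTextureColorFromImage(
        std::move(ambientImage), "ambient", /*compress=*/true, target,
        image::TextureUsage::CUBE_GENERATE_IRRADIANCE | image::TextureUsage::CUBE_GGX_CONVOLVE,
        abortProcessing);
}
```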
@@ -224,10 +224,14 @@ NetworkTexturePointer TextureCache::getTexture(const QUrl& url, image::TextureUs
         return getResourceTexture(url);
     }
     auto modifiedUrl = url;
-    if (type == image::TextureUsage::CUBE_TEXTURE) {
+    if (type == image::TextureUsage::SKY_TEXTURE) {
         QUrlQuery query { url.query() };
         query.addQueryItem("skybox", "");
         modifiedUrl.setQuery(query.toString());
+    } else if (type == image::TextureUsage::AMBIENT_TEXTURE) {
+        QUrlQuery query{ url.query() };
+        query.addQueryItem("ambient", "");
+        modifiedUrl.setQuery(query.toString());
     }
     TextureExtra extra = { type, content, maxNumPixels, sourceChannel };
     return ResourceCache::getResource(modifiedUrl, QUrl(), &extra, std::hash<TextureExtra>()(extra)).staticCast<NetworkTexture>();

@@ -283,7 +287,8 @@ gpu::TexturePointer getFallbackTextureForType(image::TextureUsage::Type type) {
         case image::TextureUsage::BUMP_TEXTURE:
         case image::TextureUsage::SPECULAR_TEXTURE:
         case image::TextureUsage::GLOSS_TEXTURE:
-        case image::TextureUsage::CUBE_TEXTURE:
+        case image::TextureUsage::SKY_TEXTURE:
+        case image::TextureUsage::AMBIENT_TEXTURE:
         case image::TextureUsage::STRICT_TEXTURE:
         default:
             break;

@@ -408,7 +413,7 @@ void NetworkTexture::setExtra(void* extra) {
     _shouldFailOnRedirect = _currentlyLoadingResourceType != ResourceType::KTX;

-    if (_type == image::TextureUsage::CUBE_TEXTURE) {
+    if (_type == image::TextureUsage::SKY_TEXTURE) {
         setLoadPriority(this, SKYBOX_LOAD_PRIORITY);
     } else if (_currentlyLoadingResourceType == ResourceType::KTX) {
         setLoadPriority(this, HIGH_MIPS_LOAD_PRIORITY);
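The query items exist purely to keep cache keys distinct: the same source URL baked as a skybox and as an ambient map must resolve to two different ResourceCache entries. A condensed sketch of the key derivation (it mirrors the logic above; the free function itself is hypothetical):

```cpp
#include <QtCore/QUrl>
#include <QtCore/QUrlQuery>

// image::TextureUsage::Type comes from image/TextureProcessing.h
QUrl cacheKeyForUsage(QUrl url, image::TextureUsage::Type type) {
    QUrlQuery query(url.query());
    if (type == image::TextureUsage::SKY_TEXTURE) {
        query.addQueryItem("skybox", "");     // ...?skybox
    } else if (type == image::TextureUsage::AMBIENT_TEXTURE) {
        query.addQueryItem("ambient", "");    // ...?ambient
    }
    url.setQuery(query.toString());
    return url;   // distinct key per usage of the same source URL
}
```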
@@ -588,6 +588,8 @@ void LimitedNodeList::eraseAllNodes() {
     foreach(const SharedNodePointer& killedNode, killedNodes) {
         handleNodeKill(killedNode);
     }
+
+    _delayedNodeAdds.clear();
 }

 void LimitedNodeList::reset() {

@@ -755,7 +757,7 @@ void LimitedNodeList::delayNodeAdd(NewNodeInfo info) {
 }

 void LimitedNodeList::removeDelayedAdd(QUuid nodeUUID) {
-    auto it = std::find_if(_delayedNodeAdds.begin(), _delayedNodeAdds.end(), [&](auto info) {
+    auto it = std::find_if(_delayedNodeAdds.begin(), _delayedNodeAdds.end(), [&](const auto& info) {
         return info.uuid == nodeUUID;
     });
     if (it != _delayedNodeAdds.end()) {

@@ -764,7 +766,7 @@ void LimitedNodeList::removeDelayedAdd(QUuid nodeUUID) {
 }

 bool LimitedNodeList::isDelayedNode(QUuid nodeUUID) {
-    auto it = std::find_if(_delayedNodeAdds.begin(), _delayedNodeAdds.end(), [&](auto info) {
+    auto it = std::find_if(_delayedNodeAdds.begin(), _delayedNodeAdds.end(), [&](const auto& info) {
        return info.uuid == nodeUUID;
    });
    return it != _delayedNodeAdds.end();
@@ -26,7 +26,7 @@ const quint16 ICE_SERVER_DEFAULT_PORT = 7337;
 const int ICE_HEARBEAT_INTERVAL_MSECS = 2 * 1000;
 const int MAX_ICE_CONNECTION_ATTEMPTS = 5;

-const int UDP_PUNCH_PING_INTERVAL_MS = 25;
+const int UDP_PUNCH_PING_INTERVAL_MS = 250;

 class NetworkPeer : public QObject {
     Q_OBJECT
@@ -752,11 +752,11 @@ void NodeList::pingPunchForInactiveNode(const SharedNodePointer& node) {
         flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::SendAudioPing);
     }

-    // every second we're trying to ping this node and we're not getting anywhere - debug that out
-    const int NUM_DEBUG_CONNECTION_ATTEMPTS = 1000 / (UDP_PUNCH_PING_INTERVAL_MS);
+    // every two seconds we're trying to ping this node and we're not getting anywhere - debug that out
+    const int NUM_DEBUG_CONNECTION_ATTEMPTS = 2000 / (UDP_PUNCH_PING_INTERVAL_MS);

     if (node->getConnectionAttempts() > 0 && node->getConnectionAttempts() % NUM_DEBUG_CONNECTION_ATTEMPTS == 0) {
-        qCDebug(networking) << "No response to UDP hole punch pings for node" << node->getUUID() << "in last second.";
+        qCDebug(networking) << "No response to UDP hole punch pings for node" << node->getUUID() << "in last 2 s.";
     }

     auto nodeID = node->getUUID();
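The two constants are coupled; with the ping interval raised from 25 ms to 250 ms, the arithmetic works out as follows:

```cpp
// Before: 1000 / 25  == 40 pings per second, debug line after every 40 failed attempts (1 s)
// After:  2000 / 250 ==  8 pings per 2 seconds, debug line after every 8 failed attempts (2 s)
const int UDP_PUNCH_PING_INTERVAL_MS = 250;
const int NUM_DEBUG_CONNECTION_ATTEMPTS = 2000 / (UDP_PUNCH_PING_INTERVAL_MS);  // == 8
```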
@@ -41,10 +41,10 @@ public:
     NodePermissions(const NodePermissionsKey& key) { _id = key.first.toLower(); _rankID = key.second; }
     NodePermissions(QMap<QString, QVariant> perms);

-    const QString& getID() const { return _id; } // a user-name or a group-name, not verified
+    QString getID() const { return _id; } // a user-name or a group-name, not verified
     void setID(const QString& id) { _id = id; }
     void setRankID(QUuid& rankID) { _rankID = rankID; }
-    const QUuid& getRankID() const { return _rankID; }
+    QUuid getRankID() const { return _rankID; }
     NodePermissionsKey getKey() const { return NodePermissionsKey(_id, _rankID); }

     // the _id member isn't authenticated/verified and _username is.

@@ -52,7 +52,7 @@ public:
     const QString& getVerifiedUserName() const { return _verifiedUserName; }

     void setGroupID(QUuid groupID) { _groupID = groupID; if (!groupID.isNull()) { _groupIDSet = true; }}
-    const QUuid& getGroupID() const { return _groupID; }
+    QUuid getGroupID() const { return _groupID; }
     bool isGroup() const { return _groupIDSet; }

     bool isAssignment { false };
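Returning these accessors by value rather than by const reference removes a dangling-reference hazard when the NodePermissions object is itself a temporary, and Qt's implicitly shared QString/QUuid make the copies cheap. A self-contained illustration of the hazard (types and the `permissionsFor` helper are simplified stand-ins, not the real API):

```cpp
#include <QString>

struct Permissions {
    QString _id;
    QString getID() const { return _id; }   // by value: safe and cheap (implicit sharing)
};

Permissions permissionsFor(int node);       // assumed helper returning a fresh copy

void example(int node) {
    // If getID() returned const QString&, storing that reference would leave it
    // pointing into the temporary Permissions object, which is destroyed at the
    // end of the full expression. By value it is a copy-on-write copy that can
    // never dangle.
    QString name = permissionsFor(node).getID();
    (void)name;
}
```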
@@ -26,7 +26,7 @@
 #include "ViewFrustum.h"
 #include "GeometryCache.h"
 #include "FramebufferCache.h"

 #include "RandomAndNoise.h"

 namespace ru {
     using render_utils::slot::texture::Texture;

@@ -359,36 +359,11 @@ int JitterSampleConfig::play() {
     return _state;
 }

-template <int B>
-class Halton {
-public:
-
-    float eval(int index) const {
-        float f = 1.0f;
-        float r = 0.0f;
-        float invB = 1.0f / (float)B;
-        index++; // Indices start at 1, not 0
-
-        while (index > 0) {
-            f = f * invB;
-            r = r + f * (float)(index % B);
-            index = index / B;
-        }
-
-        return r;
-    }
-
-};
-
 JitterSample::SampleSequence::SampleSequence(){
     // Halton sequence (2,3)
-    Halton<2> genX;
-    Halton<3> genY;
-
     for (int i = 0; i < SEQUENCE_LENGTH; i++) {
-        offsets[i] = glm::vec2(genX.eval(i), genY.eval(i));
+        offsets[i] = glm::vec2(halton::evaluate<2>(i), halton::evaluate<3>(i));
         offsets[i] -= vec2(0.5f);
     }
     offsets[SEQUENCE_LENGTH] = glm::vec2(0.0f);
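The Halton generator was hoisted into libraries/shared (RandomAndNoise.h, a new file further down) so other passes can reuse it. For intuition, the first few values the (2,3) pair produces, checked with a small harness:

```cpp
#include <cstdio>
#include "RandomAndNoise.h"   // halton::evaluate now lives here

int main() {
    // Prints (0.5000, 0.3333), (0.2500, 0.6667), (0.7500, 0.1111), (0.1250, 0.4444):
    // low-discrepancy points that, after the vec2(0.5f) recentring above, give
    // sub-pixel TAA jitter in [-0.5, 0.5)^2.
    for (int i = 0; i < 4; i++) {
        std::printf("(%.4f, %.4f)\n", halton::evaluate<2>(i), halton::evaluate<3>(i));
    }
    return 0;
}
```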
@@ -365,6 +365,7 @@ void PrepareDeferred::run(const RenderContextPointer& renderContext, const Input

         // For the rest of the rendering, bind the lighting model
         batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer());
+        batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT());
     });
 }

@@ -416,6 +417,7 @@ void RenderDeferredSetup::run(const render::RenderContextPointer& renderContext,

     // The lighting model
     batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer());
+    batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT());

     // Subsurface scattering specific
     if (surfaceGeometryFramebuffer) {

@@ -642,25 +644,37 @@ void RenderDeferred::run(const RenderContextPointer& renderContext, const Inputs
         config->setGPUBatchRunTime(_gpuTimer->getGPUAverage(), _gpuTimer->getBatchAverage());
     }
 }

 void DefaultLightingSetup::run(const RenderContextPointer& renderContext) {

     if (!_defaultLight || !_defaultBackground) {
+        auto defaultSkyboxURL = PathUtils::resourcesUrl() + "images/Default-Sky-9-cubemap/Default-Sky-9-cubemap.texmeta.json";
+
         if (!_defaultSkyboxNetworkTexture) {
             PROFILE_RANGE(render, "Process Default Skybox");
             _defaultSkyboxNetworkTexture = DependencyManager::get<TextureCache>()->getTexture(
-                PathUtils::resourcesUrl() + "images/Default-Sky-9-cubemap/Default-Sky-9-cubemap.texmeta.json", image::TextureUsage::CUBE_TEXTURE);
+                defaultSkyboxURL, image::TextureUsage::SKY_TEXTURE);
         }

+        if (!_defaultAmbientNetworkTexture) {
+            PROFILE_RANGE(render, "Process Default Ambient map");
+            _defaultAmbientNetworkTexture = DependencyManager::get<TextureCache>()->getTexture(
+                defaultSkyboxURL, image::TextureUsage::AMBIENT_TEXTURE);
+        }
+
         if (_defaultSkyboxNetworkTexture && _defaultSkyboxNetworkTexture->isLoaded() && _defaultSkyboxNetworkTexture->getGPUTexture()) {
-            _defaultSkyboxAmbientTexture = _defaultSkyboxNetworkTexture->getGPUTexture();
-            _defaultSkybox->setCubemap(_defaultSkyboxAmbientTexture);
+            _defaultSkybox->setCubemap(_defaultSkyboxNetworkTexture->getGPUTexture());
         } else {
             // Don't do anything until the skybox has loaded
             return;
         }

+        if (_defaultAmbientNetworkTexture && _defaultAmbientNetworkTexture->isLoaded() && _defaultAmbientNetworkTexture->getGPUTexture()) {
+            _defaultAmbientTexture = _defaultAmbientNetworkTexture->getGPUTexture();
+        } else {
+            // Don't do anything until the ambient box has been loaded
+            return;
+        }
+
         auto lightStage = renderContext->_scene->getStage<LightStage>();
         if (lightStage) {

@@ -674,8 +688,8 @@ void DefaultLightingSetup::run(const RenderContextPointer& renderContext) {
                 lp->setAmbientSpherePreset(gpu::SphericalHarmonics::Preset::OLD_TOWN_SQUARE);

                 lp->setAmbientIntensity(0.5f);
-                lp->setAmbientMap(_defaultSkyboxAmbientTexture);
-                auto irradianceSH = _defaultSkyboxAmbientTexture->getIrradiance();
+                lp->setAmbientMap(_defaultAmbientTexture);
+                auto irradianceSH = _defaultAmbientTexture->getIrradiance();
                 if (irradianceSH) {
                     lp->setAmbientSphere((*irradianceSH));
                 }
@@ -212,7 +212,8 @@ protected:
     HazeStage::Index _defaultHazeID{ HazeStage::INVALID_INDEX };
     graphics::SkyboxPointer _defaultSkybox { new ProceduralSkybox() };
     NetworkTexturePointer _defaultSkyboxNetworkTexture;
-    gpu::TexturePointer _defaultSkyboxAmbientTexture;
+    NetworkTexturePointer _defaultAmbientNetworkTexture;
+    gpu::TexturePointer _defaultAmbientTexture;
 };

 #endif // hifi_DeferredLightingEffect_h
@@ -17,8 +17,9 @@ vec4 evalSkyboxLight(vec3 direction, float lod) {

 #if !defined(GL_ES)
     float filterLod = textureQueryLod(skyboxMap, direction).x;
-    // Keep texture filtering LOD as limit to prevent aliasing on specular reflection
-    lod = max(lod, filterLod);
+    // Keep texture filtering LOD as limit to prevent aliasing on specular reflection, but add
+    // a bias to limit overblurring with convolved maps
+    lod = max(lod, filterLod-2);
 #endif

     return textureLod(skyboxMap, direction, lod);

@@ -26,16 +27,30 @@ vec4 evalSkyboxLight(vec3 direction, float lod) {
 <@endfunc@>

 <@func declareEvalAmbientSpecularIrradiance(supportAmbientSphere, supportAmbientMap, supportIfAmbientMapElseAmbientSphere)@>
+LAYOUT(binding=RENDER_UTILS_TEXTURE_AMBIENT_FRESNEL) uniform sampler2D ambientFresnelLUT;

-vec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) {
+vec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float roughness) {
+#if RENDER_UTILS_ENABLE_AMBIENT_FRESNEL_LUT
+    vec2 ambientFresnel = texture(ambientFresnelLUT, vec2(roughness, ndotd)).xy;
+    return fresnelColor * ambientFresnel.x + vec3(ambientFresnel.y);
+#else
+    float gloss = 1.0-roughness;
     float f = pow(1.0 - ndotd, 5.0);
     return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f;
+#endif
 }

 <@if supportAmbientMap@>
 <$declareSkyboxMap()$>
 <@endif@>

+float getMipLevelFromRoughness(float roughness, float lodCount) {
+    // This should match the value in the CubeMap::convolveForGGX method (CubeMap.cpp)
+    float ROUGHNESS_1_MIP_RESOLUTION = 1.5;
+    float deltaLod = lodCount - ROUGHNESS_1_MIP_RESOLUTION;
+    return deltaLod * (sqrt(1.0+24.0*roughness)-1.0) / 4.0;
+}
+
 vec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) {
     vec3 specularLight;
 <@if supportIfAmbientMapElseAmbientSphere@>

@@ -43,10 +58,10 @@ vec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, ve
 <@endif@>
 <@if supportAmbientMap@>
     {
-        float levels = getLightAmbientMapNumMips(ambient);
-        float m = 12.0 / (1.0+11.0*surface.roughness);
-        float lod = levels - m;
+        float levelCount = getLightAmbientMapNumMips(ambient);
+        float lod = getMipLevelFromRoughness(surface.roughness, levelCount);
         lod = max(lod, 0.0);

         specularLight = evalSkyboxLight(lightDir, lod).xyz;
     }
 <@endif@>

@@ -87,7 +102,7 @@ void evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambie
     vec3 ambientSpaceLowNormal = (ambient.transform * vec4(lowNormalCurvature.xyz, 0.0)).xyz;
 <@endif@>

-    vec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness);
+    vec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, surface.roughness);

     diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) *
         sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz;
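getMipLevelFromRoughness is the contract between the CPU prefilter and this shader: both sides must place roughness r at the same mip. Worked values of the mapping, transcribed to C++ for checking (the function body is a direct copy of the GLSL above):

```cpp
#include <cmath>

// lod(r) = (lodCount - 1.5) * (sqrt(1 + 24 r) - 1) / 4
float getMipLevelFromRoughness(float roughness, float lodCount) {
    const float ROUGHNESS_1_MIP_RESOLUTION = 1.5f;
    float deltaLod = lodCount - ROUGHNESS_1_MIP_RESOLUTION;
    return deltaLod * (std::sqrt(1.0f + 24.0f * roughness) - 1.0f) / 4.0f;
}
// r = 0.0 -> lod 0                (mirror-like: sharpest mip)
// r = 0.5 -> deltaLod * 0.6514    ((sqrt(13) - 1) / 4)
// r = 1.0 -> deltaLod * 1.0       (stops 1.5 mips short of the 1x1 tail,
//                                  matching ROUGHNESS_1_MIP_RESOLUTION)
```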
@@ -9,10 +9,88 @@
 // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
 //
 #include "LightingModel.h"
+#include "RandomAndNoise.h"
+#include "BRDF.h"
+
+#include "render-utils/ShaderConstants.h"
+
+#include <TBBHelpers.h>
+
+gpu::TexturePointer LightingModel::_ambientFresnelLUT;

 LightingModel::LightingModel() {
     Parameters parameters;
     _parametersBuffer = gpu::BufferView(std::make_shared<gpu::Buffer>(sizeof(Parameters), (const gpu::Byte*) &parameters, sizeof(Parameters)));
+
+#if RENDER_UTILS_ENABLE_AMBIENT_FRESNEL_LUT
+    if (!_ambientFresnelLUT) {
+        // Code taken from the IntegrateBRDF method as described in this talk :
+        // https://cdn2.unrealengine.com/Resources/files/2013SiggraphPresentationsNotes-26915738.pdf
+        const auto N_roughness = 32;
+        const auto N_NdotV = 256;
+
+        using LUTVector = std::vector<glm::u16vec2>;
+        using LUTValueType = LUTVector::value_type::value_type;
+
+        LUTVector lut(N_roughness * N_NdotV);
+
+        _ambientFresnelLUT = gpu::Texture::create2D(gpu::Element{ gpu::VEC2, gpu::NUINT16, gpu::XY }, N_roughness, N_NdotV, 1U,
+                                                    gpu::Sampler(gpu::Sampler::FILTER_MIN_POINT_MAG_LINEAR, gpu::Sampler::WRAP_CLAMP));
+
+        tbb::parallel_for(tbb::blocked_range2d<int, int>(0, N_NdotV, 8, 0, N_roughness, 8), [&](const tbb::blocked_range2d<int, int>& range) {
+            auto roughnessRange = range.cols();
+            auto ndotvRange = range.rows();
+
+            for (auto j = ndotvRange.begin(); j < ndotvRange.end(); j++) {
+                const float NdotV = j / float(N_NdotV - 1);
+
+                glm::vec3 V;
+                V.x = std::sqrt(1.0f - NdotV * NdotV); // sin
+                V.y = 0;
+                V.z = NdotV; // cos
+
+                for (auto k = roughnessRange.begin(); k < roughnessRange.end(); k++) {
+                    const float roughness = k / float(N_roughness - 1);
+                    const float alpha = roughness * roughness;
+                    const float alphaSquared = alpha * alpha;
+
+                    float A = 0.0f;
+                    float B = 0.0f;
+
+                    const uint NumSamples = 1024;
+                    for (uint i = 0; i < NumSamples; i++) {
+                        glm::vec2 Xi = hammersley::evaluate(i, NumSamples);
+                        glm::vec3 H = ggx::sample(Xi, roughness);
+                        float VdotH = glm::dot(V, H);
+                        glm::vec3 L = 2.0f * VdotH * H - V;
+                        float NdotL = L.z;
+
+                        if (NdotL > 0.0f) {
+                            VdotH = glm::clamp(VdotH, 0.0f, 1.0f);
+
+                            float NdotH = glm::clamp(H.z, 0.0f, 1.0f);
+                            float G = smith::evaluateFastWithoutNdotV(alphaSquared, NdotV, NdotL);
+                            float G_Vis = (G * VdotH) / NdotH;
+                            float Fc = std::pow(1.0f - VdotH, 5.0f);
+
+                            A += (1.0f - Fc) * G_Vis;
+                            B += Fc * G_Vis;
+                        }
+                    }
+
+                    A /= NumSamples;
+                    B /= NumSamples;
+
+                    auto& lutValue = lut[k + j * N_roughness];
+                    lutValue.x = (LUTValueType)(glm::min(1.0f, A) * std::numeric_limits<LUTValueType>::max());
+                    lutValue.y = (LUTValueType)(glm::min(1.0f, B) * std::numeric_limits<LUTValueType>::max());
+                }
+            }
+        });
+
+        _ambientFresnelLUT->assignStoredMip(0, N_roughness * N_NdotV * sizeof(LUTVector::value_type), (const gpu::Byte*)lut.data());
+    }
+#endif
 }

 void LightingModel::setUnlit(bool enable) {
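What the two LUT channels encode, per the Karis split-sum notes linked in the code: for a given (roughness, NdotV), the prefiltered environment color is modulated by F0 * A + B, which is what fresnelSchlickAmbient reads back from ambientFresnelLUT on the GPU. A small CPU-side decoding sketch (the helper function is hypothetical, not part of the commit; the indexing matches `lut[k + j * N_roughness]` above):

```cpp
#include <cstdint>
#include <limits>
#include <glm/glm.hpp>
#include <glm/gtc/type_precision.hpp>   // glm::u16vec2

// Each LUT texel holds (A, B) as normalized uint16 pairs.
glm::vec3 ambientSpecularFactor(const glm::vec3& F0, const glm::u16vec2& lutValue) {
    const float norm = 1.0f / float(std::numeric_limits<uint16_t>::max());
    float A = lutValue.x * norm;   // scale applied to the material's F0
    float B = lutValue.y * norm;   // bias, independent of F0
    return F0 * A + glm::vec3(B);  // split-sum BRDF environment term
}
```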
@@ -83,6 +83,7 @@ public:
     bool isShadowEnabled() const;

     UniformBufferView getParametersBuffer() const { return _parametersBuffer; }
+    gpu::TexturePointer getAmbientFresnelLUT() const { return _ambientFresnelLUT; }

 protected:

@@ -126,6 +127,7 @@ protected:
         Parameters() {}
     };
     UniformBufferView _parametersBuffer;
+    static gpu::TexturePointer _ambientFresnelLUT;
 };

 using LightingModelPointer = std::shared_ptr<LightingModel>;
@@ -444,6 +444,19 @@ bool Model::findRayIntersectionAgainstSubMeshes(const glm::vec3& origin, const g
         }
     }

+    /**jsdoc
+     * Information about a submesh intersection point.
+     * @typedef {object} SubmeshIntersection
+     * @property {Vec3} worldIntersectionPoint - The intersection point in world coordinates.
+     * @property {Vec3} meshIntersectionPoint - The intersection point in model coordinates.
+     * @property {number} partIndex - The index of the intersected mesh part within the submesh.
+     * @property {number} shapeID - The index of the mesh part within the model.
+     * @property {number} subMeshIndex - The index of the intersected submesh within the model.
+     * @property {string} subMeshName - The name of the intersected submesh.
+     * @property {Triangle} subMeshTriangleWorld - The vertices of the intersected mesh part triangle in world coordinates.
+     * @property {Vec3} subMeshNormal - The normal of the intersected mesh part triangle in model coordinates.
+     * @property {Triangle} subMeshTriangle - The vertices of the intersected mesh part triangle in model coordinates.
+     */
     if (intersectedSomething) {
         distance = bestDistance;
         face = bestFace;
@@ -94,6 +94,7 @@ void DrawLayered3D::run(const RenderContextPointer& renderContext, const Inputs&

         // Setup lighting model for all items;
         batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer());
+        batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT());

         if (_opaquePass) {
             renderStateSortShapes(renderContext, _shapePlumber, inItems, _maxDrawn);
@@ -471,6 +471,7 @@ void RenderTransparentDeferred::run(const RenderContextPointer& renderContext, c

         // Setup lighting model for all items;
         batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer());
+        batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT());

         // Set the light
         deferredLightingEffect->setupKeyLightBatch(args, batch, *lightFrame);

@@ -536,6 +537,7 @@ void DrawStateSortDeferred::run(const RenderContextPointer& renderContext, const

         // Setup lighting model for all items;
         batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer());
+        batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT());

         // From the lighting model define a global shapeKey ORED with individual keys
         ShapeKey::Builder keyBuilder;
@@ -251,6 +251,7 @@ void DrawForward::run(const RenderContextPointer& renderContext, const Inputs& i

         // Setup lighting model for all items;
         batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer());
+        batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT());

         // From the lighting model define a global shapeKey ORED with individual keys
         ShapeKey::Builder keyBuilder;
@@ -14,6 +14,10 @@
 #ifndef RENDER_UTILS_SHADER_CONSTANTS_H
 #define RENDER_UTILS_SHADER_CONSTANTS_H

+// Feature enabling flags (possibly need to rebuild shaders if this changes)
+#define RENDER_UTILS_ENABLE_AMBIENT_FRESNEL_LUT 1
+
 // Binding slots
 #define RENDER_UTILS_ATTR_TEXCOORD01 0
 #define RENDER_UTILS_ATTR_COLOR 1

@@ -54,6 +58,7 @@
 #define RENDER_UTILS_TEXTURE_DEFERRED_DIFFUSED_CURVATURE 7
 #define RENDER_UTILS_TEXTURE_DEFERRED_LIGHTING 10
 #define RENDER_UTILS_TEXTURE_SKYBOX 11
+#define RENDER_UTILS_TEXTURE_AMBIENT_FRESNEL 14

 #define RENDER_UTILS_BUFFER_SHADOW_PARAMS 2
 #define RENDER_UTILS_TEXTURE_SHADOW 12

@@ -198,6 +203,7 @@ enum Texture {
     BloomColor = RENDER_UTILS_TEXTURE_BLOOM_COLOR,
     ToneMappingColor = RENDER_UTILS_TEXTURE_TM_COLOR,
     TextFont = RENDER_UTILS_TEXTURE_TEXT_FONT,
+    AmbientFresnel = RENDER_UTILS_TEXTURE_AMBIENT_FRESNEL,
     DebugTexture0 = RENDER_UTILS_DEBUG_TEXTURE0,
 };
 } // namespace texture
@@ -42,6 +42,7 @@ const float DEFAULT_AVATAR_HIPS_MASS = 40.0f;
 const float DEFAULT_AVATAR_HEAD_MASS = 20.0f;
 const float DEFAULT_AVATAR_LEFTHAND_MASS = 2.0f;
 const float DEFAULT_AVATAR_RIGHTHAND_MASS = 2.0f;
+const float DEFAULT_AVATAR_IPD = 0.064f;

 // Used when avatar is missing joints... (avatar space)
 const glm::quat DEFAULT_AVATAR_MIDDLE_EYE_ROT { Quaternions::Y_180 };

@@ -102,6 +103,7 @@ static const float MAX_AVATAR_HEIGHT = 1000.0f * DEFAULT_AVATAR_HEIGHT; // meter
 static const float MIN_AVATAR_HEIGHT = 0.005f * DEFAULT_AVATAR_HEIGHT; // meters
 static const float MIN_AVATAR_RADIUS = 0.5f * MIN_AVATAR_HEIGHT;
 static const float AVATAR_WALK_SPEED_SCALAR = 1.0f;
-static const float AVATAR_SPRINT_SPEED_SCALAR = 2.0f;
+static const float AVATAR_DESKTOP_SPRINT_SPEED_SCALAR = 3.0f;
+static const float AVATAR_HMD_SPRINT_SPEED_SCALAR = 2.0f;

 #endif // hifi_AvatarConstants_h
45
libraries/shared/src/BRDF.cpp
Normal file
@@ -0,0 +1,45 @@
#include "BRDF.h"

#include <cmath>
#ifndef M_PI
#define M_PI 3.14159265359
#endif

namespace ggx {

float evaluate(float NdotH, float roughness) {
    float alpha = roughness * roughness;
    float alphaSquared = alpha * alpha;
    float denom = (float)(NdotH * NdotH * (alphaSquared - 1.0f) + 1.0f);
    return alphaSquared / (denom * denom);
}

glm::vec3 sample(const glm::vec2& Xi, const float roughness) {
    const float a = roughness * roughness;

    float phi = 2.0f * (float) M_PI * Xi.x;
    float cosTheta = std::sqrt((1.0f - Xi.y) / (1.0f + (a*a - 1.0f) * Xi.y));
    float sinTheta = std::sqrt(1.0f - cosTheta * cosTheta);

    // from spherical coordinates to cartesian coordinates
    glm::vec3 H;
    H.x = std::cos(phi) * sinTheta;
    H.y = std::sin(phi) * sinTheta;
    H.z = cosTheta;

    return H;
}

}

namespace smith {

float evaluateFastWithoutNdotV(float alphaSquared, float NdotV, float NdotL) {
    float oneMinusAlphaSquared = 1.0f - alphaSquared;
    float G = NdotL * std::sqrt(alphaSquared + NdotV * NdotV * oneMinusAlphaSquared);
    G = G + NdotV * std::sqrt(alphaSquared + NdotL * NdotL * oneMinusAlphaSquared);
    return 2.0f * NdotL / G;
}

}
36
libraries/shared/src/BRDF.h
Normal file
@@ -0,0 +1,36 @@
#pragma once
//
//  BRDF.h
//
//  Created by Olivier Prat on 04/04/19.
//  Copyright 2019 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef SHARED_BRDF_H
#define SHARED_BRDF_H

#include <glm/vec2.hpp>
#include <glm/vec3.hpp>

// GGX micro-facet model
namespace ggx {
float evaluate(float NdotH, float roughness);
glm::vec3 sample(const glm::vec2& Xi, const float roughness);
}

// Smith visibility function
namespace smith {
float evaluateFastWithoutNdotV(float alphaSquared, float NdotV, float NdotL);

inline float evaluateFast(float alphaSquared, float NdotV, float NdotL) {
    return evaluateFastWithoutNdotV(alphaSquared, NdotV, NdotL) * NdotV;
}

inline float evaluate(float roughness, float NdotV, float NdotL) {
    return evaluateFast(roughness*roughness*roughness*roughness, NdotV, NdotL);
}
}

#endif // SHARED_BRDF_H
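A minimal sketch of how these helpers compose, mirroring the structure of the LightingModel LUT bake earlier in this commit: importance-sample the GGX lobe, reflect the view vector about each half vector, and accumulate the Smith visibility term (the function itself and its return value are illustrative, not part of the commit):

```cpp
#include <cmath>
#include <glm/glm.hpp>
#include "BRDF.h"
#include "RandomAndNoise.h"

// Monte-Carlo estimate of the visibility-weighted GGX lobe around a view vector.
float integrateVisibility(float roughness, float NdotV, int sampleCount) {
    glm::vec3 V(std::sqrt(1.0f - NdotV * NdotV), 0.0f, NdotV);  // view in tangent space
    float alphaSquared = roughness * roughness * roughness * roughness;
    float sum = 0.0f;
    for (int i = 0; i < sampleCount; i++) {
        glm::vec2 Xi = hammersley::evaluate(i, sampleCount);    // 2D quasi-random point
        glm::vec3 H = ggx::sample(Xi, roughness);               // half vector around +Z
        glm::vec3 L = 2.0f * glm::dot(V, H) * H - V;            // reflect V about H
        if (L.z > 0.0f) {                                       // keep upper hemisphere
            sum += smith::evaluateFastWithoutNdotV(alphaSquared, NdotV, L.z);
        }
    }
    return sum / float(sampleCount);
}
```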
@@ -119,6 +119,13 @@ void swingTwistDecomposition(const glm::quat& rotation,
                             glm::quat& swing,
                             glm::quat& twist);

+/**jsdoc
+ * A triangle in a mesh.
+ * @typedef {object} Triangle
+ * @property {Vec3} v0 - The position of vertex 0 in the triangle.
+ * @property {Vec3} v1 - The position of vertex 1 in the triangle.
+ * @property {Vec3} v2 - The position of vertex 2 in the triangle.
+ */
 class Triangle {
 public:
     glm::vec3 v0;
52
libraries/shared/src/RandomAndNoise.h
Normal file
@@ -0,0 +1,52 @@
//
//  RandomAndNoise.h
//
//  Created by Olivier Prat on 05/16/18.
//  Copyright 2018 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef RANDOM_AND_NOISE_H
#define RANDOM_AND_NOISE_H

#include <glm/vec2.hpp>

namespace halton {
    // Low discrepancy Halton sequence generator
    template <int B>
    float evaluate(int index) {
        float f = 1.0f;
        float r = 0.0f;
        float invB = 1.0f / (float)B;
        index++; // Indices start at 1, not 0

        while (index > 0) {
            f = f * invB;
            r = r + f * (float)(index % B);
            index = index / B;
        }

        return r;
    }
}

inline float getRadicalInverseVdC(uint32_t bits) {
    bits = (bits << 16u) | (bits >> 16u);
    bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u);
    bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u);
    bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u);
    bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u);
    return float(bits) * 2.3283064365386963e-10f; // / 0x100000000
}

namespace hammersley {
    // Low discrepancy Hammersley 2D sequence generator
    inline glm::vec2 evaluate(int k, const int sequenceLength) {
        return glm::vec2(float(k) / float(sequenceLength), getRadicalInverseVdC(k));
    }
}

#endif
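A quick sanity check on getRadicalInverseVdC: the five swaps reverse all 32 bits of the input, so scaling by 2^-32 mirrors its binary fraction. Pairing that with the uniform stratum k/N gives the Hammersley set (values below worked out by hand under that reading):

```cpp
#include <glm/vec2.hpp>
#include "RandomAndNoise.h"

void hammersleySanityCheck() {
    // getRadicalInverseVdC(1) == 0.5, (2) == 0.25, (3) == 0.75, (4) == 0.125
    glm::vec2 p0 = hammersley::evaluate(0, 4);  // (0.00, 0.00)
    glm::vec2 p1 = hammersley::evaluate(1, 4);  // (0.25, 0.50)
    glm::vec2 p2 = hammersley::evaluate(2, 4);  // (0.50, 0.25)
    glm::vec2 p3 = hammersley::evaluate(3, 4);  // (0.75, 0.75)
    (void)p0; (void)p1; (void)p2; (void)p3;
}
```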
@@ -20,7 +20,7 @@
 #include <tbb/concurrent_unordered_set.h>
 #include <tbb/concurrent_vector.h>
 #include <tbb/parallel_for.h>
+#include <tbb/blocked_range2d.h>

 #ifdef _WIN32
 #pragma warning( pop )
@@ -36,25 +36,6 @@ static int cameraModeId = qRegisterMetaType<CameraMode>();
 class Camera : public QObject {
     Q_OBJECT

-    /**jsdoc
-     * The Camera API provides access to the "camera" that defines your view in desktop and HMD display modes.
-     *
-     * @namespace Camera
-     *
-     * @hifi-interface
-     * @hifi-client-entity
-     * @hifi-avatar
-     *
-     * @property {Vec3} position - The position of the camera. You can set this value only when the camera is in independent
-     *     mode.
-     * @property {Quat} orientation - The orientation of the camera. You can set this value only when the camera is in
-     *     independent mode.
-     * @property {Camera.Mode} mode - The camera mode.
-     * @property {ViewFrustum} frustum - The camera frustum.
-     * @property {Uuid} cameraEntity - The ID of the entity that is used for the camera position and orientation when the
-     *     camera is in entity mode.
-     */
-    // FIXME: The cameraEntity property definition is copied from FancyCamera.h.
     Q_PROPERTY(glm::vec3 position READ getPosition WRITE setPosition)
     Q_PROPERTY(glm::quat orientation READ getOrientation WRITE setOrientation)
     Q_PROPERTY(QString mode READ getModeString WRITE setModeString)

@@ -82,53 +63,54 @@ public:

 public slots:
     /**jsdoc
-     * Get the current camera mode. You can also get the mode using the <code>Camera.mode</code> property.
+     * Gets the current camera mode. You can also get the mode using the {@link Camera|Camera.mode} property.
      * @function Camera.getModeString
      * @returns {Camera.Mode} The current camera mode.
      */
     QString getModeString() const;

     /**jsdoc
-     * Set the camera mode. You can also set the mode using the <code>Camera.mode</code> property.
-     * @function Camera.setModeString
-     * @param {Camera.Mode} mode - The mode to set the camera to.
-     */
+     * Sets the camera mode. You can also set the mode using the {@link Camera|Camera.mode} property.
+     * @function Camera.setModeString
+     * @param {Camera.Mode} mode - The mode to set the camera to.
+     */
     void setModeString(const QString& mode);

     /**jsdoc
-     * Get the current camera position. You can also get the position using the <code>Camera.position</code> property.
-     * @function Camera.getPosition
-     * @returns {Vec3} The current camera position.
-     */
+     * Gets the current camera position. You can also get the position using the {@link Camera|Camera.position} property.
+     * @function Camera.getPosition
+     * @returns {Vec3} The current camera position.
+     */
     glm::vec3 getPosition() const { return _position; }

     /**jsdoc
-     * Set the camera position. You can also set the position using the <code>Camera.position</code> property. Only works if the
-     * camera is in independent mode.
-     * @function Camera.setPosition
-     * @param {Vec3} position - The position to set the camera at.
-     */
+     * Sets the camera position. You can also set the position using the {@link Camera|Camera.position} property. Only works if
+     * the camera is in independent mode.
+     * @function Camera.setPosition
+     * @param {Vec3} position - The position to set the camera at.
+     */
     void setPosition(const glm::vec3& position);

     /**jsdoc
-     * Get the current camera orientation. You can also get the orientation using the <code>Camera.orientation</code> property.
-     * @function Camera.getOrientation
-     * @returns {Quat} The current camera orientation.
-     */
+     * Gets the current camera orientation. You can also get the orientation using the {@link Camera|Camera.orientation}
+     * property.
+     * @function Camera.getOrientation
+     * @returns {Quat} The current camera orientation.
+     */
     glm::quat getOrientation() const { return _orientation; }

     /**jsdoc
-     * Set the camera orientation. You can also set the orientation using the <code>Camera.orientation</code> property. Only
-     * works if the camera is in independent mode.
-     * @function Camera.setOrientation
-     * @param {Quat} orientation - The orientation to set the camera to.
-     */
+     * Sets the camera orientation. You can also set the orientation using the {@link Camera|Camera.orientation} property. Only
+     * works if the camera is in independent mode.
+     * @function Camera.setOrientation
+     * @param {Quat} orientation - The orientation to set the camera to.
+     */
     void setOrientation(const glm::quat& orientation);

     /**jsdoc
-     * Compute a {@link PickRay} based on the current camera configuration and the specified <code>x, y</code> position on the
-     * screen. The {@link PickRay} can be used in functions such as {@link Entities.findRayIntersection} and
-     * {@link Overlays.findRayIntersection}.
+     * Computes a {@link PickRay} based on the current camera configuration and the specified <code>x, y</code> position on the
+     * screen. The {@link PickRay} can be used in functions such as {@link Entities.findRayIntersection} and
+     * {@link Overlays.findRayIntersection}.
      * @function Camera.computePickRay
      * @param {number} x - X-coordinate on screen.
      * @param {number} y - Y-coordinate on screen.

@@ -147,9 +129,9 @@ public slots:
     virtual PickRay computePickRay(float x, float y) const = 0;

     /**jsdoc
-     * Rotate the camera to look at the specified <code>position</code>. Only works if the camera is in independent mode.
+     * Rotates the camera to look at the specified <code>position</code>. Only works if the camera is in independent mode.
      * @function Camera.lookAt
-     * @param {Vec3} position - Position to look at.
+     * @param {Vec3} position - The position to look at.
      * @example <caption>Rotate your camera to look at entities as you click on them with your mouse.</caption>
      * function onMousePressEvent(event) {
      *     var pickRay = Camera.computePickRay(event.x, event.y);

@@ -168,15 +150,15 @@ public slots:
     void lookAt(const glm::vec3& position);

     /**jsdoc
-     * Set the camera to continue looking at the specified <code>position</code> even while the camera moves. Only works if the
-     * camera is in independent mode.
+     * Sets the camera to continue looking at the specified <code>position</code> even while the camera moves. Only works if
+     * the camera is in independent mode.
      * @function Camera.keepLookingAt
-     * @param {Vec3} position - Position to keep looking at.
+     * @param {Vec3} position - The position to keep looking at.
      */
     void keepLookingAt(const glm::vec3& position);

     /**jsdoc
-     * Stops the camera from continually looking at the position that was set with <code>Camera.keepLookingAt</code>.
+     * Stops the camera from continually looking at the position that was set with {@link Camera.keepLookingAt}.
      * @function Camera.stopLookingAt
      */
     void stopLooking() { _isKeepLookingAt = false; }
@@ -2129,9 +2129,32 @@ var DELETED_ENTITY_MAP = {};

 function applyEntityProperties(data) {
+    var editEntities = data.editEntities;
+    var createEntities = data.createEntities;
+    var deleteEntities = data.deleteEntities;
     var selectedEntityIDs = [];
-    var selectEdits = data.createEntities.length === 0 || !data.selectCreated;
+    var selectEdits = createEntities.length === 0 || !data.selectCreated;
     var i, entityID, entityProperties;
+    for (i = 0; i < createEntities.length; i++) {
+        entityID = createEntities[i].entityID;
+        entityProperties = createEntities[i].properties;
+        var newEntityID = Entities.addEntity(entityProperties);
+        recursiveAdd(newEntityID, createEntities[i]);
+        DELETED_ENTITY_MAP[entityID] = newEntityID;
+        if (data.selectCreated) {
+            selectedEntityIDs.push(newEntityID);
+        }
+    }
+    for (i = 0; i < deleteEntities.length; i++) {
+        entityID = deleteEntities[i].entityID;
+        if (DELETED_ENTITY_MAP[entityID] !== undefined) {
+            entityID = DELETED_ENTITY_MAP[entityID];
+        }
+        Entities.deleteEntity(entityID);
+        var index = selectedEntityIDs.indexOf(entityID);
+        if (index >= 0) {
+            selectedEntityIDs.splice(index, 1);
+        }
+    }
     for (i = 0; i < editEntities.length; i++) {
         entityID = editEntities[i].entityID;
         if (DELETED_ENTITY_MAP[entityID] !== undefined) {

@@ -2145,27 +2168,6 @@ function applyEntityProperties(data) {
             selectedEntityIDs.push(entityID);
         }
     }
-    for (i = 0; i < data.createEntities.length; i++) {
-        entityID = data.createEntities[i].entityID;
-        entityProperties = data.createEntities[i].properties;
-        var newEntityID = Entities.addEntity(entityProperties);
-        recursiveAdd(newEntityID, data.createEntities[i]);
-        DELETED_ENTITY_MAP[entityID] = newEntityID;
-        if (data.selectCreated) {
-            selectedEntityIDs.push(newEntityID);
-        }
-    }
-    for (i = 0; i < data.deleteEntities.length; i++) {
-        entityID = data.deleteEntities[i].entityID;
-        if (DELETED_ENTITY_MAP[entityID] !== undefined) {
-            entityID = DELETED_ENTITY_MAP[entityID];
-        }
-        Entities.deleteEntity(entityID);
-        var index = selectedEntityIDs.indexOf(entityID);
-        if (index >= 0) {
-            selectedEntityIDs.splice(index, 1);
-        }
-    }

     // We might be getting an undo while edit.js is disabled. If that is the case, don't set
     // our selections, causing the edit widgets to display.
@@ -1347,12 +1347,16 @@ SelectionDisplay = (function() {
     };

     that.updateLastMouseEvent = function(event) {
         if (activeTool && lastMouseEvent !== null) {
+            var change = lastMouseEvent.isShifted !== event.isShifted || lastMouseEvent.isMeta !== event.isMeta ||
+                lastMouseEvent.isControl !== event.isControl || lastMouseEvent.isAlt !== event.isAlt;
             lastMouseEvent.isShifted = event.isShifted;
             lastMouseEvent.isMeta = event.isMeta;
             lastMouseEvent.isControl = event.isControl;
             lastMouseEvent.isAlt = event.isAlt;
-            activeTool.onMove(lastMouseEvent);
+            if (change) {
+                activeTool.onMove(lastMouseEvent);
+            }
         }
     };
@@ -58,6 +58,7 @@ exports.handlers = {
             '../../libraries/physics/src',
             '../../libraries/plugins/src/plugins',
             '../../libraries/pointers/src',
+            '../../libraries/render-utils/src',
             '../../libraries/script-engine/src',
             '../../libraries/shared/src',
             '../../libraries/shared/src/shared',
@@ -80,8 +80,9 @@ void BakerCLI::bakeFile(QUrl inputUrl, const QString& outputPath, const QString&
         { "roughness", image::TextureUsage::ROUGHNESS_TEXTURE },
         { "gloss", image::TextureUsage::GLOSS_TEXTURE },
         { "emissive", image::TextureUsage::EMISSIVE_TEXTURE },
-        { "cube", image::TextureUsage::CUBE_TEXTURE },
-        { "skybox", image::TextureUsage::CUBE_TEXTURE },
+        { "cube", image::TextureUsage::SKY_TEXTURE },
+        { "skybox", image::TextureUsage::SKY_TEXTURE },
+        { "ambient", image::TextureUsage::AMBIENT_TEXTURE },
         { "occlusion", image::TextureUsage::OCCLUSION_TEXTURE },
         { "scattering", image::TextureUsage::SCATTERING_TEXTURE },
         { "lightmap", image::TextureUsage::LIGHTMAP_TEXTURE },
@@ -387,13 +387,13 @@ void DomainBaker::enumerateEntities() {
             if (entity.contains(AMBIENT_LIGHT_KEY)) {
                 auto ambientLight = entity[AMBIENT_LIGHT_KEY].toObject();
                 if (ambientLight.contains(AMBIENT_URL_KEY)) {
-                    addTextureBaker(AMBIENT_LIGHT_KEY + "." + AMBIENT_URL_KEY, ambientLight[AMBIENT_URL_KEY].toString(), image::TextureUsage::CUBE_TEXTURE, *it);
+                    addTextureBaker(AMBIENT_LIGHT_KEY + "." + AMBIENT_URL_KEY, ambientLight[AMBIENT_URL_KEY].toString(), image::TextureUsage::AMBIENT_TEXTURE, *it);
                 }
             }
             if (entity.contains(SKYBOX_KEY)) {
                 auto skybox = entity[SKYBOX_KEY].toObject();
                 if (skybox.contains(SKYBOX_URL_KEY)) {
-                    addTextureBaker(SKYBOX_KEY + "." + SKYBOX_URL_KEY, skybox[SKYBOX_URL_KEY].toString(), image::TextureUsage::CUBE_TEXTURE, *it);
+                    addTextureBaker(SKYBOX_KEY + "." + SKYBOX_URL_KEY, skybox[SKYBOX_URL_KEY].toString(), image::TextureUsage::SKY_TEXTURE, *it);
                 }
             }

@@ -412,9 +412,13 @@ void DomainBaker::enumerateEntities() {
             if (entity.contains(MATERIAL_URL_KEY)) {
                 addMaterialBaker(MATERIAL_URL_KEY, entity[MATERIAL_URL_KEY].toString(), true, *it);
             }
+            // FIXME: Disabled for now because relative texture URLs are not supported for embedded materials in material entities
+            // We need to make texture URLs absolute in this particular case only, keeping in mind that FSTBaker also uses embedded materials
+            /*
             if (entity.contains(MATERIAL_DATA_KEY)) {
                 addMaterialBaker(MATERIAL_DATA_KEY, entity[MATERIAL_DATA_KEY].toString(), false, *it);
             }
+            */
         }
     }
@@ -17,6 +17,7 @@
 #include <QtWidgets/QLineEdit>
 #include <QtWidgets/QMessageBox>
 #include <QtWidgets/QPushButton>
+#include <QtWidgets/QCheckBox>
 #include <QtWidgets/QStackedWidget>

 #include <QtCore/QDir>

@@ -61,6 +62,15 @@ void SkyboxBakeWidget::setupUI() {
     // start a new row for next component
     ++rowIndex;

+    // setup a section to enable Ambient map baking
+    _ambientMapBox = new QCheckBox("Bake ambient map(s)");
+    _ambientMapBox->setChecked(false);
+
+    gridLayout->addWidget(_ambientMapBox, rowIndex, 1);
+
+    // start a new row for next component
+    ++rowIndex;
+
     // setup a section to choose the output directory
     QLabel* outputDirectoryLabel = new QLabel("Output Directory");

@@ -176,51 +186,67 @@ void SkyboxBakeWidget::bakeButtonClicked() {

         // if the URL doesn't have a scheme, assume it is a local file
         if (skyboxToBakeURL.scheme() != "http" && skyboxToBakeURL.scheme() != "https" && skyboxToBakeURL.scheme() != "ftp") {
-            skyboxToBakeURL.setScheme("file");
+            skyboxToBakeURL = QUrl::fromLocalFile(fileURLString);
         }

         // everything seems to be in place, kick off a bake for this skybox now
-        auto baker = std::unique_ptr<TextureBaker> {
-            new TextureBaker(skyboxToBakeURL, image::TextureUsage::CUBE_TEXTURE, outputDirectory.absolutePath())
-        };
-
-        // move the baker to a worker thread
-        baker->moveToThread(Oven::instance().getNextWorkerThread());
-
-        // invoke the bake method on the baker thread
-        QMetaObject::invokeMethod(baker.get(), "bake");
-
-        // make sure we hear about the results of this baker when it is done
-        connect(baker.get(), &TextureBaker::finished, this, &SkyboxBakeWidget::handleFinishedBaker);
-
-        // add a pending row to the results window to show that this bake is in process
-        auto resultsWindow = OvenGUIApplication::instance()->getMainWindow()->showResultsWindow();
-        auto resultsRow = resultsWindow->addPendingResultRow(skyboxToBakeURL.fileName(), outputDirectory);
-
-        // keep a unique_ptr to this baker
-        // and remember the row that represents it in the results table
-        _bakers.emplace_back(std::move(baker), resultsRow);
+        addBaker(new TextureBaker(skyboxToBakeURL, image::TextureUsage::SKY_TEXTURE, outputDirectory.absolutePath()),
+                 outputDirectory);
+
+        if (_ambientMapBox->isChecked()) {
+            QString ambientMapBaseFilename;
+            QString urlPath = skyboxToBakeURL.path();
+            auto urlParts = urlPath.split('.');
+
+            urlParts.front() += "-ambient";
+            ambientMapBaseFilename = QUrl(urlParts.front()).fileName();
+
+            // we need to bake the corresponding ambient map too
+            addBaker(new TextureBaker(skyboxToBakeURL, image::TextureUsage::AMBIENT_TEXTURE, outputDirectory.absolutePath(), QString(), ambientMapBaseFilename),
+                     outputDirectory);
+        }
     }
 }

+void SkyboxBakeWidget::addBaker(TextureBaker* baker, const QDir& outputDirectory) {
+    auto textureBaker = std::unique_ptr<TextureBaker>{ baker };
+
+    // move the textureBaker to a worker thread
+    textureBaker->moveToThread(Oven::instance().getNextWorkerThread());
+
+    // make sure we hear about the results of this textureBaker when it is done
+    connect(textureBaker.get(), &TextureBaker::finished, this, &SkyboxBakeWidget::handleFinishedBaker);
+
+    // invoke the bake method on the textureBaker thread
+    QMetaObject::invokeMethod(textureBaker.get(), "bake");
+
+    // add a pending row to the results window to show that this bake is in process
+    auto resultsWindow = OvenGUIApplication::instance()->getMainWindow()->showResultsWindow();
+    auto resultsRow = resultsWindow->addPendingResultRow(baker->getBaseFilename(), outputDirectory);
+
+    // keep a unique_ptr to this textureBaker
+    // and remember the row that represents it in the results table
+    _bakers.emplace_back(std::move(textureBaker), resultsRow);
+}
+
 void SkyboxBakeWidget::handleFinishedBaker() {
-    if (auto baker = qobject_cast<TextureBaker*>(sender())) {
+    if (auto textureBaker = qobject_cast<TextureBaker*>(sender())) {
         // add the results of this bake to the results window
-        auto it = std::find_if(_bakers.begin(), _bakers.end(), [baker](const BakerRowPair& value) {
-            return value.first.get() == baker;
+        auto it = std::find_if(_bakers.begin(), _bakers.end(), [textureBaker](const BakerRowPair& value) {
+            return value.first.get() == textureBaker;
         });

         if (it != _bakers.end()) {
             auto resultRow = it->second;
             auto resultsWindow = OvenGUIApplication::instance()->getMainWindow()->showResultsWindow();

-            if (baker->hasErrors()) {
-                resultsWindow->changeStatusForRow(resultRow, baker->getErrors().join("\n"));
+            if (textureBaker->hasErrors()) {
+                resultsWindow->changeStatusForRow(resultRow, textureBaker->getErrors().join("\n"));
             } else {
                 resultsWindow->changeStatusForRow(resultRow, "Success");
             }

-            // drop our strong pointer to the baker now that we are done with it
+            // drop our strong pointer to the textureBaker now that we are done with it
             _bakers.erase(it);
         }
     }
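The ambient output name is derived from the skybox URL by splitting its path on '.' and tagging the first segment. A worked example of that derivation (the wrapper function is hypothetical; the three lines inside mirror the code above):

```cpp
#include <QtCore/QString>
#include <QtCore/QStringList>
#include <QtCore/QUrl>

QString ambientBaseName(const QUrl& skyboxURL) {
    // "textures/sunset.texmeta.json" -> {"textures/sunset", "texmeta", "json"}
    auto parts = skyboxURL.path().split('.');
    parts.front() += "-ambient";              // "textures/sunset-ambient"
    return QUrl(parts.front()).fileName();    // "sunset-ambient"
}
```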
@@ -21,6 +21,7 @@
 #include "BakeWidget.h"

 class QLineEdit;
+class QCheckBox;

 class SkyboxBakeWidget : public BakeWidget {
     Q_OBJECT

@@ -42,9 +43,12 @@ private:

     QLineEdit* _selectionLineEdit;
     QLineEdit* _outputDirLineEdit;
+    QCheckBox* _ambientMapBox;

     Setting::Handle<QString> _exportDirectory;
     Setting::Handle<QString> _selectionStartDirectory;

+    void addBaker(TextureBaker* baker, const QDir& outputDir);
 };

 #endif // hifi_SkyboxBakeWidget_h