Merge branch 'master' into feature/further-rebranding

Commit fda40d56d4: 163 changed files with 3223 additions and 3229 deletions.
.github/workflows/pr_build.yml (vendored, 6 changed lines)
@@ -51,7 +51,7 @@ jobs:
         echo ::set-env name=PYTHON_EXEC::python3
         echo ::set-env name=INSTALLER_EXT::*
         echo ::set-env name=CMAKE_BUILD_EXTRA::"-- -j3"
-        echo ::set-env name=CMAKE_EXTRA::"-DBUILD_TOOLS:BOOLEAN=FALSE"
+        echo ::set-env name=CMAKE_EXTRA::"-DBUILD_TOOLS:BOOLEAN=FALSE -DHIFI_PYTHON_EXEC:FILEPATH=$(which python3)"
       fi
       # Mac build variables
       if [ "${{ matrix.os }}" = "macOS-latest" ]; then
@@ -70,9 +70,9 @@ jobs:
       shell: bash
       run: |
         echo "${{ steps.buildenv1.outputs.symbols_archive }}"
-        echo ::set-env name=ARTIFACT_PATTERN::ProjectAthena-Alpha-PR${{ github.event.number }}-*.$INSTALLER_EXT
+        echo ::set-env name=ARTIFACT_PATTERN::Vircadia-Alpha-PR${{ github.event.number }}-*.$INSTALLER_EXT
         # Build type variables
-        echo ::set-env name=INSTALLER::HighFidelity-Beta-$RELEASE_NUMBER-$GIT_COMMIT_SHORT.$INSTALLER_EXT
+        echo ::set-env name=INSTALLER::Vircadia-Alpha-$RELEASE_NUMBER-$GIT_COMMIT_SHORT.$INSTALLER_EXT
     - name: Clear Working Directory
       if: startsWith(matrix.os, 'windows')
       shell: bash
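Note: the workflow sets variables for later steps via GitHub Actions' `::set-env` command, which a bash step simply echoes. A minimal sketch of the renamed artifact variables, assuming the `RELEASE_NUMBER`, `GIT_COMMIT_SHORT`, and `INSTALLER_EXT` values the workflow already defines (`PR_NUMBER` here is a hypothetical stand-in for `${{ github.event.number }}`):

```bash
# Sketch only, not part of the diff. ::set-env exports a variable to later steps.
echo ::set-env name=ARTIFACT_PATTERN::Vircadia-Alpha-PR${PR_NUMBER}-*.$INSTALLER_EXT
echo ::set-env name=INSTALLER::Vircadia-Alpha-$RELEASE_NUMBER-$GIT_COMMIT_SHORT.$INSTALLER_EXT
```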
BUILD.md (44 changed lines)
@@ -1,6 +1,6 @@
 # General Build Information
 
-*Last Updated on December 21, 2019*
+*Last Updated on May 17, 2020*
 
 ### OS Specific Build Guides
 
@@ -22,7 +22,7 @@ These dependencies need not be installed manually. They are automatically downlo
 - [Bullet Physics Engine](https://github.com/bulletphysics/bullet3/releases): 2.83
 - [glm](https://glm.g-truc.net/0.9.8/index.html): 0.9.8
 - [Oculus SDK](https://developer.oculus.com/downloads/): 1.11 (Win32) / 0.5 (Mac)
-- [OpenVR](https://github.com/ValveSoftware/openvr): 1.0.6 (Win32 only)
+- [OpenVR](https://github.com/ValveSoftware/openvr): 1.11.11 (Win32 only)
 - [Polyvox](http://www.volumesoffun.com/): 0.2.1
 - [QuaZip](https://sourceforge.net/projects/quazip/files/quazip/): 0.7.3
 - [SDL2](https://www.libsdl.org/download-2.0.php): 2.0.3
@@ -38,7 +38,7 @@ These are not placed in your normal build tree when doing an out of source build
 
 #### CMake
 
-Athena uses CMake to generate build files and project files for your platform.
+Vircadia uses CMake to generate build files and project files for your platform.
 
 #### Qt
 CMake will download Qt 5.12.3 using vcpkg.
@@ -51,9 +51,9 @@ This can either be entered directly into your shell session before you build or
-export QT_CMAKE_PREFIX_PATH=/usr/local/Cellar/qt5/5.12.3/lib/cmake
+export QT_CMAKE_PREFIX_PATH=/usr/local/opt/qt5/lib/cmake
 
-#### Vcpkg
+#### VCPKG
 
-Athena uses vcpkg to download and build dependencies.
+Vircadia uses vcpkg to download and build dependencies.
 You do not need to install vcpkg.
 
 Building the dependencies can be lengthy and the resulting files will be stored in your OS temp directory.
@@ -63,7 +63,33 @@ export HIFI_VCPKG_BASE=/path/to/directory
 
 Where /path/to/directory is the path to a directory where you wish the build files to get stored.
 
-#### Generating build files
+#### Generating Build Files
+
+##### Possible Environment Variables
+
+// The URL to post the dump to.
+CMAKE_BACKTRACE_URL
+// The identifying tag of the release.
+CMAKE_BACKTRACE_TOKEN
+
+// The release version.
+RELEASE_NUMBER
+// The build commit.
+BUILD_NUMBER
+
+// The type of release.
+RELEASE_TYPE=PRODUCTION|PR
+RELEASE_BUILD=PRODUCTION|PR
+
+// TODO: What do these do?
+PRODUCTION_BUILD=0|1
+STABLE_BUILD=0|1
+
+// TODO: What do these do?
+USE_STABLE_GLOBAL_SERVICES=1
+BUILD_GLOBAL_SERVICES=STABLE
+
+##### Generate Files
 
 Create a build directory in the root of your checkout and then run the CMake build from there. This will keep the rest of the directory clean.
 
@@ -71,7 +97,7 @@ Create a build directory in the root of your checkout and then run the CMake bui
 cd build
 cmake ..
 
-If cmake gives you the same error message repeatedly after the build fails, try removing `CMakeCache.txt`.
+If CMake gives you the same error message repeatedly after the build fails, try removing `CMakeCache.txt`.
 
 ##### Generating a release/debug only vcpkg build
 
@@ -97,13 +123,13 @@ For example, to pass the QT_CMAKE_PREFIX_PATH variable (if not using the vcpkg'e
 
 The following applies for dependencies we do not grab via CMake ExternalProject (OpenSSL is an example), or for dependencies you have opted not to grab as a CMake ExternalProject (via -DUSE_LOCAL_$NAME=0). The list of dependencies we grab by default as external projects can be found in [the CMake External Project Dependencies section](#cmake-external-project-dependencies).
 
-You can point our [Cmake find modules](cmake/modules/) to the correct version of dependencies by setting one of the three following variables to the location of the correct version of the dependency.
+You can point our [CMake find modules](cmake/modules/) to the correct version of dependencies by setting one of the three following variables to the location of the correct version of the dependency.
 
 In the examples below the variable $NAME would be replaced by the name of the dependency in uppercase, and $name would be replaced by the name of the dependency in lowercase (ex: OPENSSL_ROOT_DIR, openssl).
 
 * $NAME_ROOT_DIR - pass this variable to Cmake with the -DNAME_ROOT_DIR= flag when running Cmake to generate build files
 * $NAME_ROOT_DIR - set this variable in your ENV
-* HIFI_LIB_DIR - set this variable in your ENV to your High Fidelity lib folder, should contain a folder '$name'
+* HIFI_LIB_DIR - set this variable in your ENV to your Vircadia lib folder, should contain a folder '$name'
 
 ### Optional Components
 
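Taken together, the BUILD.md changes above describe one setup flow: cache vcpkg output, optionally point CMake at a local Qt or dependency, then generate from a `build` directory. A condensed sketch under those assumptions (all paths and the `RELEASE_NUMBER` value are placeholders, not project defaults):

```bash
# Illustrative only; variable names come from the BUILD.md text above.
export HIFI_VCPKG_BASE=/path/to/vcpkg-files               # keep dependencies out of the OS temp dir
export QT_CMAKE_PREFIX_PATH=/usr/local/opt/qt5/lib/cmake  # only if not using vcpkg's Qt

mkdir -p build && cd build
RELEASE_NUMBER=1234 RELEASE_TYPE=PR cmake ..              # optional release variables from the list above

# Point a find module at a locally installed dependency instead:
# cmake .. -DOPENSSL_ROOT_DIR=/path/to/openssl
```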
(file name not shown in this capture)

@@ -1,4 +1,7 @@
 ## This guide is specific to Ubuntu 16.04.
+
+THIS DOCUMENT IS OUTDATED.
+
 Deb packages of High Fidelity domain server and assignment client are stored on debian.highfidelity.com
 
 ```
(file name not shown in this capture)

@@ -6,7 +6,7 @@ Please read the [general build guide](BUILD.md) for information on dependencies
 
 ### Homebrew
 
-[Homebrew](https://brew.sh/) is an excellent package manager for macOS. It makes install of some High Fidelity dependencies very simple.
+[Homebrew](https://brew.sh/) is an excellent package manager for macOS. It makes install of some Vircadia dependencies very simple.
 
     brew install cmake openssl
 
BUILD_WIN.md (20 changed lines)
@@ -1,6 +1,6 @@
 # Build Windows
 
-*Last Updated on January 13, 2020*
+*Last Updated on May 17, 2020*
 
 This is a stand-alone guide for creating your first Vircadia build for Windows 64-bit.
 
@@ -47,7 +47,7 @@ Download and install the latest version of CMake 3.15.
 Download the file named win64-x64 Installer from the [CMake Website](https://cmake.org/download/). You can access the installer on this [3.15 Version page](https://cmake.org/files/v3.15/). During installation, make sure to check "Add CMake to system PATH for all users" when prompted.
 
 ### Step 4. Create VCPKG environment variable
-In the next step, you will use CMake to build Project Athena. By default, the CMake process builds dependency files in Windows' `%TEMP%` directory, which is periodically cleared by the operating system. To prevent you from having to re-build the dependencies in the event that Windows clears that directory, we recommend that you create a `HIFI_VCPKG_BASE` environment variable linked to a directory somewhere on your machine. That directory will contain all dependency files until you manually remove them.
+In the next step, you will use CMake to build Vircadia. By default, the CMake process builds dependency files in Windows' `%TEMP%` directory, which is periodically cleared by the operating system. To prevent you from having to re-build the dependencies in the event that Windows clears that directory, we recommend that you create a `HIFI_VCPKG_BASE` environment variable linked to a directory somewhere on your machine. That directory will contain all dependency files until you manually remove them.
 
 To create this variable:
 * Naviagte to 'Edit the System Environment Variables' Through the start menu.
@@ -68,7 +68,7 @@ To create this variable:
 ### Step 5. Running CMake to Generate Build Files
 
 Run Command Prompt from Start and run the following commands:
-`cd "%HIFI_DIR%"`
+`cd "%VIRCADIA_DIR%"`
 `mkdir build`
 `cd build`
 
@@ -78,11 +78,11 @@ Run `cmake .. -G "Visual Studio 15 Win64"`.
 #### If you're using Visual Studio 2019,
 Run `cmake .. -G "Visual Studio 16 2019" -A x64`.
 
-Where `%HIFI_DIR%` is the directory for the highfidelity repository.
+Where `%VIRCADIA_DIR%` is the directory for the Vircadia repository.
 
 ### Step 6. Making a Build
 
-Open `%HIFI_DIR%\build\athena.sln` using Visual Studio.
+Open `%VIRCADIA_DIR%\build\vircadia.sln` using Visual Studio.
 
 Change the Solution Configuration (menu ribbon under the menu bar, next to the green play button) from "Debug" to "Release" for best performance.
 
@@ -98,22 +98,22 @@ Restart Visual Studio again.
 
 In Visual Studio, right+click "interface" under the Apps folder in Solution Explorer and select "Set as Startup Project". Run from the menu bar `Debug > Start Debugging`.
 
-Now, you should have a full build of Project Athena and be able to run the Interface using Visual Studio. Please check our [Docs](https://wiki.highfidelity.com/wiki/Main_Page) for more information regarding the programming workflow.
+Now, you should have a full build of Vircadia and be able to run the Interface using Visual Studio.
 
-Note: You can also run Interface by launching it from command line or File Explorer from `%HIFI_DIR%\build\interface\Release\interface.exe`
+Note: You can also run Interface by launching it from command line or File Explorer from `%VIRCADIA_DIR%\build\interface\Release\interface.exe`
 
 ## Troubleshooting
 
 For any problems after Step #6, first try this:
-* Delete your locally cloned copy of the highfidelity repository
+* Delete your locally cloned copy of the Vircadia repository
 * Restart your computer
 * Redownload the [repository](https://github.com/kasenvr/project-athena)
 * Restart directions from Step #6
 
 #### CMake gives you the same error message repeatedly after the build fails
 
-Remove `CMakeCache.txt` found in the `%HIFI_DIR%\build` directory.
+Remove `CMakeCache.txt` found in the `%VIRCADIA_DIR%\build` directory.
 
 #### CMake can't find OpenSSL
 
-Remove `CMakeCache.txt` found in the `%HIFI_DIR%\build` directory. Verify that your HIFI_VCPKG_BASE environment variable is set and pointing to the correct location. Verify that the file `${HIFI_VCPKG_BASE}/installed/x64-windows/include/openssl/ssl.h` exists.
+Remove `CMakeCache.txt` found in the `%VIRCADIA_DIR%\build` directory. Verify that your HIFI_VCPKG_BASE environment variable is set and pointing to the correct location. Verify that the file `${HIFI_VCPKG_BASE}/installed/x64-windows/include/openssl/ssl.h` exists.
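The renamed Windows steps condense to a short sequence. A sketch using Git Bash syntax (`$VIRCADIA_DIR` mirrors the guide's Command Prompt `%VIRCADIA_DIR%`; the directory location is whatever you chose when cloning):

```bash
# Condensed sketch of Steps 5-6, assuming a Git Bash shell on Windows.
cd "$VIRCADIA_DIR"
mkdir -p build && cd build
cmake .. -G "Visual Studio 16 2019" -A x64   # VS 2017: cmake .. -G "Visual Studio 15 Win64"
# Then open build/vircadia.sln, switch the configuration to Release, and build "interface".
```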
(file name not shown in this capture)

@@ -270,7 +270,6 @@ find_package( Threads )
 add_definitions(-DGLM_FORCE_RADIANS)
 add_definitions(-DGLM_ENABLE_EXPERIMENTAL)
 add_definitions(-DGLM_FORCE_CTOR_INIT)
-add_definitions(-DGLM_LANG_STL11_FORCED) # Workaround for GLM not detecting support for C++11 templates on Android
 
 if (WIN32)
   # Deal with fakakta Visual Studo 2017 bug
(file name not shown in this capture)

@@ -976,9 +976,9 @@ while (true) {
 
 #### [4.3.4] Source files (header and implementation) must include a boilerplate.
 
-Boilerplates should include the filename, location, creator, copyright Project Athena contributors, and Apache 2.0 License
+Boilerplates should include the filename, location, creator, copyright Vircadia contributors, and Apache 2.0 License
 information. This should be placed at the top of the file. If editing an existing file that is copyright High Fidelity, add a
-second copyright line, copyright Project Athena contributors.
+second copyright line, copyright Vircadia contributors.
 
 ```cpp
 //
@@ -987,7 +987,7 @@ second copyright line, copyright Vircadia contributors.
 //
 // Created by Stephen Birarda on 15 Feb 2013.
 // Copyright 2013 High Fidelity, Inc.
-// Copyright 2020 Project Athena contributors.
+// Copyright 2020 Vircadia contributors.
 //
 // This is where you could place an optional one line comment about the file.
 //
(file name not shown in this capture)

@@ -21,7 +21,7 @@ Contributing
 
 ```
 git remote add upstream https://github.com/kasenvr/project-athena
-git pull upstream kasen/core
+git pull upstream master
 ```
 
 Resolve any conflicts that arise with this step.
@@ -29,7 +29,7 @@ Contributing
 7. Push to your fork
 
 ```
-git push origin kasen/core
+git push origin new_branch_name
 ```
 8. Submit a pull request
 
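The two changed commands reflect the repository's move from the `kasen/core` branch to `master`. The updated contributor flow in one place, as a sketch (the `git checkout -b` step is an assumption to make the snippet self-contained; `new_branch_name` is the placeholder from the diff itself):

```bash
# Keep a fork in sync and push a feature branch, per the updated guide.
git remote add upstream https://github.com/kasenvr/project-athena
git pull upstream master               # was: git pull upstream kasen/core
git checkout -b new_branch_name        # hypothetical feature branch (assumed step)
git push origin new_branch_name        # was: git push origin kasen/core
```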
INSTALL.md (23 changed lines)
@@ -1,10 +1,10 @@
 # Creating an Installer
 
-Follow the [build guide](BUILD.md) to figure out how to build Project Athena for your platform.
+Follow the [build guide](BUILD.md) to figure out how to build Vircadia for your platform.
 
 During generation, CMake should produce an `install` target and a `package` target.
 
-The `install` target will copy the Project Athena targets and their dependencies to your `CMAKE_INSTALL_PREFIX`.
+The `install` target will copy the Vircadia targets and their dependencies to your `CMAKE_INSTALL_PREFIX`.
 This variable is set by the `project(hifi)` command in `CMakeLists.txt` to `C:/Program Files/hifi` and stored in `build/CMakeCache.txt`
 
 ### Packaging
@@ -15,7 +15,7 @@ To produce an installer, run the `package` target.
 
 To produce an executable installer on Windows, the following are required:
 
-1. [7-zip](<https://www.7-zip.org/download.html>)
+1. [7-zip](<https://www.7-zip.org/download.html>)
 
 1. [Nullsoft Scriptable Install System](http://nsis.sourceforge.net/Download) - 3.04
    Install using defaults (will install to `C:\Program Files (x86)\NSIS`)
@@ -56,22 +56,23 @@ To produce an executable installer on Windows, the following are required:
 1. Copy `Release\ApplicationID.dll` to `C:\Program Files (x86)\NSIS\Plugins\x86-ansi\`
 1. Copy `ReleaseUnicode\ApplicationID.dll` to `C:\Program Files (x86)\NSIS\Plugins\x86-unicode\`
 
-1. [npm](<https://www.npmjs.com/get-npm>)
+1. [Node.JS and NPM](<https://www.npmjs.com/get-npm>)
    1. Install version 10.15.0 LTS
 
 1. Perform a clean cmake from a new terminal.
-1. Open the `athena.sln` solution and select the Release configuration.
+1. Open the `vircadia.sln` solution with elevated (administrator) permissions on Visual Studio and select the **Release** configuration.
 1. Build the solution.
 1. Build CMakeTargets->INSTALL
 1. Build `packaged-server-console-npm-install` (found under **hidden/Server Console**)
 1. Build `packaged-server-console` (found under **Server Console**)
    This will add 2 folders to `build\server-console\` -
    `server-console-win32-x64` and `x64`
-1. Build CMakeTargets->PACKAGE
-   Installer is now available in `build\_CPack_Packages\win64\NSIS`
+1. Build CMakeTargets->PACKAGE
+   The installer is now available in `build\_CPack_Packages\win64\NSIS`
 
 #### OS X
 1. [npm](<https://www.npmjs.com/get-npm>)
-   Install version 10.15.0 LTS
+   Install version 12.16.3 LTS
 
 1. Perform a clean CMake.
 1. Perform a Release build of ALL_BUILD
@@ -80,3 +81,9 @@ To produce an executable installer on Windows, the following are required:
    Sandbox-darwin-x64
 1. Perform a Release build of `package`
    Installer is now available in `build/_CPack_Packages/Darwin/DragNDrop
+
+### FAQ
+
+1. **Problem:** Failure to open a file. ```File: failed opening file "\FOLDERSHARE\XYZSRelease\...\Credits.rtf" Error in script "C:\TFS\XYZProject\Releases\NullsoftInstaller\XYZWin7Installer.nsi" on line 77 -- aborting creation process```
+1. **Cause:** The complete path (current directory + relative path) has to be < 260 characters to any of the relevant files.
+1. **Solution:** Move your build and packaging folder as high up in the drive as possible to prevent an overage.
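The Windows steps drive the `INSTALL` and `PACKAGE` targets from inside Visual Studio; the same targets can also be driven from a shell via CMake's generic build-tool mode. A rough sketch, assuming the target names match the solution targets named above:

```bash
# Rough command-line equivalent of the Visual Studio packaging steps (assumption, not from INSTALL.md).
cd build
cmake --build . --config Release                    # build the solution
cmake --build . --config Release --target INSTALL   # CMakeTargets->INSTALL
cmake --build . --config Release --target PACKAGE   # CMakeTargets->PACKAGE
# The installer should land in build/_CPack_Packages/win64/NSIS.
```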
LICENSE (4 changed lines)
@@ -1,7 +1,7 @@
 Copyright (c) 2013-2019, High Fidelity, Inc.
-Copyright (c) 2019-2020, Project Athena Contributors.
+Copyright (c) 2019-2020, Vircadia contributors.
 All rights reserved.
-https://projectathena.io
+https://vircadia.com
 
 Licensed under the Apache License version 2.0 (the "License");
 You may not use this software except in compliance with the License.
(file name not shown in this capture)

@@ -12,11 +12,11 @@ Vircadia is a 3D social software project seeking to incrementally bring about a
 
 ### How to build the Interface
 
-[For Windows](https://github.com/kasenvr/project-athena/blob/kasen/core/BUILD_WIN.md)
+[For Windows](https://github.com/kasenvr/project-athena/blob/master/BUILD_WIN.md)
 
-[For Linux](https://github.com/kasenvr/project-athena/blob/kasen/core/BUILD_LINUX.md)
+[For Linux](https://github.com/kasenvr/project-athena/blob/master/BUILD_LINUX.md)
 
-[For Linux - Athena Builder](https://github.com/daleglass/athena-builder)
+[For Linux - Athena Builder](https://github.com/kasenvr/vircadia-builder)
 
 ### How to deploy a Server
 
@@ -24,7 +24,7 @@ Vircadia is a 3D social software project seeking to incrementally bring about a
 
 ### How to build a Server
 
-[For Linux - Athena Builder](https://github.com/daleglass/athena-builder)
+[For Linux - Athena Builder](https://github.com/kasenvr/vircadia-builder)
 
 ### Boot to Metaverse: The Goal
 
(file name not shown in this capture)

@@ -1,3 +1,5 @@
+# THIS DOCUMENT IS OUTDATED
+
 High Fidelity (hifi) is an early-stage technology lab experimenting with Virtual Worlds and VR.
 
 This repository contains the source to many of the components in our
@@ -15,7 +17,7 @@ Come chat with us in [our Gitter](https://gitter.im/highfidelity/hifi) if you ha
 
 Documentation
 =========
-Documentation is available at [docs.highfidelity.com](https://docs.highfidelity.com), if something is missing, please suggest it via a new job on Worklist (add to the hifi-docs project).
+Documentation is available at [docs.highfidelity.com](https://docs.highfidelity.com/), if something is missing, please suggest it via a new job on Worklist (add to the hifi-docs project).
 
 There is also detailed [documentation on our coding standards](CODING_STANDARD.md).
 
(file name not shown in this capture)

@@ -27,9 +27,9 @@
     <string name="online">Online</string>
     <string name="signup">Sign Up</string>
     <string name="signup_uppercase">SIGN UP</string>
-    <string name="creating_account">Creating your High Fidelity account</string>
+    <string name="creating_account">Creating your Vircadia account</string>
     <string name="signup_email_username_or_password_incorrect">Email, username or password incorrect.</string>
-    <string name="signedin_welcome">You are now signed into High Fidelity</string>
+    <string name="signedin_welcome">You are now signed into Vircadia</string>
     <string name="logged_in_welcome">You are now logged in!</string>
     <string name="welcome">Welcome</string>
     <string name="cancel">Cancel</string>
(file name not shown in this capture)

@@ -144,10 +144,10 @@ void ScriptableAvatar::update(float deltatime) {
     }
     _animationDetails.currentFrame = currentFrame;
 
-    const std::vector<HFMJoint>& modelJoints = _bind->getHFMModel().joints;
+    const QVector<HFMJoint>& modelJoints = _bind->getHFMModel().joints;
     QStringList animationJointNames = _animation->getJointNames();
 
-    const auto nJoints = (int)modelJoints.size();
+    const int nJoints = modelJoints.size();
     if (_jointData.size() != nJoints) {
         _jointData.resize(nJoints);
     }
(file name not shown in this capture)

@@ -18,20 +18,20 @@ macro(GENERATE_INSTALLERS)
   if (CLIENT_ONLY)
     set(_PACKAGE_NAME_EXTRA "-Interface")
     set(INSTALLER_TYPE "client_only")
-    string(REGEX REPLACE "Project Athena" "Project Athena Interface" _DISPLAY_NAME ${BUILD_ORGANIZATION})
+    string(REGEX REPLACE "Vircadia" "Vircadia Interface" _DISPLAY_NAME ${BUILD_ORGANIZATION})
   elseif (SERVER_ONLY)
     set(_PACKAGE_NAME_EXTRA "-Sandbox")
     set(INSTALLER_TYPE "server_only")
-    string(REGEX REPLACE "Project Athena" "Project Athena Sandbox" _DISPLAY_NAME ${BUILD_ORGANIZATION})
+    string(REGEX REPLACE "Vircadia" "Vircadia Sandbox" _DISPLAY_NAME ${BUILD_ORGANIZATION})
   else ()
     set(_DISPLAY_NAME ${BUILD_ORGANIZATION})
     set(INSTALLER_TYPE "full")
   endif ()
 
   set(CPACK_PACKAGE_NAME ${_DISPLAY_NAME})
-  set(CPACK_PACKAGE_VENDOR "Project Athena")
+  set(CPACK_PACKAGE_VENDOR "Vircadia")
   set(CPACK_PACKAGE_VERSION ${BUILD_VERSION})
-  set(CPACK_PACKAGE_FILE_NAME "ProjectAthena-Alpha${_PACKAGE_NAME_EXTRA}-${BUILD_VERSION}")
+  set(CPACK_PACKAGE_FILE_NAME "Vircadia-Alpha${_PACKAGE_NAME_EXTRA}-${BUILD_VERSION}")
   set(CPACK_NSIS_DISPLAY_NAME ${_DISPLAY_NAME})
   set(CPACK_NSIS_PACKAGE_NAME ${_DISPLAY_NAME})
   if (PR_BUILD)
@@ -118,11 +118,11 @@ macro(GENERATE_INSTALLERS)
   set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_SOURCE_DIR}/LICENSE")
 
   if (BUILD_CLIENT)
-    cpack_add_component(${CLIENT_COMPONENT} DISPLAY_NAME "Project Athena Interface")
+    cpack_add_component(${CLIENT_COMPONENT} DISPLAY_NAME "Vircadia Interface")
   endif ()
 
   if (BUILD_SERVER)
-    cpack_add_component(${SERVER_COMPONENT} DISPLAY_NAME "Project Athena Sandbox")
+    cpack_add_component(${SERVER_COMPONENT} DISPLAY_NAME "Vircadia Sandbox")
   endif ()
 
   include(CPack)
(file name not shown in this capture)

@@ -35,7 +35,7 @@ macro(SET_PACKAGING_PARAMETERS)
     set(DEPLOY_PACKAGE TRUE)
     set(PRODUCTION_BUILD 1)
     set(BUILD_VERSION ${RELEASE_NUMBER})
-    set(BUILD_ORGANIZATION "Project Athena")
+    set(BUILD_ORGANIZATION "Vircadia")
     set(HIGH_FIDELITY_PROTOCOL "hifi")
     set(HIGH_FIDELITY_APP_PROTOCOL "hifiapp")
     set(INTERFACE_BUNDLE_NAME "interface")
@@ -60,7 +60,7 @@ macro(SET_PACKAGING_PARAMETERS)
     set(DEPLOY_PACKAGE TRUE)
     set(PR_BUILD 1)
     set(BUILD_VERSION "PR${RELEASE_NUMBER}")
-    set(BUILD_ORGANIZATION "Project Athena - PR${RELEASE_NUMBER}")
+    set(BUILD_ORGANIZATION "Vircadia - PR${RELEASE_NUMBER}")
     set(INTERFACE_BUNDLE_NAME "interface")
     set(INTERFACE_ICON_PREFIX "interface-beta")
 
@@ -69,7 +69,7 @@ macro(SET_PACKAGING_PARAMETERS)
   else ()
     set(DEV_BUILD 1)
     set(BUILD_VERSION "dev")
-    set(BUILD_ORGANIZATION "Project Athena - ${BUILD_VERSION}")
+    set(BUILD_ORGANIZATION "Vircadia - ${BUILD_VERSION}")
     set(INTERFACE_BUNDLE_NAME "interface")
     set(INTERFACE_ICON_PREFIX "interface-beta")
 
@@ -171,21 +171,21 @@ macro(SET_PACKAGING_PARAMETERS)
 
   # shortcut names
   if (PRODUCTION_BUILD)
-    set(INTERFACE_SHORTCUT_NAME "Project Athena")
+    set(INTERFACE_SHORTCUT_NAME "Vircadia")
     set(CONSOLE_SHORTCUT_NAME "Console")
     set(SANDBOX_SHORTCUT_NAME "Sandbox")
     set(APP_USER_MODEL_ID "com.highfidelity.console")
   else ()
-    set(INTERFACE_SHORTCUT_NAME "Project Athena - ${BUILD_VERSION_NO_SHA}")
+    set(INTERFACE_SHORTCUT_NAME "Vircadia - ${BUILD_VERSION_NO_SHA}")
     set(CONSOLE_SHORTCUT_NAME "Console - ${BUILD_VERSION_NO_SHA}")
     set(SANDBOX_SHORTCUT_NAME "Sandbox - ${BUILD_VERSION_NO_SHA}")
   endif ()
 
   set(INTERFACE_HF_SHORTCUT_NAME "${INTERFACE_SHORTCUT_NAME}")
-  set(CONSOLE_HF_SHORTCUT_NAME "Project Athena ${CONSOLE_SHORTCUT_NAME}")
-  set(SANDBOX_HF_SHORTCUT_NAME "Project Athena ${SANDBOX_SHORTCUT_NAME}")
+  set(CONSOLE_HF_SHORTCUT_NAME "Vircadia ${CONSOLE_SHORTCUT_NAME}")
+  set(SANDBOX_HF_SHORTCUT_NAME "Vircadia ${SANDBOX_SHORTCUT_NAME}")
 
-  set(PRE_SANDBOX_INTERFACE_SHORTCUT_NAME "Project Athena")
+  set(PRE_SANDBOX_INTERFACE_SHORTCUT_NAME "Vircadia")
   set(PRE_SANDBOX_CONSOLE_SHORTCUT_NAME "Server Console")
 
   # check if we need to find signtool
(file name not shown in this capture)

@@ -19,4 +19,5 @@ macro(TARGET_PYTHON)
     message(FATAL_ERROR "Unable to locate Python interpreter 3.5 or higher")
   endif()
 endif()
+message("Using the Python interpreter located at: " ${HIFI_PYTHON_EXEC})
 endmacro()
(file name not shown in this capture)

@@ -3,8 +3,8 @@ include(vcpkg_common_functions)
 vcpkg_from_github(
     OUT_SOURCE_PATH SOURCE_PATH
     REPO ValveSoftware/openvr
-    REF v1.0.16
-    SHA512 967356563ba4232da5361510c7519d3058e09eced4571aadc00d8a75ab1f299a0aebda2b0b10b0ffb6c6a443fd718634d0c0103964e289961449c93e8d7b9d02
+    REF v1.11.11
+    SHA512 25bddb0e82eea091fe5101d0d3de1de7bb81b4504adc0c8d8e687d2502c0167bc5a11e68bc343d7563fb4db7c917e9d0e2ea99bc1d8016d479874b0c6bd7f121
    HEAD_REF master
 )
 
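Bumping a vcpkg port's `REF` like this requires recomputing the `SHA512` of the new source archive. One way to obtain it, assuming the GitHub tag tarball that `vcpkg_from_github` downloads (vcpkg will also print the expected versus actual hash if an install is attempted with a stale one):

```bash
# Hedged sketch: compute the SHA512 vcpkg expects for the new OpenVR tag.
curl -L https://github.com/ValveSoftware/openvr/archive/v1.11.11.tar.gz | sha512sum
```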
(file name not shown in this capture)

@@ -720,7 +720,7 @@ Function InstallTypesPage
   StrCpy $OffsetUnits u
   StrCpy $Express "0"
 
-  ${NSD_CreateRadioButton} 30% $CurrentOffset$OffsetUnits 100% 10u "Express Install (Recommended)$\nInstalls Project Athena Interface and Project Athena Sandbox"
+  ${NSD_CreateRadioButton} 30% $CurrentOffset$OffsetUnits 100% 10u "Express Install (Recommended)$\nInstalls Vircadia Interface and Vircadia Sandbox"
   pop $ExpressInstallRadioButton
   ${NSD_OnClick} $ExpressInstallRadioButton ChangeExpressLabel
   IntOp $CurrentOffset $CurrentOffset + 15
@@ -973,7 +973,7 @@ Function ReadPostInstallOptions
 
   ${If} @CLIENT_COMPONENT_CONDITIONAL@
     ${LogText} "Option: Install Client"
-    ; check if the user asked for a desktop shortcut to Project Athena
+    ; check if the user asked for a desktop shortcut to Vircadia
     ${NSD_GetState} $DesktopClientCheckbox $DesktopClientState
     ${LogText} "Option: Create Client Desktop Shortcut: $DesktopClientState"
   ${EndIf}
@@ -1027,7 +1027,7 @@ Function HandlePostInstallOptions
   ${EndIf}
 
   ${If} @CLIENT_COMPONENT_CONDITIONAL@
-    ; check if the user asked for a desktop shortcut to Project Athena
+    ; check if the user asked for a desktop shortcut to Vircadia
     ${If} $DesktopClientState == ${BST_CHECKED}
       CreateShortCut "$DESKTOP\@INTERFACE_HF_SHORTCUT_NAME@.lnk" "$INSTDIR\@INTERFACE_WIN_EXEC_NAME@"
       !insertmacro WriteInstallOption "@CLIENT_DESKTOP_SHORTCUT_REG_KEY@" YES
@@ -1088,7 +1088,7 @@ Function HandlePostInstallOptions
   ClearErrors
 
   ; copy the data from production build to this PR build
-  CopyFiles "$APPDATA\Project Athena\*" $0
+  CopyFiles "$APPDATA\Vircadia\*" $0
 
   ; handle an error in copying files
   IfErrors 0 NoError
(file name not shown in this capture)

@@ -24,7 +24,7 @@
 <div class="row">
   <div class="col-md-12">
     <span class='step-description'>
-      <a target='_blank' href='https://docs.highfidelity.com/create-and-explore/start-working-in-your-sandbox/place-names'>Place names</a> are similar to web addresses. Users who want to visit your domain can
+      <a target='_blank' href='https://docs.vircadia.dev/create-and-explore/start-working-in-your-sandbox/place-names'>Place names</a> are similar to web addresses. Users who want to visit your domain can
       enter its Place Name in High Fidelity's Interface. You can choose a Place Name for your domain.</br>
       Your domain may also be reachable by <b>IP address</b>.
     </span>
(file name not shown in this capture)

@@ -159,7 +159,7 @@ elseif (WIN32)
   set(CONFIGURE_ICON_RC_OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/Icon.rc")
   configure_file("${HF_CMAKE_DIR}/templates/Icon.rc.in" ${CONFIGURE_ICON_RC_OUTPUT})
 
-  set(APP_FULL_NAME "Project Athena")
+  set(APP_FULL_NAME "Vircadia")
   set(CONFIGURE_VERSION_INFO_RC_OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/VersionInfo.rc")
   configure_file("${HF_CMAKE_DIR}/templates/VersionInfo.rc.in" ${CONFIGURE_VERSION_INFO_RC_OUTPUT})
 
(file name not shown in this capture)

@@ -596,7 +596,7 @@
 
 <h2>Want to learn more?</h2>
 <p>You can find out much more about the blockchain and about commerce in High Fidelity by visiting our Docs site:</p>
-<p><a href="http://docs.highfidelity.com" class="btn">Visit High Fidelity's Docs</a></p>
+<p><a href="http://docs.vircadia.dev" class="btn">Visit High Fidelity's Docs</a></p>
 <hr>
 
 </div>
(file name not shown in this capture)

@@ -6,7 +6,7 @@ import controlsUit 1.0
 
 WebView {
     id: webview
-    url: "https://projectathena.io/"
+    url: "https://vircadia.com/"
     profile: FileTypeProfile;
 
     property var parentRoot: null
(file name not shown in this capture)

@@ -229,7 +229,7 @@ Item {
     }
 
     function openDocs() {
-        Qt.openUrlExternally("https://docs.projectathena.dev/create/avatars/package-avatar.html");
+        Qt.openUrlExternally("https://docs.vircadia.dev/create/avatars/package-avatar.html");
     }
 
     function openVideo() {
(file name not shown in this capture)

@@ -318,7 +318,7 @@ Item {
     text: "This item is not for sale yet, <a href='#'>learn more</a>."
 
     onLinkActivated: {
-        Qt.openUrlExternally("https://docs.projectathena.dev/sell/add-item/upload-avatar.html");
+        Qt.openUrlExternally("https://docs.vircadia.dev/sell/add-item/upload-avatar.html");
     }
 }
 
(file name not shown in this capture)

@@ -7,7 +7,7 @@ MessageBox {
     popup.onButton2Clicked = callback;
     popup.titleText = 'Specify Avatar URL'
     popup.bodyText = 'This will not overwrite your existing favorite if you are wearing one.<br>' +
-        '<a href="https://docs.vircadia.dev/create/avatars.html">' +
+        '<a href="https://docs.vircadia.dev/create/avatars/create-avatars.html">' +
         'Learn to make a custom avatar by opening this link on your desktop.' +
         '</a>'
     popup.inputText.visible = true;
(file name not shown in this capture)

@@ -778,7 +778,7 @@ Rectangle {
     lightboxPopup.bodyText = "Rezzing this content set will replace the existing environment and all of the items in this domain. " +
         "If you want to save the state of the content in this domain, create a backup before proceeding.<br><br>" +
         "For more information about backing up and restoring content, " +
-        "<a href='https://docs.projectathena.dev/host/maintain-domain/backup-domain.html'>" +
+        "<a href='https://docs.vircadia.dev/host/maintain-domain/backup-domain.html'>" +
         "click here to open info on your desktop browser.";
     lightboxPopup.button1text = "CANCEL";
     lightboxPopup.button1method = function() {
(file name not shown in this capture)

@@ -602,7 +602,7 @@ Rectangle {
     lightboxPopup.bodyText = "Rezzing this content set will replace the existing environment and all of the items in this domain. " +
         "If you want to save the state of the content in this domain, create a backup before proceeding.<br><br>" +
         "For more information about backing up and restoring content, " +
-        "<a href='https://docs.projectathena.dev/host/maintain-domain/backup-domain.html'>" +
+        "<a href='https://docs.vircadia.dev/host/maintain-domain/backup-domain.html'>" +
         "click here to open info on your desktop browser.";
     lightboxPopup.button1text = "CANCEL";
     lightboxPopup.button1method = function() {
(file name not shown in this capture)

@@ -207,7 +207,7 @@ At the moment, there is currently no way to convert HFC to other currencies. Sta
 if (link === "#privateKeyPath") {
     Qt.openUrlExternally("file:///" + root.keyFilePath.substring(0, root.keyFilePath.lastIndexOf('/')));
 } else if (link === "#blockchain") {
-    Qt.openUrlExternally("https://docs.projectathena.dev/explore/shop.html");
+    Qt.openUrlExternally("https://docs.vircadia.dev/explore/shop.html");
 } else if (link === "#bank") {
     if ((Account.metaverseServerURL).toString().indexOf("staging") >= 0) {
         Qt.openUrlExternally("hifi://hifiqa-master-metaverse-staging"); // So that we can test in staging.
(file name not shown in this capture)

@@ -54,7 +54,7 @@ Rectangle {
     textFormat: Text.StyledText
     linkColor: "#00B4EF"
     color: "white"
-    text: "<a href=\"https:/github.com/kasenvr/hifi-community\">Project Athena Github</a>."
+    text: "<a href=\"https:/github.com/kasenvr/project-athena\">Vircadia Github</a>."
     size: 20
     onLinkActivated: {
         HiFiAbout.openUrl("https:/github.com/kasenvr/project-athena");
@@ -117,7 +117,7 @@ Rectangle {
     Item { height: 20; width: 1 }
     RalewayRegular {
         color: "white"
-        text: "© 2019 - 2020 Project Athena Contributors."
+        text: "© 2019-2020 Vircadia contributors."
         size: 14
     }
     RalewayRegular {
(file name not shown in this capture)

@@ -1092,8 +1092,8 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
 {
     // identify gpu as early as possible to help identify OpenGL initialization errors.
     auto gpuIdent = GPUIdent::getInstance();
-    setCrashAnnotation("gpu_name", gpuIdent->getName().toStdString());
-    setCrashAnnotation("gpu_driver", gpuIdent->getDriver().toStdString());
+    setCrashAnnotation("sentry[contexts][gpu][name]", gpuIdent->getName().toStdString());
+    setCrashAnnotation("sentry[contexts][gpu][version]", gpuIdent->getDriver().toStdString());
     setCrashAnnotation("gpu_memory", std::to_string(gpuIdent->getMemory()));
 }
 
@@ -1139,7 +1139,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
     QFontDatabase::addApplicationFont(PathUtils::resourcesPath() + "fonts/Graphik-SemiBold.ttf");
     QFontDatabase::addApplicationFont(PathUtils::resourcesPath() + "fonts/Graphik-Regular.ttf");
     QFontDatabase::addApplicationFont(PathUtils::resourcesPath() + "fonts/Graphik-Medium.ttf");
-    _window->setWindowTitle("Vircadia Interface");
+    _window->setWindowTitle("Vircadia");
 
     Model::setAbstractViewStateInterface(this); // The model class will sometimes need to know view state details from us
 
@@ -3166,7 +3166,7 @@ void Application::showLoginScreen() {
         QJsonObject loginData = {};
         loginData["action"] = "login dialog popped up";
         UserActivityLogger::getInstance().logAction("encourageLoginDialog", loginData);
-        _window->setWindowTitle("Vircadia Interface");
+        _window->setWindowTitle("Vircadia");
     } else {
         resumeAfterLoginDialogActionTaken();
     }
@@ -3177,7 +3177,7 @@ void Application::showLoginScreen() {
 #endif
 }
 
-static const QUrl AUTHORIZED_EXTERNAL_QML_SOURCE { "https://content.highfidelity.com/Experiences/Releases" };
+static const QUrl AUTHORIZED_EXTERNAL_QML_SOURCE { "https://cdn.vircadia.com/community-apps/applications" };
 
 void Application::initializeUi() {
 
@@ -3196,14 +3196,16 @@ void Application::initializeUi() {
         safeURLS += settingsSafeURLS;
 
         // END PULL SAFEURLS FROM INTERFACE.JSON Settings
 
-        bool isInWhitelist = false; // assume unsafe
-        for (const auto& str : safeURLS) {
-            if (!str.isEmpty() && str.endsWith(".qml") && url.toString().endsWith(".qml") &&
-                url.toString().startsWith(str)) {
-                qCDebug(interfaceapp) << "Found matching url!" << url.host();
-                isInWhitelist = true;
-                return true;
+        if (AUTHORIZED_EXTERNAL_QML_SOURCE.isParentOf(url)) {
+            return true;
+        } else {
+            for (const auto& str : safeURLS) {
+                if (!str.isEmpty() && str.endsWith(".qml") && url.toString().endsWith(".qml") &&
+                    url.toString().startsWith(str)) {
+                    qCDebug(interfaceapp) << "Found matching url!" << url.host();
+                    return true;
+                }
             }
         }
 
@@ -7062,7 +7064,7 @@ void Application::updateWindowTitle() const {
     auto accountManager = DependencyManager::get<AccountManager>();
     auto isInErrorState = nodeList->getDomainHandler().isInErrorState();
 
-    QString buildVersion = " - Vircadia Interface - "
+    QString buildVersion = " - Vircadia - "
         + (BuildInfo::BUILD_TYPE == BuildInfo::BuildType::Stable ? QString("Version") : QString("Build"))
         + " " + applicationVersion();
 
@@ -7072,7 +7074,7 @@ void Application::updateWindowTitle() const {
         nodeList->getDomainHandler().isConnected() ? "" : " (NOT CONNECTED)";
     QString username = accountManager->getAccountInfo().getUsername();
 
-    setCrashAnnotation("username", username.toStdString());
+    setCrashAnnotation("sentry[user][username]", username.toStdString());
 
     QString currentPlaceName;
     if (isServerlessMode()) {
@@ -7746,7 +7748,7 @@ bool Application::askToReplaceDomainContent(const QString& url) {
     static const QString infoText = simpleWordWrap("Your domain's content will be replaced with a new content set. "
         "If you want to save what you have now, create a backup before proceeding. For more information about backing up "
        "and restoring content, visit the documentation page at: ", MAX_CHARACTERS_PER_LINE) +
-        "\nhttps://docs.projectathena.dev/host/maintain-domain/backup-domain.html";
+        "\nhttps://docs.vircadia.dev/host/maintain-domain/backup-domain.html";
 
     ModalDialogListener* dig = OffscreenUi::asyncQuestion("Are you sure you want to replace this domain's content set?",
         infoText, QMessageBox::Yes | QMessageBox::No, QMessageBox::No);
(file name not shown in this capture)

@@ -84,10 +84,9 @@ bool startCrashHandler(std::string appPath) {
     std::vector<std::string> arguments;
 
     std::map<std::string, std::string> annotations;
-    annotations["token"] = BACKTRACE_TOKEN;
-    annotations["format"] = "minidump";
-    annotations["version"] = BuildInfo::VERSION.toStdString();
-    annotations["build_number"] = BuildInfo::BUILD_NUMBER.toStdString();
+    annotations["sentry[release]"] = BACKTRACE_TOKEN;
+    annotations["sentry[contexts][app][app_version]"] = BuildInfo::VERSION.toStdString();
+    annotations["sentry[contexts][app][app_build]"] = BuildInfo::BUILD_NUMBER.toStdString();
     annotations["build_type"] = BuildInfo::BUILD_TYPE_STRING.toStdString();
 
     auto machineFingerPrint = uuidStringWithoutCurlyBraces(FingerprintUtils::getMachineFingerprint());
(file name not shown in this capture)

@@ -20,7 +20,7 @@ class FancyCamera : public Camera {
 
 /**jsdoc
  * The <code>Camera</code> API provides access to the "camera" that defines your view in desktop and HMD display modes.
- * The High Fidelity camera has axes <code>x</code> = right, <code>y</code> = up, <code>-z</code> = forward.
+ * The Vircadia camera has axes <code>x</code> = right, <code>y</code> = up, <code>-z</code> = forward.
  *
  * @namespace Camera
  *
(file name not shown in this capture)

@@ -617,6 +617,12 @@ Menu::Menu() {
                                            false,
                                            &UserActivityLogger::getInstance(),
                                            SLOT(disable(bool)));
+    addCheckableActionToQMenuAndActionHash(networkMenu,
+                                           MenuOption::DisableCrashLogger,
+                                           0,
+                                           false,
+                                           &UserActivityLogger::getInstance(),
+                                           SLOT(crashMonitorDisable(bool)));
     addActionToQMenuAndActionHash(networkMenu, MenuOption::ShowDSConnectTable, 0,
                                   qApp, SLOT(loadDomainConnectionDialog()));
 
(file name not shown in this capture)

@@ -86,6 +86,7 @@ namespace MenuOption {
     const QString DeleteAvatarEntitiesBookmark = "Delete Avatar Entities Bookmark";
     const QString DeleteBookmark = "Delete Bookmark...";
     const QString DisableActivityLogger = "Disable Activity Logger";
+    const QString DisableCrashLogger = "Disable Crash Logger";
     const QString DisableEyelidAdjustment = "Disable Eyelid Adjustment";
     const QString DisableLightEntities = "Disable Light Entities";
     const QString DisplayCrashOptions = "Display Crash Options";
(file name not shown in this capture)

@@ -80,7 +80,7 @@ QVariantHash ModelPropertiesDialog::getMapping() const {
 
     // update the joint indices
     QVariantHash jointIndices;
-    for (size_t i = 0; i < _hfmModel.joints.size(); i++) {
+    for (int i = 0; i < _hfmModel.joints.size(); i++) {
         jointIndices.insert(_hfmModel.joints.at(i).name, QString::number(i));
     }
     mapping.insert(JOINT_INDEX_FIELD, jointIndices);
(file name not shown in this capture)

@@ -55,7 +55,7 @@ static QStringList HAND_MAPPING_SUFFIXES = {
     "HandThumb1",
 };
 
-const QUrl PACKAGE_AVATAR_DOCS_BASE_URL = QUrl("https://docs.projectathena.dev/create/avatars/package-avatar.html");
+const QUrl PACKAGE_AVATAR_DOCS_BASE_URL = QUrl("https://docs.vircadia.dev/create/avatars/package-avatar.html");
 
 AvatarDoctor::AvatarDoctor(const QUrl& avatarFSTFileUrl) :
     _avatarFSTFileUrl(avatarFSTFileUrl) {
@@ -79,7 +79,7 @@ void AvatarDoctor::startDiagnosing() {
     _missingTextureCount = 0;
     _unsupportedTextureCount = 0;
 
-    const auto resource = DependencyManager::get<ModelCache>()->getModelResource(_avatarFSTFileUrl);
+    const auto resource = DependencyManager::get<ModelCache>()->getGeometryResource(_avatarFSTFileUrl);
     resource->refresh();
 
     const auto resourceLoaded = [this, resource](bool success) {
@@ -99,12 +99,12 @@ void AvatarDoctor::startDiagnosing() {
     }
 
     // RIG
-    if (avatarModel.joints.empty()) {
+    if (avatarModel.joints.isEmpty()) {
         addError("Avatar has no rig.", "no-rig");
     } else {
         auto jointNames = avatarModel.getJointNames();
 
-        if (avatarModel.joints.size() > NETWORKED_JOINTS_LIMIT) {
+        if (avatarModel.joints.length() > NETWORKED_JOINTS_LIMIT) {
             addError(tr( "Avatar has over %n bones.", "", NETWORKED_JOINTS_LIMIT), "maximum-bone-limit");
         }
         // Avatar does not have Hips bone mapped
@@ -297,7 +297,7 @@ void AvatarDoctor::startDiagnosing() {
     if (resource->isLoaded()) {
         resourceLoaded(!resource->isFailed());
     } else {
-        connect(resource.data(), &ModelResource::finished, this, resourceLoaded);
+        connect(resource.data(), &GeometryResource::finished, this, resourceLoaded);
     }
 } else {
     addError("Model file cannot be opened", "missing-file");
(file name not shown in this capture)

@@ -53,7 +53,7 @@ private:
     int _materialMappingCount = 0;
     int _materialMappingLoadedCount = 0;
 
-    ModelResource::Pointer _model;
+    GeometryResource::Pointer _model;
 
     bool _isDiagnosing = false;
 };
(file name not shown in this capture)

@@ -972,7 +972,7 @@ void MyAvatar::simulate(float deltaTime, bool inView) {
         recorder->recordFrame(FRAME_TYPE, toFrame(*this));
     }
 
-    locationChanged(true, false);
+    locationChanged(true, true);
     // if a entity-child of this avatar has moved outside of its queryAACube, update the cube and tell the entity server.
     auto entityTreeRenderer = qApp->getEntities();
     EntityTreePointer entityTree = entityTreeRenderer ? entityTreeRenderer->getTree() : nullptr;
@@ -981,16 +981,7 @@ void MyAvatar::simulate(float deltaTime, bool inView) {
         entityTree->withWriteLock([&] {
             zoneInteractionProperties = entityTreeRenderer->getZoneInteractionProperties();
             EntityEditPacketSender* packetSender = qApp->getEntityEditPacketSender();
-            forEachDescendant([&](SpatiallyNestablePointer object) {
-                locationChanged(true, false);
-                // we need to update attached queryAACubes in our own local tree so point-select always works
-                // however we don't want to flood the update pipeline with AvatarEntity updates, so we assume
-                // others have all info required to properly update queryAACube of AvatarEntities on their end
-                EntityItemPointer entity = std::dynamic_pointer_cast<EntityItem>(object);
-                bool iShouldTellServer = !(entity && entity->isAvatarEntity());
-                const bool force = false;
-                entityTree->updateEntityQueryAACube(object, packetSender, force, iShouldTellServer);
-            });
+            entityTree->updateEntityQueryAACube(shared_from_this(), packetSender, false, true);
         });
         bool isPhysicsEnabled = qApp->isPhysicsEnabled();
         bool zoneAllowsFlying = zoneInteractionProperties.first;
@@ -1988,7 +1979,7 @@ void MyAvatar::loadData() {
 
     // Flying preferences must be loaded before calling setFlyingEnabled()
     Setting::Handle<bool> firstRunVal { Settings::firstRun, true };
-    setFlyingHMDPref(firstRunVal.get() ? false : _flyingHMDSetting.get());
+    setFlyingHMDPref(firstRunVal.get() ? true : _flyingHMDSetting.get());
     setMovementReference(firstRunVal.get() ? false : _movementReferenceSetting.get());
     setDriveGear1(firstRunVal.get() ? DEFAULT_GEAR_1 : _driveGear1Setting.get());
     setDriveGear2(firstRunVal.get() ? DEFAULT_GEAR_2 : _driveGear2Setting.get());
@@ -2483,7 +2474,7 @@ void MyAvatar::setSkeletonModelURL(const QUrl& skeletonModelURL) {
 
     if (_fullAvatarModelName.isEmpty()) {
         // Store the FST file name into preferences
-        const auto& mapping = _skeletonModel->getNetworkModel()->getMapping();
+        const auto& mapping = _skeletonModel->getGeometry()->getMapping();
         if (mapping.value("name").isValid()) {
             _fullAvatarModelName = mapping.value("name").toString();
         }
@@ -2491,7 +2482,7 @@ void MyAvatar::setSkeletonModelURL(const QUrl& skeletonModelURL) {
 
     initHeadBones();
     _skeletonModel->setCauterizeBoneSet(_headBoneSet);
-    _fstAnimGraphOverrideUrl = _skeletonModel->getNetworkModel()->getAnimGraphOverrideUrl();
+    _fstAnimGraphOverrideUrl = _skeletonModel->getGeometry()->getAnimGraphOverrideUrl();
     initAnimGraph();
     initFlowFromFST();
 }
(file name not shown in this capture)

@@ -762,7 +762,7 @@ public:
     * <p>Note: When using pre-built animation data, it's critical that the joint orientation of the source animation and target
     * rig are equivalent, since the animation data applies absolute values onto the joints. If the orientations are different,
     * the avatar will move in unpredictable ways. For more information about avatar joint orientation standards, see
-    * <a href="https://docs.projectathena.dev/create/avatars/avatar-standards.html">Avatar Standards</a>.
+    * <a href="https://docs.vircadia.dev/create/avatars/avatar-standards.html">Avatar Standards</a>.
     * @function MyAvatar.overrideRoleAnimation
     * @param {string} role - The animation role to override
     * @param {string} url - The URL to the animation file. Animation files need to be in glTF or FBX format, but only need to
@@ -1920,7 +1920,7 @@ public:
 
    /**jsdoc
     * Enables and disables flow simulation of physics on the avatar's hair, clothes, and body parts. See
-    * {@link https://docs.projectathena.dev/create/avatars/add-flow.html|Add Flow to Your Avatar} for more
+    * {@link https://docs.vircadia.dev/create/avatars/add-flow.html|Add Flow to Your Avatar} for more
     * information.
     * @function MyAvatar.useFlow
     * @param {boolean} isActive - <code>true</code> if flow simulation is enabled on the joint, <code>false</code> if it isn't.
@@ -2285,7 +2285,7 @@ public slots:
 
     /**jsdoc
      * Gets the URL of the override animation graph.
-     * <p>See {@link https://docs.projectathena.dev/create/avatars/custom-animations.html|Custom Avatar Animations} for
+     * <p>See {@link https://docs.vircadia.dev/create/avatars/custom-animations.html|Custom Avatar Animations} for
      * information on animation graphs.</p>
      * @function MyAvatar.getAnimGraphOverrideUrl
      * @returns {string} The URL of the override animation graph JSON file. <code>""</code> if there is no override animation
@@ -2295,7 +2295,7 @@ public slots:
 
     /**jsdoc
      * Sets the animation graph to use in preference to the default animation graph.
-     * <p>See {@link https://docs.projectathena.dev/create/avatars/custom-animations.html|Custom Avatar Animations} for
+     * <p>See {@link https://docs.vircadia.dev/create/avatars/custom-animations.html|Custom Avatar Animations} for
     * information on animation graphs.</p>
     * @function MyAvatar.setAnimGraphOverrideUrl
     * @param {string} url - The URL of the animation graph JSON file to use. Set to <code>""</code> to clear an override.
@@ -2304,7 +2304,7 @@ public slots:
 
     /**jsdoc
      * Gets the URL of animation graph (i.e., the avatar animation JSON) that's currently being used for avatar animations.
-     * <p>See {@link https://docs.projectathena.dev/create/avatars/custom-animations.html|Custom Avatar Animations} for
+     * <p>See {@link https://docs.vircadia.dev/create/avatars/custom-animations.html|Custom Avatar Animations} for
     * information on animation graphs.</p>
     * @function MyAvatar.getAnimGraphUrl
     * @returns {string} The URL of the current animation graph JSON file.
@@ -2315,7 +2315,7 @@ public slots:
 
     /**jsdoc
      * Sets the current animation graph (i.e., the avatar animation JSON) to use for avatar animations and makes it the default.
-     * <p>See {@link https://docs.projectathena.dev/create/avatars/custom-animations.html|Custom Avatar Animations} for
+     * <p>See {@link https://docs.vircadia.dev/create/avatars/custom-animations.html|Custom Avatar Animations} for
     * information on animation graphs.</p>
     * @function MyAvatar.setAnimGraphUrl
     * @param {string} url - The URL of the animation graph JSON file to use.
@@ -2702,7 +2702,7 @@ private:
 
     bool _enableFlying { false };
     bool _flyingPrefDesktop { true };
-    bool _flyingPrefHMD { false };
+    bool _flyingPrefHMD { true };
     bool _wasPushing { false };
     bool _isPushing { false };
     bool _isBeingPushed { false };
(file name not shown in this capture)

@@ -72,7 +72,7 @@ int main(int argc, const char* argv[]) {
     }
 
     QCommandLineParser parser;
-    parser.setApplicationDescription("High Fidelity");
+    parser.setApplicationDescription("Vircadia");
     QCommandLineOption versionOption = parser.addVersionOption();
     QCommandLineOption helpOption = parser.addHelpOption();
 
@@ -218,12 +218,12 @@ int main(int argc, const char* argv[]) {
     }
     qDebug() << "UserActivityLogger is enabled:" << ual.isEnabled();
 
-    if (ual.isEnabled()) {
+    qDebug() << "Crash handler logger is enabled:" << ual.isCrashMonitorEnabled();
+    if (ual.isCrashMonitorEnabled()) {
         auto crashHandlerStarted = startCrashHandler(argv[0]);
         qDebug() << "Crash handler started:" << crashHandlerStarted;
     }
 
     const QString& applicationName = getInterfaceSharedMemoryName();
     bool instanceMightBeRunning = true;
 #ifdef Q_OS_WIN
(file name not shown in this capture)

@@ -121,9 +121,8 @@ bool CollisionPick::isLoaded() const {
 bool CollisionPick::getShapeInfoReady(const CollisionRegion& pick) {
     if (_mathPick.shouldComputeShapeInfo()) {
         if (_cachedResource && _cachedResource->isLoaded()) {
-            // TODO: Model CollisionPick support
-            //computeShapeInfo(pick, *_mathPick.shapeInfo, _cachedResource);
-            //_mathPick.loaded = true;
+            computeShapeInfo(pick, *_mathPick.shapeInfo, _cachedResource);
+            _mathPick.loaded = true;
         } else {
             _mathPick.loaded = false;
         }
@@ -135,7 +134,7 @@ bool CollisionPick::getShapeInfoReady(const CollisionRegion& pick) {
     return _mathPick.loaded;
 }
 
-void CollisionPick::computeShapeInfoDimensionsOnly(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer<ModelResource> resource) {
+void CollisionPick::computeShapeInfoDimensionsOnly(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer<GeometryResource> resource) {
     ShapeType type = shapeInfo.getType();
     glm::vec3 dimensions = pick.transform.getScale();
     QString modelURL = (resource ? resource->getURL().toString() : "");
@@ -148,12 +147,241 @@ void CollisionPick::computeShapeInfoDimensionsOnly(const CollisionRegion& pick,
     }
 }
 
+void CollisionPick::computeShapeInfo(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer<GeometryResource> resource) {
+    // This code was copied and modified from RenderableModelEntityItem::computeShapeInfo
+    // TODO: Move to some shared code area (in entities-renderer? model-networking?)
+    // after we verify this is working and do a diff comparison with RenderableModelEntityItem::computeShapeInfo
+    // to consolidate the code.
+    // We may also want to make computeShapeInfo always abstract away from the gpu model mesh, like it does here.
+    const uint32_t TRIANGLE_STRIDE = 3;
+    const uint32_t QUAD_STRIDE = 4;
+
+    ShapeType type = shapeInfo.getType();
+    glm::vec3 dimensions = pick.transform.getScale();
+    if (type == SHAPE_TYPE_COMPOUND) {
+        // should never fall in here when collision model not fully loaded
+        // TODO: assert that all geometries exist and are loaded
+        //assert(_model && _model->isLoaded() && _compoundShapeResource && _compoundShapeResource->isLoaded());
+        const HFMModel& collisionModel = resource->getHFMModel();
+
+        ShapeInfo::PointCollection& pointCollection = shapeInfo.getPointCollection();
+        pointCollection.clear();
+        uint32_t i = 0;
+
+        // the way OBJ files get read, each section under a "g" line is its own meshPart. We only expect
+        // to find one actual "mesh" (with one or more meshParts in it), but we loop over the meshes, just in case.
+        foreach (const HFMMesh& mesh, collisionModel.meshes) {
+            // each meshPart is a convex hull
+            foreach (const HFMMeshPart &meshPart, mesh.parts) {
+                pointCollection.push_back(QVector<glm::vec3>());
+                ShapeInfo::PointList& pointsInPart = pointCollection[i];
+
+                // run through all the triangles and (uniquely) add each point to the hull
+                uint32_t numIndices = (uint32_t)meshPart.triangleIndices.size();
+                // TODO: assert rather than workaround after we start sanitizing HFMMesh higher up
+                //assert(numIndices % TRIANGLE_STRIDE == 0);
+                numIndices -= numIndices % TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer
+
+                for (uint32_t j = 0; j < numIndices; j += TRIANGLE_STRIDE) {
+                    glm::vec3 p0 = mesh.vertices[meshPart.triangleIndices[j]];
+                    glm::vec3 p1 = mesh.vertices[meshPart.triangleIndices[j + 1]];
+                    glm::vec3 p2 = mesh.vertices[meshPart.triangleIndices[j + 2]];
+                    if (!pointsInPart.contains(p0)) {
+                        pointsInPart << p0;
+                    }
+                    if (!pointsInPart.contains(p1)) {
+                        pointsInPart << p1;
+                    }
+                    if (!pointsInPart.contains(p2)) {
+                        pointsInPart << p2;
+                    }
+                }
+
+                // run through all the quads and (uniquely) add each point to the hull
+                numIndices = (uint32_t)meshPart.quadIndices.size();
+                // TODO: assert rather than workaround after we start sanitizing HFMMesh higher up
+                //assert(numIndices % QUAD_STRIDE == 0);
+                numIndices -= numIndices % QUAD_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer
+
+                for (uint32_t j = 0; j < numIndices; j += QUAD_STRIDE) {
+                    glm::vec3 p0 = mesh.vertices[meshPart.quadIndices[j]];
+                    glm::vec3 p1 = mesh.vertices[meshPart.quadIndices[j + 1]];
+                    glm::vec3 p2 = mesh.vertices[meshPart.quadIndices[j + 2]];
+                    glm::vec3 p3 = mesh.vertices[meshPart.quadIndices[j + 3]];
+                    if (!pointsInPart.contains(p0)) {
+                        pointsInPart << p0;
+                    }
+                    if (!pointsInPart.contains(p1)) {
+                        pointsInPart << p1;
+                    }
+                    if (!pointsInPart.contains(p2)) {
+                        pointsInPart << p2;
+                    }
+                    if (!pointsInPart.contains(p3)) {
+                        pointsInPart << p3;
+                    }
+                }
+
+                if (pointsInPart.size() == 0) {
+                    qCDebug(scriptengine) << "Warning -- meshPart has no faces";
+                    pointCollection.pop_back();
+                    continue;
+                }
+                ++i;
+            }
+        }
+
+        // We expect that the collision model will have the same units and will be displaced
+        // from its origin in the same way the visual model is. The visual model has
+        // been centered and probably scaled. We take the scaling and offset which were applied
+        // to the visual model and apply them to the collision model (without regard for the
+        // collision model's extents).
+
+        glm::vec3 scaleToFit = dimensions / resource->getHFMModel().getUnscaledMeshExtents().size();
+        // multiply each point by scale
+        for (int32_t i = 0; i < pointCollection.size(); i++) {
+            for (int32_t j = 0; j < pointCollection[i].size(); j++) {
+                // back compensate for registration so we can apply that offset to the shapeInfo later
+                pointCollection[i][j] = scaleToFit * pointCollection[i][j];
+            }
+        }
+        shapeInfo.setParams(type, dimensions, resource->getURL().toString());
+    } else if (type >= SHAPE_TYPE_SIMPLE_HULL && type <= SHAPE_TYPE_STATIC_MESH) {
+        const HFMModel& hfmModel = resource->getHFMModel();
+        int numHFMMeshes = hfmModel.meshes.size();
+        int totalNumVertices = 0;
+        for (int i = 0; i < numHFMMeshes; i++) {
+            const HFMMesh& mesh = hfmModel.meshes.at(i);
+            totalNumVertices += mesh.vertices.size();
+        }
+        const int32_t MAX_VERTICES_PER_STATIC_MESH = 1e6;
+        if (totalNumVertices > MAX_VERTICES_PER_STATIC_MESH) {
+            qWarning() << "model" << "has too many vertices" << totalNumVertices << "and will collide as a box.";
+            shapeInfo.setParams(SHAPE_TYPE_BOX, 0.5f * dimensions);
+            return;
+        }
+
+        auto& meshes = resource->getHFMModel().meshes;
+        int32_t numMeshes = (int32_t)(meshes.size());
+
+        const int MAX_ALLOWED_MESH_COUNT = 1000;
+        if (numMeshes > MAX_ALLOWED_MESH_COUNT) {
+            // too many will cause the deadlock timer to throw...
+            shapeInfo.setParams(SHAPE_TYPE_BOX, 0.5f * dimensions);
+            return;
+        }
+
+        ShapeInfo::PointCollection& pointCollection = shapeInfo.getPointCollection();
+        pointCollection.clear();
+        if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
+            pointCollection.resize(numMeshes);
+        } else {
+            pointCollection.resize(1);
+        }
+
+        ShapeInfo::TriangleIndices& triangleIndices = shapeInfo.getTriangleIndices();
+        triangleIndices.clear();
+
+        Extents extents;
+        int32_t meshCount = 0;
+        int32_t pointListIndex = 0;
+        for (auto& mesh : meshes) {
+            if (!mesh.vertices.size()) {

(capture ends here, mid-hunk)
|
||||
continue;
|
||||
}
|
||||
QVector<glm::vec3> vertices = mesh.vertices;
|
||||
|
||||
ShapeInfo::PointList& points = pointCollection[pointListIndex];
|
||||
|
||||
// reserve room
|
||||
int32_t sizeToReserve = (int32_t)(vertices.count());
|
||||
if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
|
||||
// a list of points for each mesh
|
||||
pointListIndex++;
|
||||
} else {
|
||||
// only one list of points
|
||||
sizeToReserve += (int32_t)points.size();
|
||||
}
|
||||
points.reserve(sizeToReserve);
|
||||
|
||||
// copy points
|
||||
const glm::vec3* vertexItr = vertices.cbegin();
|
||||
while (vertexItr != vertices.cend()) {
|
||||
glm::vec3 point = *vertexItr;
|
||||
points.push_back(point);
|
||||
extents.addPoint(point);
|
||||
++vertexItr;
|
||||
}
|
||||
|
||||
if (type == SHAPE_TYPE_STATIC_MESH) {
|
||||
// copy into triangleIndices
|
||||
size_t triangleIndicesCount = 0;
|
||||
for (const HFMMeshPart& meshPart : mesh.parts) {
|
||||
triangleIndicesCount += meshPart.triangleIndices.count();
|
||||
}
|
||||
triangleIndices.reserve((int)triangleIndicesCount);
|
||||
|
||||
for (const HFMMeshPart& meshPart : mesh.parts) {
|
||||
const int* indexItr = meshPart.triangleIndices.cbegin();
|
||||
while (indexItr != meshPart.triangleIndices.cend()) {
|
||||
triangleIndices.push_back(*indexItr);
|
||||
++indexItr;
|
||||
}
|
||||
}
|
||||
} else if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
|
||||
// for each mesh copy unique part indices, separated by special bogus (flag) index values
|
||||
for (const HFMMeshPart& meshPart : mesh.parts) {
|
||||
// collect unique list of indices for this part
|
||||
std::set<int32_t> uniqueIndices;
|
||||
auto numIndices = meshPart.triangleIndices.count();
|
||||
// TODO: assert rather than workaround after we start sanitizing HFMMesh higher up
|
||||
//assert(numIndices% TRIANGLE_STRIDE == 0);
|
||||
numIndices -= numIndices % TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer
|
||||
|
||||
auto indexItr = meshPart.triangleIndices.cbegin();
|
||||
while (indexItr != meshPart.triangleIndices.cend()) {
|
||||
uniqueIndices.insert(*indexItr);
|
||||
++indexItr;
|
||||
}
|
||||
|
||||
// store uniqueIndices in triangleIndices
|
||||
triangleIndices.reserve(triangleIndices.size() + (int32_t)uniqueIndices.size());
|
||||
for (auto index : uniqueIndices) {
|
||||
triangleIndices.push_back(index);
|
||||
}
|
||||
// flag end of part
|
||||
triangleIndices.push_back(END_OF_MESH_PART);
|
||||
}
|
||||
// flag end of mesh
|
||||
triangleIndices.push_back(END_OF_MESH);
|
||||
}
|
||||
++meshCount;
|
||||
}
|
||||
|
||||
// scale and shift
|
||||
glm::vec3 extentsSize = extents.size();
|
||||
glm::vec3 scaleToFit = dimensions / extentsSize;
|
||||
for (int32_t i = 0; i < 3; ++i) {
|
||||
if (extentsSize[i] < 1.0e-6f) {
|
||||
scaleToFit[i] = 1.0f;
|
||||
}
|
||||
}
|
||||
for (auto points : pointCollection) {
|
||||
for (int32_t i = 0; i < points.size(); ++i) {
|
||||
points[i] = (points[i] * scaleToFit);
|
||||
}
|
||||
}
|
||||
|
||||
shapeInfo.setParams(type, 0.5f * dimensions, resource->getURL().toString());
|
||||
}
|
||||
}
|
||||
|
||||
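Context for the SHAPE_TYPE_SIMPLE_COMPOUND encoding above: each part's unique indices are appended to one flat list, with END_OF_MESH_PART after every part and END_OF_MESH after every mesh. A minimal sketch of how a consumer might split that stream back apart; the sentinel values here are hypothetical stand-ins for the engine's constants:

```cpp
#include <cstdint>
#include <vector>

// Hypothetical stand-ins for the engine's sentinel values.
static const int32_t END_OF_MESH_PART = -1;
static const int32_t END_OF_MESH = -2;

// Split a flag-delimited index stream back into one index list per mesh part.
std::vector<std::vector<int32_t>> splitFlaggedIndices(const std::vector<int32_t>& flat) {
    std::vector<std::vector<int32_t>> parts;
    std::vector<int32_t> current;
    for (int32_t index : flat) {
        if (index == END_OF_MESH_PART || index == END_OF_MESH) {
            // either flag closes the current part; END_OF_MESH right after
            // END_OF_MESH_PART leaves an empty buffer and is skipped
            if (!current.empty()) {
                parts.push_back(current);
                current.clear();
            }
        } else {
            current.push_back(index);
        }
    }
    return parts;
}
```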
CollisionPick::CollisionPick(const PickFilter& filter, float maxDistance, bool enabled, bool scaleWithParent, CollisionRegion collisionRegion, PhysicsEnginePointer physicsEngine) :
    Pick(collisionRegion, filter, maxDistance, enabled),
    _scaleWithParent(scaleWithParent),
    _physicsEngine(physicsEngine) {
    if (collisionRegion.shouldComputeShapeInfo()) {
        _cachedResource = DependencyManager::get<ModelCache>()->getCollisionModelResource(collisionRegion.modelURL);
        _cachedResource = DependencyManager::get<ModelCache>()->getCollisionGeometryResource(collisionRegion.modelURL);
    }
    _mathPick.loaded = isLoaded();
}

@ -63,13 +63,14 @@ protected:
    bool isLoaded() const;
    // Returns true if _mathPick.shapeInfo is valid. Otherwise, attempts to get the _mathPick ready for use.
    bool getShapeInfoReady(const CollisionRegion& pick);
    void computeShapeInfoDimensionsOnly(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer<ModelResource> resource);
    void computeShapeInfo(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer<GeometryResource> resource);
    void computeShapeInfoDimensionsOnly(const CollisionRegion& pick, ShapeInfo& shapeInfo, QSharedPointer<GeometryResource> resource);
    void filterIntersections(std::vector<ContactTestResult>& intersections) const;

    bool _scaleWithParent;

    PhysicsEnginePointer _physicsEngine;
    QSharedPointer<ModelResource> _cachedResource;
    QSharedPointer<GeometryResource> _cachedResource;

    // Options for what information to get from collision results
    bool _includeNormals;

@ -254,7 +254,15 @@ void setupPreferences() {
    auto setter = [](bool value) { Menu::getInstance()->setIsOptionChecked(MenuOption::DisableActivityLogger, !value); };
    preferences->addPreference(new CheckPreference("Privacy", "Send data - High Fidelity uses information provided by your "
        "client to improve the product through the logging of errors, tracking of usage patterns, "
        "installation and system details, and crash events. By allowing High Fidelity to collect "
        "installation and system details. By allowing High Fidelity to collect this information "
        "you are helping to improve the product. ", getter, setter));
}

{
    auto getter = []()->bool { return !Menu::getInstance()->isOptionChecked(MenuOption::DisableCrashLogger); };
    auto setter = [](bool value) { Menu::getInstance()->setIsOptionChecked(MenuOption::DisableCrashLogger, !value); };
    preferences->addPreference(new CheckPreference("Privacy", "Send crashes - Vircadia uses information provided by your "
        "client to improve the product through crash reports. By allowing Vircadia to collect "
        "this information you are helping to improve the product. ", getter, setter));
}

@ -4,10 +4,10 @@
#include <QNetworkAccessManager>

struct Build {
    QString tag{ QString::null };
    QString tag;
    int latestVersion{ 0 };
    int buildNumber{ 0 };
    QString installerZipURL{ QString::null };
    QString installerZipURL;
};

struct Builds {

@ -244,7 +244,7 @@ void LauncherState::getCurrentClientVersion() {
    if (match.hasMatch()) {
        _currentClientVersion = match.captured("version");
    } else {
        _currentClientVersion = QString::null;
        _currentClientVersion.clear();
    }
    qDebug() << "Current client version is: " << _currentClientVersion;

@ -176,7 +176,7 @@ private:
    QString _displayName;
    QString _applicationErrorMessage;
    QString _currentClientVersion;
    QString _buildTag { QString::null };
    QString _buildTag;
    QString _contentCacheURL;
    QString _loginTokenResponse;
    QFile _clientZipFile;

@ -6,7 +6,7 @@
#include "LoginRequest.h"

struct UserSettings {
    QString homeLocation{ QString::null };
    QString homeLocation;
};

class UserSettingsRequest : public QObject {
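Several hunks above drop `QString::null` in favor of a plain default-constructed `QString` or a `clear()` call; `QString::null` has been deprecated in Qt 5, and a default-constructed `QString` is already null, so the brace initializers were redundant. A small illustration using standard Qt API:

```cpp
#include <QString>

void qstringNullExample() {
    QString tag;                 // default-constructed: isNull() and isEmpty() are both true
    Q_ASSERT(tag.isNull());

    tag = QStringLiteral("v1");  // now non-null
    tag.clear();                 // clears the contents and makes the string null again
    Q_ASSERT(tag.isNull());
}
```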
@ -20,17 +20,24 @@ AnimSkeleton::AnimSkeleton(const HFMModel& hfmModel) {

    _geometryOffset = hfmModel.offset;

    buildSkeletonFromJoints(hfmModel.joints, hfmModel.jointRotationOffsets);
    // convert to std::vector of joints
    std::vector<HFMJoint> joints;
    joints.reserve(hfmModel.joints.size());
    for (auto& joint : hfmModel.joints) {
        joints.push_back(joint);
    }
    buildSkeletonFromJoints(joints, hfmModel.jointRotationOffsets);

    // we make a copy of the inverseBindMatrices in order to prevent mutating the model bind pose
    // when we are dealing with a joint offset in the model
    for (uint32_t i = 0; i < (uint32_t)hfmModel.skinDeformers.size(); i++) {
        const auto& deformer = hfmModel.skinDeformers[i];
    for (int i = 0; i < (int)hfmModel.meshes.size(); i++) {
        const HFMMesh& mesh = hfmModel.meshes.at(i);
        std::vector<HFMCluster> dummyClustersList;

        for (uint32_t j = 0; j < (uint32_t)deformer.clusters.size(); j++) {
        for (int j = 0; j < mesh.clusters.size(); j++) {
            std::vector<glm::mat4> bindMatrices;
            // cast into a non-const reference, so we can mutate the FBXCluster
            HFMCluster& cluster = const_cast<HFMCluster&>(deformer.clusters.at(j));
            HFMCluster& cluster = const_cast<HFMCluster&>(mesh.clusters.at(j));

            HFMCluster localCluster;
            localCluster.jointIndex = cluster.jointIndex;
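The AnimSkeleton hunk above converts `hfmModel.joints` into a `std::vector` with an explicit reserve-and-push loop. Since QVector exposes STL-style iterators, the same conversion can also be written as a range construction; a sketch with a hypothetical joint type:

```cpp
#include <QVector>
#include <vector>

// Hypothetical reduced joint type standing in for HFMJoint.
struct Joint {
    int parentIndex { -1 };
};

// QVector exposes begin()/end(), so std::vector can copy it in one shot.
std::vector<Joint> toStdVector(const QVector<Joint>& src) {
    return std::vector<Joint>(src.begin(), src.end());
}
```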
@ -68,7 +68,7 @@ public:
    void dump(const AnimPoseVec& poses) const;

    std::vector<int> lookUpJointIndices(const std::vector<QString>& jointNames) const;
    const HFMCluster getClusterBindMatricesOriginalValues(int skinDeformerIndex, int clusterIndex) const { return _clusterBindMatrixOriginalValues[skinDeformerIndex][clusterIndex]; }
    const HFMCluster getClusterBindMatricesOriginalValues(const int meshIndex, const int clusterIndex) const { return _clusterBindMatrixOriginalValues[meshIndex][clusterIndex]; }

protected:
    void buildSkeletonFromJoints(const std::vector<HFMJoint>& joints, const QMap<int, glm::quat> jointOffsets);

@ -943,7 +943,7 @@ void Avatar::simulateAttachments(float deltaTime) {
    bool texturesLoaded = _attachmentModelsTexturesLoaded.at(i);

    // Watch for texture loading
    if (!texturesLoaded && model->getNetworkModel() && model->getNetworkModel()->areTexturesLoaded()) {
    if (!texturesLoaded && model->getGeometry() && model->getGeometry()->areTexturesLoaded()) {
        _attachmentModelsTexturesLoaded[i] = true;
        model->updateRenderItems();
    }

@ -207,7 +207,7 @@ public:
    /**jsdoc
     * Gets the default rotation of a joint (in the current avatar) relative to its parent.
     * <p>For information on the joint hierarchy used, see
     * <a href="https://docs.projectathena.dev/create/avatars/avatar-standards.html">Avatar Standards</a>.</p>
     * <a href="https://docs.vircadia.dev/create/avatars/avatar-standards.html">Avatar Standards</a>.</p>
     * @function MyAvatar.getDefaultJointRotation
     * @param {number} index - The joint index.
     * @returns {Quat} The default rotation of the joint if the joint index is valid, otherwise {@link Quat(0)|Quat.IDENTITY}.

@ -218,7 +218,7 @@ public:
     * Gets the default translation of a joint (in the current avatar) relative to its parent, in model coordinates.
     * <p><strong>Warning:</strong> These coordinates are not necessarily in meters.</p>
     * <p>For information on the joint hierarchy used, see
     * <a href="https://docs.projectathena.dev/create/avatars/avatar-standards.html">Avatar Standards</a>.</p>
     * <a href="https://docs.vircadia.dev/create/avatars/avatar-standards.html">Avatar Standards</a>.</p>
     * @function MyAvatar.getDefaultJointTranslation
     * @param {number} index - The joint index.
     * @returns {Vec3} The default translation of the joint (in model coordinates) if the joint index is valid, otherwise

@ -171,7 +171,7 @@ void SkeletonModel::simulate(float deltaTime, bool fullUpdate) {

    // FIXME: This texture loading logic should probably live in Avatar, to mirror RenderableModelEntityItem,
    // but Avatars don't get updates in the same way
    if (!_texturesLoaded && getNetworkModel() && getNetworkModel()->areTexturesLoaded()) {
    if (!_texturesLoaded && getGeometry() && getGeometry()->areTexturesLoaded()) {
        _texturesLoaded = true;
        updateRenderItems();
    }

@ -326,7 +326,7 @@ void SkeletonModel::computeBoundingShape() {
    }

    const HFMModel& hfmModel = getHFMModel();
    if (hfmModel.joints.empty() || _rig.indexOfJoint("Hips") == -1) {
    if (hfmModel.joints.isEmpty() || _rig.indexOfJoint("Hips") == -1) {
        // rootJointIndex == -1 if the avatar model has no skeleton
        return;
    }
@ -796,7 +796,7 @@ public:
     * @param {Quat} rotation - The rotation of the joint relative to its parent.
     * @param {Vec3} translation - The translation of the joint relative to its parent, in model coordinates.
     * @example <caption>Set your avatar to its default T-pose for a while.<br />
     * <img alt="Avatar in T-pose" src="https://apidocs.projectathena.dev/images/t-pose.png" /></caption>
     * <img alt="Avatar in T-pose" src="https://apidocs.vircadia.dev/images/t-pose.png" /></caption>
     * // Set all joint translations and rotations to defaults.
     * var i, length, rotation, translation;
     * for (i = 0, length = MyAvatar.getJointNames().length; i < length; i++) {
@ -860,7 +860,7 @@ public:

    /**jsdoc
     * Gets the rotation of a joint relative to its parent. For information on the joint hierarchy used, see
     * <a href="https://docs.projectathena.dev/create/avatars/avatar-standards.html">Avatar Standards</a>.
     * <a href="https://docs.vircadia.dev/create/avatars/avatar-standards.html">Avatar Standards</a>.
     * @function Avatar.getJointRotation
     * @param {number} index - The index of the joint.
     * @returns {Quat} The rotation of the joint relative to its parent.

@ -871,7 +871,7 @@ public:
     * Gets the translation of a joint relative to its parent, in model coordinates.
     * <p><strong>Warning:</strong> These coordinates are not necessarily in meters.</p>
     * <p>For information on the joint hierarchy used, see
     * <a href="https://docs.projectathena.dev/create/avatars/avatar-standards.html">Avatar Standards</a>.</p>
     * <a href="https://docs.vircadia.dev/create/avatars/avatar-standards.html">Avatar Standards</a>.</p>
     * @function Avatar.getJointTranslation
     * @param {number} index - The index of the joint.
     * @returns {Vec3} The translation of the joint relative to its parent, in model coordinates.

@ -904,7 +904,7 @@ public:
     * @param {string} name - The name of the joint.
     * @param {Quat} rotation - The rotation of the joint relative to its parent.
     * @example <caption>Set your avatar to its default T-pose then rotate its right arm.<br />
     * <img alt="Avatar in T-pose with arm rotated" src="https://apidocs.projectathena.dev/images/armpose.png" /></caption>
     * <img alt="Avatar in T-pose with arm rotated" src="https://apidocs.vircadia.dev/images/armpose.png" /></caption>
     * // Set all joint translations and rotations to defaults.
     * var i, length, rotation, translation;
     * for (i = 0, length = MyAvatar.getJointNames().length; i < length; i++) {

@ -939,7 +939,7 @@ public:
     * @param {Vec3} translation - The translation of the joint relative to its parent, in model coordinates.
     * @example <caption>Stretch your avatar's neck. Depending on the avatar you are using, you will either see a gap between
     * the head and body or you will see the neck stretched.<br />
     * <img alt="Avatar with neck stretched" src="https://apidocs.projectathena.dev/images/stretched-neck.png" /></caption>
     * <img alt="Avatar with neck stretched" src="https://apidocs.vircadia.dev/images/stretched-neck.png" /></caption>
     * // Stretch your avatar's neck.
     * MyAvatar.setJointTranslation("Neck", Vec3.multiply(2, MyAvatar.getJointTranslation("Neck")));
     *

@ -981,7 +981,7 @@ public:

    /**jsdoc
     * Gets the rotation of a joint relative to its parent. For information on the joint hierarchy used, see
     * <a href="https://docs.projectathena.dev/create/avatars/avatar-standards.html">Avatar Standards</a>.
     * <a href="https://docs.vircadia.dev/create/avatars/avatar-standards.html">Avatar Standards</a>.
     * @function Avatar.getJointRotation
     * @param {string} name - The name of the joint.
     * @returns {Quat} The rotation of the joint relative to its parent.

@ -996,7 +996,7 @@ public:
     * Gets the translation of a joint relative to its parent, in model coordinates.
     * <p><strong>Warning:</strong> These coordinates are not necessarily in meters.</p>
     * <p>For information on the joint hierarchy used, see
     * <a href="https://docs.projectathena.dev/create/avatars/avatar-standards.html">Avatar Standards</a>.</p>
     * <a href="https://docs.vircadia.dev/create/avatars/avatar-standards.html">Avatar Standards</a>.</p>
     * @function Avatar.getJointTranslation
     * @param {number} name - The name of the joint.
     * @returns {Vec3} The translation of the joint relative to its parent, in model coordinates.

@ -1041,7 +1041,7 @@ public:
     * @param {Quat[]} jointRotations - The rotations for all joints in the avatar. The values are in the same order as the
     * array returned by {@link MyAvatar.getJointNames}, or {@link Avatar.getJointNames} if using the <code>Avatar</code> API.
     * @example <caption>Set your avatar to its default T-pose then rotate its right arm.<br />
     * <img alt="Avatar in T-pose" src="https://apidocs.projectathena.dev/images/armpose.png" /></caption>
     * <img alt="Avatar in T-pose" src="https://apidocs.vircadia.dev/images/armpose.png" /></caption>
     * // Set all joint translations and rotations to defaults.
     * var i, length, rotation, translation;
     * for (i = 0, length = MyAvatar.getJointNames().length; i < length; i++) {

@ -1138,7 +1138,7 @@ public:
     * set <code>hasScriptedBlendshapes</code> back to <code>false</code> when the animation is complete.
     * @function Avatar.setBlendshape
     * @param {string} name - The name of the blendshape, per the
     * {@link https://docs.projectathena.dev/create/avatars/avatar-standards.html#blendshapes Avatar Standards}.
     * {@link https://docs.vircadia.dev/create/avatars/avatar-standards.html#blendshapes Avatar Standards}.
     * @param {number} value - A value between <code>0.0</code> and <code>1.0</code>.
     * @example <caption>Open your avatar's mouth wide.</caption>
     * MyAvatar.hasScriptedBlendshapes = true;

@ -34,7 +34,6 @@ HeadData::HeadData(AvatarData* owningAvatar) :
{
    _userProceduralAnimationFlags.assign((size_t)ProceduralAnimaitonTypeCount, true);
    _suppressProceduralAnimationFlags.assign((size_t)ProceduralAnimaitonTypeCount, false);
    computeBlendshapesLookupMap();
}

glm::quat HeadData::getRawOrientation() const {

@ -72,12 +71,6 @@ void HeadData::setOrientation(const glm::quat& orientation) {
    setHeadOrientation(orientation);
}

void HeadData::computeBlendshapesLookupMap(){
    for (int i = 0; i < (int)Blendshapes::BlendshapeCount; i++) {
        _blendshapeLookupMap[FACESHIFT_BLENDSHAPES[i]] = i;
    }
}

int HeadData::getNumSummedBlendshapeCoefficients() const {
    int maxSize = std::max(_blendshapeCoefficients.size(), _transientBlendshapeCoefficients.size());
    return maxSize;

@ -109,8 +102,8 @@ const QVector<float>& HeadData::getSummedBlendshapeCoefficients() {
void HeadData::setBlendshape(QString name, float val) {

    // Check to see if the named blendshape exists, and then set its value if it does
    auto it = _blendshapeLookupMap.find(name);
    if (it != _blendshapeLookupMap.end()) {
    auto it = BLENDSHAPE_LOOKUP_MAP.find(name);
    if (it != BLENDSHAPE_LOOKUP_MAP.end()) {
        if (_blendshapeCoefficients.size() <= it.value()) {
            _blendshapeCoefficients.resize(it.value() + 1);
        }

@ -135,8 +128,8 @@ void HeadData::setBlendshape(QString name, float val) {
}

int HeadData::getBlendshapeIndex(const QString& name) {
    auto it = _blendshapeLookupMap.find(name);
    int index = it != _blendshapeLookupMap.end() ? it.value() : -1;
    auto it = BLENDSHAPE_LOOKUP_MAP.find(name);
    int index = it != BLENDSHAPE_LOOKUP_MAP.end() ? it.value() : -1;
    return index;
}
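The HeadData changes above swap a shared `BLENDSHAPE_LOOKUP_MAP` for a per-instance `_blendshapeLookupMap` filled in `computeBlendshapesLookupMap()`. The lookup itself is the usual QMap find-or-default pattern; a reduced sketch with hypothetical names standing in for the engine's blendshape table:

```cpp
#include <QMap>
#include <QString>

// Hypothetical blendshape name table standing in for FACESHIFT_BLENDSHAPES.
static const QString kBlendshapeNames[] = { "EyeBlink_L", "EyeBlink_R", "JawOpen" };
static const int kBlendshapeCount = 3;

// Build the name -> index map once, as computeBlendshapesLookupMap() does.
QMap<QString, int> buildLookupMap() {
    QMap<QString, int> map;
    for (int i = 0; i < kBlendshapeCount; i++) {
        map[kBlendshapeNames[i]] = i;
    }
    return map;
}

// Find-or-default lookup; -1 signals "unknown blendshape", as in the hunk above.
int lookupBlendshapeIndex(const QMap<QString, int>& map, const QString& name) {
    auto it = map.find(name);
    return it != map.end() ? it.value() : -1;
}
```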
@ -155,8 +148,8 @@ static const QString JSON_AVATAR_HEAD_LOOKAT = QStringLiteral("lookAt");
QJsonObject HeadData::toJson() const {
    QJsonObject headJson;
    QJsonObject blendshapesJson;
    for (auto name : _blendshapeLookupMap.keys()) {
        auto index = _blendshapeLookupMap[name];
    for (auto name : BLENDSHAPE_LOOKUP_MAP.keys()) {
        auto index = BLENDSHAPE_LOOKUP_MAP[name];
        float value = 0.0f;
        if (index < _blendshapeCoefficients.size()) {
            value += _blendshapeCoefficients[index];

@ -125,7 +125,6 @@ protected:
    QVector<float> _blendshapeCoefficients;
    QVector<float> _transientBlendshapeCoefficients;
    QVector<float> _summedBlendshapeCoefficients;
    QMap<QString, int> _blendshapeLookupMap;
    AvatarData* _owningAvatar;

private:

@ -134,7 +133,6 @@ private:
    HeadData& operator= (const HeadData&);

    void setHeadOrientation(const glm::quat& orientation);
    void computeBlendshapesLookupMap();
};

#endif // hifi_HeadData_h

@ -90,11 +90,11 @@ void FBXBaker::replaceMeshNodeWithDraco(FBXNode& meshNode, const QByteArray& dra
    }
}

void FBXBaker::rewriteAndBakeSceneModels(const std::vector<hfm::Mesh>& meshes, const std::vector<hifi::ByteArray>& dracoMeshes, const std::vector<std::vector<hifi::ByteArray>>& dracoMaterialLists) {
void FBXBaker::rewriteAndBakeSceneModels(const QVector<hfm::Mesh>& meshes, const std::vector<hifi::ByteArray>& dracoMeshes, const std::vector<std::vector<hifi::ByteArray>>& dracoMaterialLists) {
    std::vector<int> meshIndexToRuntimeOrder;
    auto meshCount = (uint32_t)meshes.size();
    auto meshCount = (int)meshes.size();
    meshIndexToRuntimeOrder.resize(meshCount);
    for (uint32_t i = 0; i < meshCount; i++) {
    for (int i = 0; i < meshCount; i++) {
        meshIndexToRuntimeOrder[meshes[i].meshIndex] = i;
    }

@ -33,7 +33,7 @@ protected:
    virtual void bakeProcessedSource(const hfm::Model::Pointer& hfmModel, const std::vector<hifi::ByteArray>& dracoMeshes, const std::vector<std::vector<hifi::ByteArray>>& dracoMaterialLists) override;

private:
    void rewriteAndBakeSceneModels(const std::vector<hfm::Mesh>& meshes, const std::vector<hifi::ByteArray>& dracoMeshes, const std::vector<std::vector<hifi::ByteArray>>& dracoMaterialLists);
    void rewriteAndBakeSceneModels(const QVector<hfm::Mesh>& meshes, const std::vector<hifi::ByteArray>& dracoMeshes, const std::vector<std::vector<hifi::ByteArray>>& dracoMaterialLists);
    void replaceMeshNodeWithDraco(FBXNode& meshNode, const QByteArray& dracoMeshBytes, const std::vector<hifi::ByteArray>& dracoMaterialList);
};

@ -258,9 +258,9 @@ void MaterialBaker::addTexture(const QString& materialName, image::TextureUsage:
    }
};

void MaterialBaker::setMaterials(const std::vector<hfm::Material>& materials, const QString& baseURL) {
void MaterialBaker::setMaterials(const QHash<QString, hfm::Material>& materials, const QString& baseURL) {
    _materialResource = NetworkMaterialResourcePointer(new NetworkMaterialResource(), [](NetworkMaterialResource* ptr) { ptr->deleteLater(); });
    for (const auto& material : materials) {
    for (auto& material : materials) {
        _materialResource->parsedMaterials.names.push_back(material.name.toStdString());
        _materialResource->parsedMaterials.networkMaterials[material.name.toStdString()] = std::make_shared<NetworkMaterial>(material, baseURL);

@ -32,7 +32,7 @@ public:
    bool isURL() const { return _isURL; }
    QString getBakedMaterialData() const { return _bakedMaterialData; }

    void setMaterials(const std::vector<hfm::Material>& materials, const QString& baseURL);
    void setMaterials(const QHash<QString, hfm::Material>& materials, const QString& baseURL);
    void setMaterials(const NetworkMaterialResourcePointer& materialResource);

    NetworkMaterialResourcePointer getNetworkMaterialResource() const { return _materialResource; }

@ -265,7 +265,7 @@ void ModelBaker::bakeSourceCopy() {
        return;
    }

    if (!_hfmModel->materials.empty()) {
    if (!_hfmModel->materials.isEmpty()) {
        _materialBaker = QSharedPointer<MaterialBaker>(
            new MaterialBaker(_modelURL.fileName(), true, _bakedOutputDir),
            &MaterialBaker::deleteLater

@ -37,10 +37,10 @@ const QByteArray MESH = "Mesh";

void OBJBaker::bakeProcessedSource(const hfm::Model::Pointer& hfmModel, const std::vector<hifi::ByteArray>& dracoMeshes, const std::vector<std::vector<hifi::ByteArray>>& dracoMaterialLists) {
    // Write OBJ Data as FBX tree nodes
    createFBXNodeTree(_rootNode, hfmModel, dracoMeshes[0], dracoMaterialLists[0]);
    createFBXNodeTree(_rootNode, hfmModel, dracoMeshes[0]);
}

void OBJBaker::createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& hfmModel, const hifi::ByteArray& dracoMesh, const std::vector<hifi::ByteArray>& dracoMaterialList) {
void OBJBaker::createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& hfmModel, const hifi::ByteArray& dracoMesh) {
    // Make all generated nodes children of rootNode
    rootNode.children = { FBXNode(), FBXNode(), FBXNode() };
    FBXNode& globalSettingsNode = rootNode.children[0];

@ -100,22 +100,19 @@ void OBJBaker::createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& h
    }

    // Generating Objects node's child - Material node

    // Each material ID should only appear once thanks to deduplication in BuildDracoMeshTask, but we want to make sure they are created in the right order
    std::unordered_map<QString, uint32_t> materialIDToIndex;
    for (uint32_t materialIndex = 0; materialIndex < hfmModel->materials.size(); ++materialIndex) {
        const auto& material = hfmModel->materials[materialIndex];
        materialIDToIndex[material.materialID] = materialIndex;
    }

    // Create nodes for each material in the material list
    for (const auto& dracoMaterial : dracoMaterialList) {
        const QString materialID = QString(dracoMaterial);
        const uint32_t materialIndex = materialIDToIndex[materialID];
        const auto& material = hfmModel->materials[materialIndex];
    auto& meshParts = hfmModel->meshes[0].parts;
    for (auto& meshPart : meshParts) {
        FBXNode materialNode;
        materialNode.name = MATERIAL_NODE_NAME;
        setMaterialNodeProperties(materialNode, material.materialID, material, hfmModel);
        if (hfmModel->materials.size() == 1) {
            // case when no material information is provided, OBJSerializer considers it as a single default material
            for (auto& materialID : hfmModel->materials.keys()) {
                setMaterialNodeProperties(materialNode, materialID, hfmModel);
            }
        } else {
            setMaterialNodeProperties(materialNode, meshPart.materialID, hfmModel);
        }

        objectNode.children.append(materialNode);
    }

@ -156,10 +153,12 @@ void OBJBaker::createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& h
    }

// Set properties for material nodes
void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, const QString& materialName, const hfm::Material& material, const hfm::Model::Pointer& hfmModel) {
void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, QString material, const hfm::Model::Pointer& hfmModel) {
    auto materialID = nextNodeID();
    _materialIDs.push_back(materialID);
    materialNode.properties = { materialID, materialName, MESH };
    materialNode.properties = { materialID, material, MESH };

    HFMMaterial currentMaterial = hfmModel->materials[material];

    // Setting the hierarchy: Material -> Properties70 -> P -> Properties
    FBXNode properties70Node;

@ -171,7 +170,7 @@ void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, const QString& m
    pNodeDiffuseColor.name = P_NODE_NAME;
    pNodeDiffuseColor.properties.append({
        "DiffuseColor", "Color", "", "A",
        material.diffuseColor[0], material.diffuseColor[1], material.diffuseColor[2]
        currentMaterial.diffuseColor[0], currentMaterial.diffuseColor[1], currentMaterial.diffuseColor[2]
    });
}
properties70Node.children.append(pNodeDiffuseColor);

@ -182,7 +181,7 @@ void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, const QString& m
    pNodeSpecularColor.name = P_NODE_NAME;
    pNodeSpecularColor.properties.append({
        "SpecularColor", "Color", "", "A",
        material.specularColor[0], material.specularColor[1], material.specularColor[2]
        currentMaterial.specularColor[0], currentMaterial.specularColor[1], currentMaterial.specularColor[2]
    });
}
properties70Node.children.append(pNodeSpecularColor);

@ -193,7 +192,7 @@ void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, const QString& m
    pNodeShininess.name = P_NODE_NAME;
    pNodeShininess.properties.append({
        "Shininess", "Number", "", "A",
        material.shininess
        currentMaterial.shininess
    });
}
properties70Node.children.append(pNodeShininess);

@ -204,7 +203,7 @@ void OBJBaker::setMaterialNodeProperties(FBXNode& materialNode, const QString& m
    pNodeOpacity.name = P_NODE_NAME;
    pNodeOpacity.properties.append({
        "Opacity", "Number", "", "A",
        material.opacity
        currentMaterial.opacity
    });
}
properties70Node.children.append(pNodeOpacity);
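The OBJBaker hunks above write material values into an FBX `Properties70` node, one `P` child per property, each carrying a property name, type tags, and the value fields. A rough sketch of that nesting, using a simplified hypothetical node type in place of the engine's `FBXNode`:

```cpp
#include <string>
#include <vector>

// Hypothetical simplified stand-in for the engine's FBXNode.
struct Node {
    std::string name;
    std::vector<std::string> properties;
    std::vector<Node> children;
};

// Material -> Properties70 -> P, mirroring the hierarchy comment in the hunks above.
Node makeMaterialNode(double r, double g, double b) {
    Node p { "P", { "DiffuseColor", "Color", "", "A",
                    std::to_string(r), std::to_string(g), std::to_string(b) }, {} };
    Node properties70 { "Properties70", {}, { p } };
    return Node { "Material", {}, { properties70 } };
}
```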
@ -27,8 +27,8 @@ protected:
    virtual void bakeProcessedSource(const hfm::Model::Pointer& hfmModel, const std::vector<hifi::ByteArray>& dracoMeshes, const std::vector<std::vector<hifi::ByteArray>>& dracoMaterialLists) override;

private:
    void createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& hfmModel, const hifi::ByteArray& dracoMesh, const std::vector<hifi::ByteArray>& dracoMaterialList);
    void setMaterialNodeProperties(FBXNode& materialNode, const QString& materialName, const hfm::Material& material, const hfm::Model::Pointer& hfmModel);
    void createFBXNodeTree(FBXNode& rootNode, const hfm::Model::Pointer& hfmModel, const hifi::ByteArray& dracoMesh);
    void setMaterialNodeProperties(FBXNode& materialNode, QString material, const hfm::Model::Pointer& hfmModel);
    NodeID nextNodeID() { return _nodeID++; }

    NodeID _nodeID { 0 };

@ -248,7 +248,7 @@ void EntityTreeRenderer::clearDomainAndNonOwnedEntities() {
    for (const auto& entry : _entitiesInScene) {
        const auto& renderer = entry.second;
        const EntityItemPointer& entityItem = renderer->getEntity();
        if (!(entityItem->isLocalEntity() || entityItem->isMyAvatarEntity())) {
        if (entityItem && !(entityItem->isLocalEntity() || entityItem->isMyAvatarEntity())) {
            fadeOutRenderable(renderer);
        } else {
            savedEntities[entry.first] = entry.second;

@ -682,7 +682,7 @@ void EntityTreeRenderer::leaveDomainAndNonOwnedEntities() {
    QSet<EntityItemID> currentEntitiesInsideToSave;
    foreach (const EntityItemID& entityID, _currentEntitiesInside) {
        EntityItemPointer entityItem = getTree()->findEntityByEntityItemID(entityID);
        if (!(entityItem->isLocalEntity() || entityItem->isMyAvatarEntity())) {
        if (entityItem && !(entityItem->isLocalEntity() || entityItem->isMyAvatarEntity())) {
            emit leaveEntity(entityID);
            if (_entitiesScriptEngine) {
                _entitiesScriptEngine->callEntityScriptMethod(entityID, "leaveEntity");

@ -282,7 +282,7 @@ bool RenderableModelEntityItem::findDetailedParabolaIntersection(const glm::vec3
}

void RenderableModelEntityItem::fetchCollisionGeometryResource() {
    _collisionGeometryResource = DependencyManager::get<ModelCache>()->getCollisionModelResource(getCollisionShapeURL());
    _collisionGeometryResource = DependencyManager::get<ModelCache>()->getCollisionGeometryResource(getCollisionShapeURL());
}

bool RenderableModelEntityItem::unableToLoadCollisionShape() {

@ -357,6 +357,7 @@ bool RenderableModelEntityItem::isReadyToComputeShape() const {

void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) {
    const uint32_t TRIANGLE_STRIDE = 3;
    const uint32_t QUAD_STRIDE = 4;

    ShapeType type = getShapeType();

@ -379,35 +380,59 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) {

    ShapeInfo::PointCollection& pointCollection = shapeInfo.getPointCollection();
    pointCollection.clear();

    size_t numParts = 0;
    for (const HFMMesh& mesh : collisionGeometry.meshes) {
        numParts += mesh.triangleListMesh.parts.size();
    }
    pointCollection.reserve(numParts);
    uint32_t i = 0;

    // the way OBJ files get read, each section under a "g" line is its own meshPart. We only expect
    // to find one actual "mesh" (with one or more meshParts in it), but we loop over the meshes, just in case.
    for (const HFMMesh& mesh : collisionGeometry.meshes) {
        const hfm::TriangleListMesh& triangleListMesh = mesh.triangleListMesh;
    foreach (const HFMMesh& mesh, collisionGeometry.meshes) {
        // each meshPart is a convex hull
        for (const glm::ivec2& part : triangleListMesh.parts) {
        foreach (const HFMMeshPart &meshPart, mesh.parts) {
            pointCollection.push_back(QVector<glm::vec3>());
            ShapeInfo::PointList& pointsInPart = pointCollection[i];

            // run through all the triangles and (uniquely) add each point to the hull

            pointCollection.emplace_back();
            ShapeInfo::PointList& pointsInPart = pointCollection.back();

            uint32_t numIndices = (uint32_t)part.y;
            uint32_t numIndices = (uint32_t)meshPart.triangleIndices.size();
            // TODO: assert rather than workaround after we start sanitizing HFMMesh higher up
            //assert(numIndices % TRIANGLE_STRIDE == 0);
            numIndices -= numIndices % TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer
            uint32_t indexStart = (uint32_t)part.x;
            uint32_t indexEnd = indexStart + numIndices;
            for (uint32_t j = indexStart; j < indexEnd; ++j) {
                // NOTE: It seems odd to skip vertices when initializing a btConvexHullShape, but let's keep the behavior similar to the old behavior for now
                glm::vec3 point = triangleListMesh.vertices[triangleListMesh.indices[j]];
                if (std::find(pointsInPart.cbegin(), pointsInPart.cend(), point) == pointsInPart.cend()) {
                    pointsInPart.push_back(point);

            for (uint32_t j = 0; j < numIndices; j += TRIANGLE_STRIDE) {
                glm::vec3 p0 = mesh.vertices[meshPart.triangleIndices[j]];
                glm::vec3 p1 = mesh.vertices[meshPart.triangleIndices[j + 1]];
                glm::vec3 p2 = mesh.vertices[meshPart.triangleIndices[j + 2]];
                if (!pointsInPart.contains(p0)) {
                    pointsInPart << p0;
                }
                if (!pointsInPart.contains(p1)) {
                    pointsInPart << p1;
                }
                if (!pointsInPart.contains(p2)) {
                    pointsInPart << p2;
                }
            }

            // run through all the quads and (uniquely) add each point to the hull
            numIndices = (uint32_t)meshPart.quadIndices.size();
            // TODO: assert rather than workaround after we start sanitizing HFMMesh higher up
            //assert(numIndices % QUAD_STRIDE == 0);
            numIndices -= numIndices % QUAD_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer

            for (uint32_t j = 0; j < numIndices; j += QUAD_STRIDE) {
                glm::vec3 p0 = mesh.vertices[meshPart.quadIndices[j]];
                glm::vec3 p1 = mesh.vertices[meshPart.quadIndices[j + 1]];
                glm::vec3 p2 = mesh.vertices[meshPart.quadIndices[j + 2]];
                glm::vec3 p3 = mesh.vertices[meshPart.quadIndices[j + 3]];
                if (!pointsInPart.contains(p0)) {
                    pointsInPart << p0;
                }
                if (!pointsInPart.contains(p1)) {
                    pointsInPart << p1;
                }
                if (!pointsInPart.contains(p2)) {
                    pointsInPart << p2;
                }
                if (!pointsInPart.contains(p3)) {
                    pointsInPart << p3;
                }
            }

@ -416,6 +441,7 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) {
            pointCollection.pop_back();
            continue;
        }
        ++i;
    }
}
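The rewritten compound branch above deduplicates hull points with a `std::find` scan instead of `QVector::contains`; both are the same linear search over the part's point list. As a standalone helper:

```cpp
#include <algorithm>
#include <vector>

// Append a point only if an equal one is not already in the list,
// mirroring the std::find-based dedup in the hunk above.
// Linear per insert, which is acceptable for small convex-hull point sets.
template <typename T>
void pushUnique(std::vector<T>& points, const T& point) {
    if (std::find(points.cbegin(), points.cend(), point) == points.cend()) {
        points.push_back(point);
    }
}
```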
@ -430,8 +456,8 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) {
    // multiply each point by scale before handing the point-set off to the physics engine.
    // also determine the extents of the collision model.
    glm::vec3 registrationOffset = dimensions * (ENTITY_ITEM_DEFAULT_REGISTRATION_POINT - getRegistrationPoint());
    for (size_t i = 0; i < pointCollection.size(); i++) {
        for (size_t j = 0; j < pointCollection[i].size(); j++) {
    for (int32_t i = 0; i < pointCollection.size(); i++) {
        for (int32_t j = 0; j < pointCollection[i].size(); j++) {
            // back compensate for registration so we can apply that offset to the shapeInfo later
            pointCollection[i][j] = scaleToFit * (pointCollection[i][j] + model->getOffset()) - registrationOffset;
        }
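The loop above folds three corrections into each collision point: the model offset, the component-wise scale-to-fit factor, and a back-compensation for the entity's registration point. The per-point formula, written out as a small glm function:

```cpp
#include <glm/glm.hpp>

// scaled = scaleToFit * (p + modelOffset) - registrationOffset,
// matching the loop body in the hunk above (glm multiplies vec3s component-wise).
glm::vec3 transformCollisionPoint(const glm::vec3& p,
                                  const glm::vec3& scaleToFit,
                                  const glm::vec3& modelOffset,
                                  const glm::vec3& registrationOffset) {
    return scaleToFit * (p + modelOffset) - registrationOffset;
}
```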
@ -445,63 +471,46 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) {
    model->updateGeometry();

    // compute meshPart local transforms
    QVector<glm::mat4> localTransforms;
    const HFMModel& hfmModel = model->getHFMModel();
    int numHFMMeshes = hfmModel.meshes.size();
    int totalNumVertices = 0;
    glm::vec3 dimensions = getScaledDimensions();
    glm::mat4 invRegistraionOffset = glm::translate(dimensions * (getRegistrationPoint() - ENTITY_ITEM_DEFAULT_REGISTRATION_POINT));

    ShapeInfo::TriangleIndices& triangleIndices = shapeInfo.getTriangleIndices();
    triangleIndices.clear();

    Extents extents;
    int32_t shapeCount = 0;
    int32_t instanceIndex = 0;

    // NOTE: Each pointCollection corresponds to a mesh. Therefore, we should have one pointCollection per mesh instance
    // A mesh instance is a unique combination of mesh/transform. For every mesh instance, there are as many shapes as there are parts for that mesh.
    // We assume the shapes are grouped by mesh instance, and the group contains one of each mesh part.
    uint32_t numInstances = 0;
    std::vector<std::vector<std::vector<uint32_t>>> shapesPerInstancePerMesh;
    shapesPerInstancePerMesh.resize(hfmModel.meshes.size());
    for (uint32_t shapeIndex = 0; shapeIndex < hfmModel.shapes.size();) {
        const auto& shape = hfmModel.shapes[shapeIndex];
        uint32_t meshIndex = shape.mesh;
        const auto& mesh = hfmModel.meshes[meshIndex];
        uint32_t numMeshParts = (uint32_t)mesh.parts.size();
        assert(numMeshParts != 0);

        auto& shapesPerInstance = shapesPerInstancePerMesh[meshIndex];
        shapesPerInstance.emplace_back();

        auto& shapes = shapesPerInstance.back();
        shapes.resize(numMeshParts);
        std::iota(shapes.begin(), shapes.end(), shapeIndex);

        shapeIndex += numMeshParts;
        ++numInstances;
    for (int i = 0; i < numHFMMeshes; i++) {
        const HFMMesh& mesh = hfmModel.meshes.at(i);
        if (mesh.clusters.size() > 0) {
            const HFMCluster& cluster = mesh.clusters.at(0);
            auto jointMatrix = model->getRig().getJointTransform(cluster.jointIndex);
            // we backtranslate by the registration offset so we can apply that offset to the shapeInfo later
            localTransforms.push_back(invRegistraionOffset * jointMatrix * cluster.inverseBindMatrix);
        } else {
            localTransforms.push_back(invRegistraionOffset);
        }
        totalNumVertices += mesh.vertices.size();
    }

    const uint32_t MAX_ALLOWED_MESH_COUNT = 1000;
    if (numInstances > MAX_ALLOWED_MESH_COUNT) {
        // too many will cause the deadlock timer to throw...
        qWarning() << "model" << getModelURL() << "has too many collision meshes" << numInstances << "and will collide as a box.";
    const int32_t MAX_VERTICES_PER_STATIC_MESH = 1e6;
    if (totalNumVertices > MAX_VERTICES_PER_STATIC_MESH) {
        qWarning() << "model" << getModelURL() << "has too many vertices" << totalNumVertices << "and will collide as a box.";
        shapeInfo.setParams(SHAPE_TYPE_BOX, 0.5f * dimensions);
        return;
    }

    size_t totalNumVertices = 0;
    for (const auto& shapesPerInstance : shapesPerInstancePerMesh) {
        for (const auto& instanceShapes : shapesPerInstance) {
            const uint32_t firstShapeIndex = instanceShapes.front();
            const auto& firstShape = hfmModel.shapes[firstShapeIndex];
            const auto& mesh = hfmModel.meshes[firstShape.mesh];
            const auto& triangleListMesh = mesh.triangleListMesh;
            // Added once per instance per mesh
            totalNumVertices += triangleListMesh.vertices.size();
    std::vector<std::shared_ptr<const graphics::Mesh>> meshes;
    if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
        auto& hfmMeshes = _collisionGeometryResource->getHFMModel().meshes;
        meshes.reserve(hfmMeshes.size());
        for (auto& hfmMesh : hfmMeshes) {
            meshes.push_back(hfmMesh._mesh);
        }
    } else {
        meshes = model->getGeometry()->getMeshes();
    }
    const size_t MAX_VERTICES_PER_STATIC_MESH = 1e6;
    if (totalNumVertices > MAX_VERTICES_PER_STATIC_MESH) {
        qWarning() << "model" << getModelURL() << "has too many vertices" << totalNumVertices << "and will collide as a box.";
    int32_t numMeshes = (int32_t)(meshes.size());

    const int MAX_ALLOWED_MESH_COUNT = 1000;
    if (numMeshes > MAX_ALLOWED_MESH_COUNT) {
        // too many will cause the deadlock timer to throw...
        shapeInfo.setParams(SHAPE_TYPE_BOX, 0.5f * dimensions);
        return;
    }
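The instance-grouping code above walks `hfmModel.shapes` in blocks of `numMeshParts`, filling each block with consecutive shape indices via `std::iota`. A reduced sketch of that pattern, assuming the total shape count is a whole number of instances:

```cpp
#include <cstdint>
#include <numeric>
#include <vector>

// Group a flat run of shape indices into per-instance blocks of partsPerInstance,
// as the shapesPerInstancePerMesh loop above does.
std::vector<std::vector<uint32_t>> groupShapes(uint32_t shapeCount, uint32_t partsPerInstance) {
    std::vector<std::vector<uint32_t>> groups;
    for (uint32_t shapeIndex = 0; shapeIndex < shapeCount; shapeIndex += partsPerInstance) {
        std::vector<uint32_t> shapes(partsPerInstance);
        // fill with shapeIndex, shapeIndex + 1, ...
        std::iota(shapes.begin(), shapes.end(), shapeIndex);
        groups.push_back(std::move(shapes));
    }
    return groups;
}
```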
@ -509,118 +518,169 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) {
    ShapeInfo::PointCollection& pointCollection = shapeInfo.getPointCollection();
    pointCollection.clear();
    if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
        pointCollection.resize(numInstances);
        pointCollection.resize(numMeshes);
    } else {
        pointCollection.resize(1);
    }

    for (uint32_t meshIndex = 0; meshIndex < hfmModel.meshes.size(); ++meshIndex) {
        const auto& mesh = hfmModel.meshes[meshIndex];
        const auto& triangleListMesh = mesh.triangleListMesh;
        const auto& vertices = triangleListMesh.vertices;
        const auto& indices = triangleListMesh.indices;
        const std::vector<glm::ivec2>& parts = triangleListMesh.parts;
    ShapeInfo::TriangleIndices& triangleIndices = shapeInfo.getTriangleIndices();
    triangleIndices.clear();

        const auto& shapesPerInstance = shapesPerInstancePerMesh[meshIndex];
        for (const std::vector<uint32_t>& instanceShapes : shapesPerInstance) {
            ShapeInfo::PointList& points = pointCollection[instanceIndex];
    Extents extents;
    int32_t meshCount = 0;
    int32_t pointListIndex = 0;
    for (auto& mesh : meshes) {
        if (!mesh) {
            continue;
        }
        const gpu::BufferView& vertices = mesh->getVertexBuffer();
        const gpu::BufferView& indices = mesh->getIndexBuffer();
        const gpu::BufferView& parts = mesh->getPartBuffer();

            // reserve room
            int32_t sizeToReserve = (int32_t)(vertices.size());
            if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
                // a list of points for each instance
                instanceIndex++;
            } else {
                // only one list of points
                sizeToReserve += (int32_t)((gpu::Size)points.size());
            }
            points.reserve(sizeToReserve);

            // get mesh instance transform
            const uint32_t meshIndexOffset = (uint32_t)points.size();
            const uint32_t instanceShapeIndexForTransform = instanceShapes.front();
            const auto& instanceShapeForTransform = hfmModel.shapes[instanceShapeIndexForTransform];
            glm::mat4 localTransform;
            if (instanceShapeForTransform.joint != hfm::UNDEFINED_KEY) {
                auto jointMatrix = model->getRig().getJointTransform(instanceShapeForTransform.joint);
                // we backtranslate by the registration offset so we can apply that offset to the shapeInfo later
                if (instanceShapeForTransform.skinDeformer != hfm::UNDEFINED_KEY) {
                    const auto& skinDeformer = hfmModel.skinDeformers[instanceShapeForTransform.skinDeformer];
                    glm::mat4 inverseBindMatrix;
                    if (!skinDeformer.clusters.empty()) {
                        const auto& cluster = skinDeformer.clusters.back();
                        inverseBindMatrix = cluster.inverseBindMatrix;
                    }
                    localTransform = invRegistraionOffset * jointMatrix * inverseBindMatrix;
                } else {
                    localTransform = invRegistraionOffset * jointMatrix;
                }
            } else {
                localTransform = invRegistraionOffset;
            }
        ShapeInfo::PointList& points = pointCollection[pointListIndex];

            // copy points
            auto vertexItr = vertices.cbegin();
            while (vertexItr != vertices.cend()) {
                glm::vec3 point = extractTranslation(localTransform * glm::translate(*vertexItr));
                points.push_back(point);
                ++vertexItr;
            }
            for (const auto& instanceShapeIndex : instanceShapes) {
                const auto& instanceShape = hfmModel.shapes[instanceShapeIndex];
                extents.addExtents(instanceShape.transformedExtents);
            }
        // reserve room
        int32_t sizeToReserve = (int32_t)(vertices.getNumElements());
        if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
            // a list of points for each mesh
            pointListIndex++;
        } else {
            // only one list of points
            sizeToReserve += (int32_t)((gpu::Size)points.size());
        }
        points.reserve(sizeToReserve);

            if (type == SHAPE_TYPE_STATIC_MESH) {
                // copy into triangleIndices
                triangleIndices.reserve((int32_t)((gpu::Size)(triangleIndices.size()) + indices.size()));
                auto partItr = parts.cbegin();
                while (partItr != parts.cend()) {
                    auto numIndices = partItr->y;
        // copy points
        uint32_t meshIndexOffset = (uint32_t)points.size();
        const glm::mat4& localTransform = localTransforms[meshCount];
        gpu::BufferView::Iterator<const glm::vec3> vertexItr = vertices.cbegin<const glm::vec3>();
        while (vertexItr != vertices.cend<const glm::vec3>()) {
            glm::vec3 point = extractTranslation(localTransform * glm::translate(*vertexItr));
            points.push_back(point);
            extents.addPoint(point);
            ++vertexItr;
        }

        if (type == SHAPE_TYPE_STATIC_MESH) {
            // copy into triangleIndices
            triangleIndices.reserve((int32_t)((gpu::Size)(triangleIndices.size()) + indices.getNumElements()));
            gpu::BufferView::Iterator<const graphics::Mesh::Part> partItr = parts.cbegin<const graphics::Mesh::Part>();
            while (partItr != parts.cend<const graphics::Mesh::Part>()) {
                auto numIndices = partItr->_numIndices;
                if (partItr->_topology == graphics::Mesh::TRIANGLES) {
                    // TODO: assert rather than workaround after we start sanitizing HFMMesh higher up
                    //assert(numIndices % TRIANGLE_STRIDE == 0);
                    numIndices -= numIndices % TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer
                    auto indexItr = indices.cbegin() + partItr->x;

                    auto indexItr = indices.cbegin<const gpu::BufferView::Index>() + partItr->_startIndex;
                    auto indexEnd = indexItr + numIndices;
                    while (indexItr != indexEnd) {
                        triangleIndices.push_back(*indexItr + meshIndexOffset);
                        ++indexItr;
                    }
                    ++partItr;
                } else if (partItr->_topology == graphics::Mesh::TRIANGLE_STRIP) {
                    // TODO: resurrect assert after we start sanitizing HFMMesh higher up
                    //assert(numIndices > 2);

                    uint32_t approxNumIndices = TRIANGLE_STRIDE * numIndices;
                    if (approxNumIndices > (uint32_t)(triangleIndices.capacity() - triangleIndices.size())) {
                        // we underestimated the final size of triangleIndices so we pre-emptively expand it
                        triangleIndices.reserve(triangleIndices.size() + approxNumIndices);
                    }

                    auto indexItr = indices.cbegin<const gpu::BufferView::Index>() + partItr->_startIndex;
                    auto indexEnd = indexItr + (numIndices - 2);

                    // first triangle uses the first three indices
                    triangleIndices.push_back(*(indexItr++) + meshIndexOffset);
                    triangleIndices.push_back(*(indexItr++) + meshIndexOffset);
                    triangleIndices.push_back(*(indexItr++) + meshIndexOffset);

                    // the rest use previous and next index
                    uint32_t triangleCount = 1;
                    while (indexItr != indexEnd) {
                        if ((*indexItr) != graphics::Mesh::PRIMITIVE_RESTART_INDEX) {
                            if (triangleCount % 2 == 0) {
                                // even triangles use first two indices in order
                                triangleIndices.push_back(*(indexItr - 2) + meshIndexOffset);
                                triangleIndices.push_back(*(indexItr - 1) + meshIndexOffset);
                            } else {
                                // odd triangles swap order of first two indices
                                triangleIndices.push_back(*(indexItr - 1) + meshIndexOffset);
                                triangleIndices.push_back(*(indexItr - 2) + meshIndexOffset);
                            }
                            triangleIndices.push_back(*indexItr + meshIndexOffset);
                            ++triangleCount;
                        }
                        ++indexItr;
                    }
                }
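The TRIANGLE_STRIP branch above expands a strip into an independent triangle list, swapping the first two indices of every odd triangle to keep a consistent winding and skipping primitive-restart sentinels without resetting the triangle count, exactly as the hunk does. A standalone version of the same conversion, with a hypothetical restart sentinel:

```cpp
#include <cstdint>
#include <vector>

static const uint32_t RESTART_INDEX = 0xFFFFFFFFu;  // stand-in for PRIMITIVE_RESTART_INDEX

// Expand a triangle strip into a triangle list with consistent winding,
// mirroring the even/odd swap in the hunk above.
std::vector<uint32_t> stripToList(const std::vector<uint32_t>& strip) {
    std::vector<uint32_t> tris;
    if (strip.size() < 3) {
        return tris;
    }
    // first triangle uses the first three indices
    tris.push_back(strip[0]);
    tris.push_back(strip[1]);
    tris.push_back(strip[2]);
    uint32_t triangleCount = 1;
    // the rest use the previous two indices plus the next one
    for (size_t i = 3; i < strip.size(); ++i) {
        if (strip[i] == RESTART_INDEX) {
            continue;  // skip sentinels, as the engine code does
        }
        if (triangleCount % 2 == 0) {
            // even triangles use the previous two indices in order
            tris.push_back(strip[i - 2]);
            tris.push_back(strip[i - 1]);
        } else {
            // odd triangles swap them to preserve winding
            tris.push_back(strip[i - 1]);
            tris.push_back(strip[i - 2]);
        }
        tris.push_back(strip[i]);
        ++triangleCount;
    }
    return tris;
}
```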
        } else if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
            // for each mesh copy unique part indices, separated by special bogus (flag) index values
            auto partItr = parts.cbegin();
            while (partItr != parts.cend()) {
                // collect unique list of indices for this part
                std::set<int32_t> uniqueIndices;
                auto numIndices = partItr->y;
                ++partItr;
            }
        } else if (type == SHAPE_TYPE_SIMPLE_COMPOUND) {
            // for each mesh copy unique part indices, separated by special bogus (flag) index values
            gpu::BufferView::Iterator<const graphics::Mesh::Part> partItr = parts.cbegin<const graphics::Mesh::Part>();
            while (partItr != parts.cend<const graphics::Mesh::Part>()) {
                // collect unique list of indices for this part
                std::set<int32_t> uniqueIndices;
                auto numIndices = partItr->_numIndices;
                if (partItr->_topology == graphics::Mesh::TRIANGLES) {
                    // TODO: assert rather than workaround after we start sanitizing HFMMesh higher up
                    //assert(numIndices % TRIANGLE_STRIDE == 0);
                    numIndices -= numIndices % TRIANGLE_STRIDE; // WORKAROUND lack of sanity checking in FBXSerializer
                    auto indexItr = indices.cbegin() + partItr->x;

                    auto indexItr = indices.cbegin<const gpu::BufferView::Index>() + partItr->_startIndex;
                    auto indexEnd = indexItr + numIndices;
                    while (indexItr != indexEnd) {
                        uniqueIndices.insert(*indexItr);
                        ++indexItr;
                    }
                } else if (partItr->_topology == graphics::Mesh::TRIANGLE_STRIP) {
                    // TODO: resurrect assert after we start sanitizing HFMMesh higher up
                    //assert(numIndices > TRIANGLE_STRIDE - 1);

                    // store uniqueIndices in triangleIndices
                    triangleIndices.reserve(triangleIndices.size() + (int32_t)uniqueIndices.size());
                    for (auto index : uniqueIndices) {
                        triangleIndices.push_back(index);
                    auto indexItr = indices.cbegin<const gpu::BufferView::Index>() + partItr->_startIndex;
                    auto indexEnd = indexItr + (numIndices - 2);

                    // first triangle uses the first three indices
                    uniqueIndices.insert(*(indexItr++));
                    uniqueIndices.insert(*(indexItr++));
                    uniqueIndices.insert(*(indexItr++));

                    // the rest use previous and next index
                    uint32_t triangleCount = 1;
                    while (indexItr != indexEnd) {
                        if ((*indexItr) != graphics::Mesh::PRIMITIVE_RESTART_INDEX) {
                            if (triangleCount % 2 == 0) {
                                // EVEN triangles use first two indices in order
                                uniqueIndices.insert(*(indexItr - 2));
                                uniqueIndices.insert(*(indexItr - 1));
                            } else {
                                // ODD triangles swap order of first two indices
                                uniqueIndices.insert(*(indexItr - 1));
                                uniqueIndices.insert(*(indexItr - 2));
                            }
                            uniqueIndices.insert(*indexItr);
                            ++triangleCount;
                        }
                        ++indexItr;
                    }
                    // flag end of part
                    triangleIndices.push_back(END_OF_MESH_PART);

                    ++partItr;
                }
                // flag end of mesh
                triangleIndices.push_back(END_OF_MESH);
            }
        }

        ++shapeCount;
                // store uniqueIndices in triangleIndices
                triangleIndices.reserve(triangleIndices.size() + (int32_t)uniqueIndices.size());
                for (auto index : uniqueIndices) {
                    triangleIndices.push_back(index);
                }
                // flag end of part
                triangleIndices.push_back(END_OF_MESH_PART);

                ++partItr;
            }
            // flag end of mesh
            triangleIndices.push_back(END_OF_MESH);
        }
        ++meshCount;
    }
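
As a minimal standalone sketch of the even/odd strip decode above -- assuming a plain std::vector of indices, with RESTART as a placeholder for graphics::Mesh::PRIMITIVE_RESTART_INDEX, and defensively skipping any window that straddles a restart:

#include <cstdint>
#include <vector>

static const uint32_t RESTART = 0xffffffff; // placeholder sentinel

std::vector<uint32_t> stripToTriangles(const std::vector<uint32_t>& strip) {
    std::vector<uint32_t> triangles;
    uint32_t triangleCount = 0;
    for (size_t i = 2; i < strip.size(); ++i) {
        // ignore windows that contain a restart sentinel
        if (strip[i] == RESTART || strip[i - 1] == RESTART || strip[i - 2] == RESTART) {
            continue;
        }
        if (triangleCount % 2 == 0) {
            // even triangles use the first two indices in order
            triangles.push_back(strip[i - 2]);
            triangles.push_back(strip[i - 1]);
        } else {
            // odd triangles swap the first two indices to keep winding consistent
            triangles.push_back(strip[i - 1]);
            triangles.push_back(strip[i - 2]);
        }
        triangles.push_back(strip[i]);
        ++triangleCount;
    }
    return triangles;
}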
        // scale and shift

@ -632,7 +692,7 @@ void RenderableModelEntityItem::computeShapeInfo(ShapeInfo& shapeInfo) {
            }
        }
        for (auto points : pointCollection) {
            for (size_t i = 0; i < points.size(); ++i) {
            for (int32_t i = 0; i < points.size(); ++i) {
                points[i] = (points[i] * scaleToFit);
            }
        }
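
A sketch of the flattened layout the SHAPE_TYPE_SIMPLE_COMPOUND branch builds above: each part's unique indices are appended in sorted order (std::set iteration), every part is terminated by an END_OF_MESH_PART flag and every mesh by END_OF_MESH. The sentinel values below are placeholders; the real constants are defined alongside this code:

#include <cstdint>
#include <set>
#include <vector>

static const int32_t END_OF_MESH_PART = -1; // placeholder flag value
static const int32_t END_OF_MESH = -2;      // placeholder flag value

void appendPart(const std::vector<int32_t>& partIndices, std::vector<int32_t>& flat) {
    // de-duplicate the part's indices; std::set also yields them sorted
    std::set<int32_t> unique(partIndices.begin(), partIndices.end());
    flat.insert(flat.end(), unique.begin(), unique.end());
    flat.push_back(END_OF_MESH_PART); // flag end of part
}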

@ -1023,6 +1083,11 @@ uint32_t ModelEntityRenderer::metaFetchMetaSubItems(ItemIDs& subItems) const {
    return 0;
}

void ModelEntityRenderer::handleBlendedVertices(int blendshapeNumber, const QVector<BlendshapeOffset>& blendshapeOffsets,
                                                const QVector<int>& blendedMeshSizes, const render::ItemIDs& subItemIDs) {
    setBlendedVertices(blendshapeNumber, blendshapeOffsets, blendedMeshSizes, subItemIDs);
}

void ModelEntityRenderer::removeFromScene(const ScenePointer& scene, Transaction& transaction) {
    if (_model) {
        _model->removeFromScene(scene, transaction);

@ -1191,7 +1256,11 @@ bool ModelEntityRenderer::needsRenderUpdateFromTypedEntity(const TypedEntityPoin
    if (model && model->isLoaded()) {
        if (!entity->_dimensionsInitialized || entity->_needsInitialSimulation || !entity->_originalTexturesRead) {
            return true;
        }
    }

    if (entity->blendshapesChanged()) {
        return true;
    }

    // Check to see if we need to update the model bounds
    if (entity->needsUpdateModelBounds()) {

@ -1350,6 +1419,11 @@ void ModelEntityRenderer::doRenderUpdateSynchronousTyped(const ScenePointer& sce
        model->setTagMask(tagMask, scene);
    }

    if (entity->blendshapesChanged()) {
        model->setBlendshapeCoefficients(entity->getBlendshapeCoefficientVector());
        model->updateBlendshapes();
    }

    // TODO? early exit here when not visible?

    if (model->canCastShadow() != _canCastShadow) {

@ -1370,13 +1444,14 @@ void ModelEntityRenderer::doRenderUpdateSynchronousTyped(const ScenePointer& sce
            model->removeFromScene(scene, transaction);
            render::Item::Status::Getters statusGetters;
            makeStatusGetters(entity, statusGetters);
            model->addToScene(scene, transaction, statusGetters);
            using namespace std::placeholders;
            model->addToScene(scene, transaction, statusGetters, std::bind(&ModelEntityRenderer::metaBlendshapeOperator, _renderItemID, _1, _2, _3, _4));
            entity->bumpAncestorChainRenderableVersion();
            processMaterials();
        }
    }

    if (!_texturesLoaded && model->getNetworkModel() && model->getNetworkModel()->areTexturesLoaded()) {
    if (!_texturesLoaded && model->getGeometry() && model->getGeometry()->areTexturesLoaded()) {
        withWriteLock([&] {
            _texturesLoaded = true;
        });

@ -1529,3 +1604,12 @@ void ModelEntityRenderer::processMaterials() {
        }
    }
}

void ModelEntityRenderer::metaBlendshapeOperator(render::ItemID renderItemID, int blendshapeNumber, const QVector<BlendshapeOffset>& blendshapeOffsets,
                                                 const QVector<int>& blendedMeshSizes, const render::ItemIDs& subItemIDs) {
    render::Transaction transaction;
    transaction.updateItem<PayloadProxyInterface>(renderItemID, [blendshapeNumber, blendshapeOffsets, blendedMeshSizes, subItemIDs](PayloadProxyInterface& self) {
        self.handleBlendedVertices(blendshapeNumber, blendshapeOffsets, blendedMeshSizes, subItemIDs);
    });
    AbstractViewStateInterface::instance()->getMain3DScene()->enqueueTransaction(transaction);
}
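
A minimal illustration of the std::bind call used with addToScene above: binding the operator's leading render-item-ID argument yields a callable over the remaining parameters (a sketch with toy types, not the engine's):

#include <cstdint>
#include <functional>

static void op(uint64_t itemID, int blendshapeNumber) {
    // stand-in for the real operator body
    (void)itemID; (void)blendshapeNumber;
}

int main() {
    using namespace std::placeholders;
    uint64_t renderItemID = 42;
    std::function<void(int)> bound = std::bind(&op, renderItemID, _1);
    bound(7); // calls op(42, 7)
    return 0;
}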

@ -21,6 +21,7 @@
#include <AnimationCache.h>
#include <Model.h>
#include <model-networking/ModelCache.h>
#include <MetaModelPayload.h>

#include "RenderableEntityItem.h"

@ -120,7 +121,7 @@ private:
    bool readyToAnimate() const;
    void fetchCollisionGeometryResource();

    ModelResource::Pointer _collisionGeometryResource;
    GeometryResource::Pointer _collisionGeometryResource;
    std::vector<int> _jointMap;
    QVariantMap _originalTextures;
    bool _jointMapCompleted { false };

@ -131,7 +132,7 @@ private:

namespace render { namespace entities {

class ModelEntityRenderer : public TypedEntityRenderer<RenderableModelEntityItem> {
class ModelEntityRenderer : public TypedEntityRenderer<RenderableModelEntityItem>, public MetaModelPayload {
    using Parent = TypedEntityRenderer<RenderableModelEntityItem>;
    friend class EntityRenderer;
    Q_OBJECT

@ -155,6 +156,8 @@ protected:
    void setKey(bool didVisualGeometryRequestSucceed);
    virtual ItemKey getKey() override;
    virtual uint32_t metaFetchMetaSubItems(ItemIDs& subItems) const override;
    virtual void handleBlendedVertices(int blendshapeNumber, const QVector<BlendshapeOffset>& blendshapeOffsets,
                                       const QVector<int>& blendedMeshSizes, const render::ItemIDs& subItemIDs) override;

    virtual bool needsRenderUpdateFromTypedEntity(const TypedEntityPointer& entity) const override;
    virtual bool needsRenderUpdate() const override;

@ -199,6 +202,10 @@ private:
    bool _prevModelLoaded { false };

    void processMaterials();

    static void metaBlendshapeOperator(render::ItemID renderItemID, int blendshapeNumber, const QVector<BlendshapeOffset>& blendshapeOffsets,
                                       const QVector<int>& blendedMeshSizes, const render::ItemIDs& subItemIDs);

};

} } // namespace

@ -200,7 +200,7 @@ float importanceSample3DDimension(float startDim) {
}

ParticleEffectEntityRenderer::CpuParticle ParticleEffectEntityRenderer::createParticle(uint64_t now, const Transform& baseTransform, const particle::Properties& particleProperties,
                                                                                       const ShapeType& shapeType, const ModelResource::Pointer& geometryResource,
                                                                                       const ShapeType& shapeType, const GeometryResource::Pointer& geometryResource,
                                                                                       const TriangleInfo& triangleInfo) {
    CpuParticle particle;

@ -385,7 +385,7 @@ void ParticleEffectEntityRenderer::stepSimulation() {

    particle::Properties particleProperties;
    ShapeType shapeType;
    ModelResource::Pointer geometryResource;
    GeometryResource::Pointer geometryResource;
    withReadLock([&] {
        particleProperties = _particleProperties;
        shapeType = _shapeType;

@ -488,7 +488,7 @@ void ParticleEffectEntityRenderer::fetchGeometryResource() {
    if (hullURL.isEmpty()) {
        _geometryResource.reset();
    } else {
        _geometryResource = DependencyManager::get<ModelCache>()->getCollisionModelResource(hullURL);
        _geometryResource = DependencyManager::get<ModelCache>()->getCollisionGeometryResource(hullURL);
    }
}

@ -496,7 +496,7 @@ void ParticleEffectEntityRenderer::fetchGeometryResource() {
void ParticleEffectEntityRenderer::computeTriangles(const hfm::Model& hfmModel) {
    PROFILE_RANGE(render, __FUNCTION__);

    uint32_t numberOfMeshes = (uint32_t)hfmModel.meshes.size();
    int numberOfMeshes = hfmModel.meshes.size();

    _hasComputedTriangles = true;
    _triangleInfo.triangles.clear();

@ -506,11 +506,11 @@ void ParticleEffectEntityRenderer::computeTriangles(const hfm::Model& hfmModel)
    float minArea = FLT_MAX;
    AABox bounds;

    for (uint32_t i = 0; i < numberOfMeshes; i++) {
    for (int i = 0; i < numberOfMeshes; i++) {
        const HFMMesh& mesh = hfmModel.meshes.at(i);

        const uint32_t numberOfParts = (uint32_t)mesh.parts.size();
        for (uint32_t j = 0; j < numberOfParts; j++) {
        const int numberOfParts = mesh.parts.size();
        for (int j = 0; j < numberOfParts; j++) {
            const HFMMeshPart& part = mesh.parts.at(j);

            const int INDICES_PER_TRIANGLE = 3;
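
For reference, the per-triangle weight that computeTriangles accumulates for the particle emitter reduces to the cross-product area formula; a small glm sketch (the function name is illustrative, not from this file):

#include <glm/glm.hpp>

float triangleArea(const glm::vec3& a, const glm::vec3& b, const glm::vec3& c) {
    // half the magnitude of the cross product of two edge vectors
    return 0.5f * glm::length(glm::cross(b - a, c - a));
}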

@ -89,7 +89,7 @@ private:
    } _triangleInfo;

    static CpuParticle createParticle(uint64_t now, const Transform& baseTransform, const particle::Properties& particleProperties,
                                      const ShapeType& shapeType, const ModelResource::Pointer& geometryResource,
                                      const ShapeType& shapeType, const GeometryResource::Pointer& geometryResource,
                                      const TriangleInfo& triangleInfo);
    void stepSimulation();

@ -108,7 +108,7 @@ private:
    QString _compoundShapeURL;

    void fetchGeometryResource();
    ModelResource::Pointer _geometryResource;
    GeometryResource::Pointer _geometryResource;

    NetworkTexturePointer _networkTexture;
    ScenePointer _scene;

@ -1429,13 +1429,14 @@ void RenderablePolyVoxEntityItem::computeShapeInfoWorker() {

    QtConcurrent::run([entity, voxelSurfaceStyle, voxelVolumeSize, mesh] {
        auto polyVoxEntity = std::static_pointer_cast<RenderablePolyVoxEntityItem>(entity);
        ShapeInfo::PointCollection pointCollection;
        QVector<QVector<glm::vec3>> pointCollection;
        AABox box;
        glm::mat4 vtoM = std::static_pointer_cast<RenderablePolyVoxEntityItem>(entity)->voxelToLocalMatrix();

        if (voxelSurfaceStyle == PolyVoxEntityItem::SURFACE_MARCHING_CUBES ||
            voxelSurfaceStyle == PolyVoxEntityItem::SURFACE_EDGED_MARCHING_CUBES) {
            // pull each triangle in the mesh into a polyhedron which can be collided with
            unsigned int i = 0;

            const gpu::BufferView& vertexBufferView = mesh->getVertexBuffer();
            const gpu::BufferView& indexBufferView = mesh->getIndexBuffer();

@ -1464,16 +1465,19 @@ void RenderablePolyVoxEntityItem::computeShapeInfoWorker() {
                box += p2Model;
                box += p3Model;

                ShapeInfo::PointList pointsInPart;
                pointsInPart.push_back(p0Model);
                pointsInPart.push_back(p1Model);
                pointsInPart.push_back(p2Model);
                pointsInPart.push_back(p3Model);

                // add points to a new convex hull
                pointCollection.push_back(pointsInPart);
                QVector<glm::vec3> pointsInPart;
                pointsInPart << p0Model;
                pointsInPart << p1Model;
                pointsInPart << p2Model;
                pointsInPart << p3Model;
                // add next convex hull
                QVector<glm::vec3> newMeshPoints;
                pointCollection << newMeshPoints;
                // add points to the new convex hull
                pointCollection[i++] << pointsInPart;
            }
        } else {
            unsigned int i = 0;
            polyVoxEntity->forEachVoxelValue(voxelVolumeSize, [&](const ivec3& v, uint8_t value) {
                if (value > 0) {
                    const auto& x = v.x;

@ -1492,7 +1496,7 @@ void RenderablePolyVoxEntityItem::computeShapeInfoWorker() {
                        return;
                    }

                    ShapeInfo::PointList pointsInPart;
                    QVector<glm::vec3> pointsInPart;

                    float offL = -0.5f;
                    float offH = 0.5f;

@ -1519,17 +1523,20 @@ void RenderablePolyVoxEntityItem::computeShapeInfoWorker() {
                    box += p110;
                    box += p111;

                    pointsInPart.push_back(p000);
                    pointsInPart.push_back(p001);
                    pointsInPart.push_back(p010);
                    pointsInPart.push_back(p011);
                    pointsInPart.push_back(p100);
                    pointsInPart.push_back(p101);
                    pointsInPart.push_back(p110);
                    pointsInPart.push_back(p111);
                    pointsInPart << p000;
                    pointsInPart << p001;
                    pointsInPart << p010;
                    pointsInPart << p011;
                    pointsInPart << p100;
                    pointsInPart << p101;
                    pointsInPart << p110;
                    pointsInPart << p111;

                    // add points to a new convex hull
                    pointCollection.push_back(pointsInPart);
                    // add next convex hull
                    QVector<glm::vec3> newMeshPoints;
                    pointCollection << newMeshPoints;
                    // add points to the new convex hull
                    pointCollection[i++] << pointsInPart;
                }
            });
        }
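
A condensed sketch of the per-voxel hull built above: take the eight corners of a unit cube around the voxel position, map them through the voxel-to-local matrix, and emit them as one convex hull (Qt containers as in the surrounding code; assumes glm):

#include <initializer_list>
#include <QVector>
#include <glm/glm.hpp>

QVector<glm::vec3> voxelHullPoints(const glm::vec3& v, const glm::mat4& vtoM) {
    const float offL = -0.5f;
    const float offH = 0.5f;
    QVector<glm::vec3> points;
    for (float x : { offL, offH }) {
        for (float y : { offL, offH }) {
            for (float z : { offL, offH }) {
                // corner in voxel space, transformed to local space
                points << glm::vec3(vtoM * glm::vec4(v + glm::vec3(x, y, z), 1.0f));
            }
        }
    }
    return points;
}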

@ -1539,7 +1546,7 @@ void RenderablePolyVoxEntityItem::computeShapeInfoWorker() {

void RenderablePolyVoxEntityItem::setCollisionPoints(ShapeInfo::PointCollection pointCollection, AABox box) {
    // this catches the payload from computeShapeInfoWorker
    if (pointCollection.empty()) {
    if (pointCollection.isEmpty()) {
        EntityItem::computeShapeInfo(_shapeInfo);
        withWriteLock([&] {
            _shapeReady = true;

@ -536,6 +536,7 @@ EntityPropertyFlags EntityItemProperties::getChangedProperties() const {
    CHECK_PROPERTY_CHANGE(PROP_JOINT_TRANSLATIONS, jointTranslations);
    CHECK_PROPERTY_CHANGE(PROP_RELAY_PARENT_JOINTS, relayParentJoints);
    CHECK_PROPERTY_CHANGE(PROP_GROUP_CULLED, groupCulled);
    CHECK_PROPERTY_CHANGE(PROP_BLENDSHAPE_COEFFICIENTS, blendshapeCoefficients);
    changedProperties += _animation.getChangedProperties();

    // Light

@ -788,9 +789,9 @@ EntityPropertyFlags EntityItemProperties::getChangedProperties() const {
 * unnecessary entity server updates. Scripts should not change this property's value.
 *
 * @property {string} actionData="" - Base-64 encoded compressed dump of the actions associated with the entity. This property
 *     is typically not used in scripts directly; rather, functions that manipulate an entity's actions update it, e.g.,
 *     {@link Entities.addAction}. The size of this property increases with the number of actions. Because this property value
 *     has to fit within a High Fidelity datagram packet, there is a limit to the number of actions that an entity can have;
 *     is typically not used in scripts directly; rather, functions that manipulate an entity's actions update it, e.g.,
 *     {@link Entities.addAction}. The size of this property increases with the number of actions. Because this property value
 *     has to fit within a Vircadia datagram packet, there is a limit to the number of actions that an entity can have;
 *     edits which would result in overflow are rejected. <em>Read-only.</em>
 * @property {Entities.RenderInfo} renderInfo - Information on the cost of rendering the entity. Currently information is only
 *     provided for <code>Model</code> entities. <em>Read-only.</em>

@ -996,6 +997,9 @@ EntityPropertyFlags EntityItemProperties::getChangedProperties() const {
 *     compressed in GZ format, in which case the URL ends in ".gz".
 * @property {Vec3} modelScale - The scale factor applied to the model's dimensions.
 *     <p class="important">Deprecated: This property is deprecated and will be removed.</p>
 * @property {string} blendshapeCoefficients - A JSON string of a map of blendshape names to values. Only stores set values.
 *     When editing this property, only coefficients that you are editing will change; it will not explicitly reset other
 *     coefficients.
 * @property {string} textures="" - A JSON string of texture name, URL pairs used when rendering the model in place of the
 *     model's original textures. Use a texture name from the <code>originalTextures</code> property to override that texture.
 *     Only the texture names and URLs to be overridden need be specified; original textures are used where there are no
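
For illustration, a blendshapeCoefficients value is a JSON object string mapping blendshape names to weights, for example (the two names here are illustrative picks from the engine's blendshape set):

{ "EyeBlink_L": 1.0, "JawOpen": 0.5 }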

@ -1230,8 +1234,8 @@ EntityPropertyFlags EntityItemProperties::getChangedProperties() const {
 * @property {string} voxelData="ABAAEAAQAAAAHgAAEAB42u3BAQ0AAADCoPdPbQ8HFAAAAPBuEAAAAQ==" - Base-64 encoded compressed dump of
 *     the PolyVox data. This property is typically not used in scripts directly; rather, functions that manipulate a PolyVox
 *     entity update it.
 *     <p>The size of this property increases with the size and complexity of the PolyVox entity, with the size depending on how
 *     the particular entity's voxels compress. Because this property value has to fit within a High Fidelity datagram packet,
 *     <p>The size of this property increases with the size and complexity of the PolyVox entity, with the size depending on how
 *     the particular entity's voxels compress. Because this property value has to fit within a Vircadia datagram packet,
 *     there is a limit to the size and complexity of a PolyVox entity; edits which would result in an overflow are rejected.</p>
 * @property {Entities.PolyVoxSurfaceStyle} voxelSurfaceStyle=2 - The style of rendering the voxels' surface and how
 *     neighboring PolyVox entities are joined.

@ -1723,6 +1727,7 @@ QScriptValue EntityItemProperties::copyToScriptValue(QScriptEngine* engine, bool
    COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_JOINT_TRANSLATIONS, jointTranslations);
    COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_RELAY_PARENT_JOINTS, relayParentJoints);
    COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_GROUP_CULLED, groupCulled);
    COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_BLENDSHAPE_COEFFICIENTS, blendshapeCoefficients);
    if (!psuedoPropertyFlagsButDesiredEmpty) {
        _animation.copyToScriptValue(_desiredProperties, properties, engine, skipDefaults, defaultEntityProperties);
    }

@ -2130,6 +2135,7 @@ void EntityItemProperties::copyFromScriptValue(const QScriptValue& object, bool
    COPY_PROPERTY_FROM_QSCRIPTVALUE(jointTranslations, qVectorVec3, setJointTranslations);
    COPY_PROPERTY_FROM_QSCRIPTVALUE(relayParentJoints, bool, setRelayParentJoints);
    COPY_PROPERTY_FROM_QSCRIPTVALUE(groupCulled, bool, setGroupCulled);
    COPY_PROPERTY_FROM_QSCRIPTVALUE(blendshapeCoefficients, QString, setBlendshapeCoefficients);
    _animation.copyFromScriptValue(object, _defaultSettings);

    // Light

@ -2420,6 +2426,7 @@ void EntityItemProperties::merge(const EntityItemProperties& other) {
    COPY_PROPERTY_IF_CHANGED(jointTranslations);
    COPY_PROPERTY_IF_CHANGED(relayParentJoints);
    COPY_PROPERTY_IF_CHANGED(groupCulled);
    COPY_PROPERTY_IF_CHANGED(blendshapeCoefficients);
    _animation.merge(other._animation);

    // Light

@ -2774,6 +2781,7 @@ bool EntityItemProperties::getPropertyInfo(const QString& propertyName, EntityPr
    ADD_PROPERTY_TO_MAP(PROP_JOINT_TRANSLATIONS, JointTranslations, jointTranslations, QVector<vec3>);
    ADD_PROPERTY_TO_MAP(PROP_RELAY_PARENT_JOINTS, RelayParentJoints, relayParentJoints, bool);
    ADD_PROPERTY_TO_MAP(PROP_GROUP_CULLED, GroupCulled, groupCulled, bool);
    ADD_PROPERTY_TO_MAP(PROP_BLENDSHAPE_COEFFICIENTS, BlendshapeCoefficients, blendshapeCoefficients, QString);
    { // Animation
        ADD_GROUP_PROPERTY_TO_MAP(PROP_ANIMATION_URL, Animation, animation, URL, url);
        ADD_GROUP_PROPERTY_TO_MAP(PROP_ANIMATION_ALLOW_TRANSLATION, Animation, animation, AllowTranslation, allowTranslation);

@ -3213,6 +3221,7 @@ OctreeElement::AppendState EntityItemProperties::encodeEntityEditPacket(PacketTy
    APPEND_ENTITY_PROPERTY(PROP_JOINT_TRANSLATIONS, properties.getJointTranslations());
    APPEND_ENTITY_PROPERTY(PROP_RELAY_PARENT_JOINTS, properties.getRelayParentJoints());
    APPEND_ENTITY_PROPERTY(PROP_GROUP_CULLED, properties.getGroupCulled());
    APPEND_ENTITY_PROPERTY(PROP_BLENDSHAPE_COEFFICIENTS, properties.getBlendshapeCoefficients());

    _staticAnimation.setProperties(properties);
    _staticAnimation.appendToEditPacket(packetData, requestedProperties, propertyFlags, propertiesDidntFit, propertyCount, appendState);

@ -3700,6 +3709,7 @@ bool EntityItemProperties::decodeEntityEditPacket(const unsigned char* data, int
    READ_ENTITY_PROPERTY_TO_PROPERTIES(PROP_JOINT_TRANSLATIONS, QVector<vec3>, setJointTranslations);
    READ_ENTITY_PROPERTY_TO_PROPERTIES(PROP_RELAY_PARENT_JOINTS, bool, setRelayParentJoints);
    READ_ENTITY_PROPERTY_TO_PROPERTIES(PROP_GROUP_CULLED, bool, setGroupCulled);
    READ_ENTITY_PROPERTY_TO_PROPERTIES(PROP_BLENDSHAPE_COEFFICIENTS, QString, setBlendshapeCoefficients);

    properties.getAnimation().decodeFromEditPacket(propertyFlags, dataAt, processedBytes);
}

@ -4107,6 +4117,7 @@ void EntityItemProperties::markAllChanged() {
    _jointTranslationsChanged = true;
    _relayParentJointsChanged = true;
    _groupCulledChanged = true;
    _blendshapeCoefficientsChanged = true;
    _animation.markAllChanged();

    // Light

@ -4675,6 +4686,9 @@ QList<QString> EntityItemProperties::listChangedProperties() {
    if (groupCulledChanged()) {
        out += "groupCulled";
    }
    if (blendshapeCoefficientsChanged()) {
        out += "blendshapeCoefficients";
    }
    getAnimation().listChangedProperties(out);

    // Light

@ -5091,7 +5105,7 @@ bool EntityItemProperties::verifySignature(const QString& publicKey, const QByte

bool EntityItemProperties::verifyStaticCertificateProperties() {
    // True IFF a non-empty certificateID matches the static certificate json.
    // I.e., if we can verify that the certificateID was produced by High Fidelity signing the static certificate hash.
    // I.e., if we can verify that the certificateID was produced by Vircadia signing the static certificate hash.
    return verifySignature(EntityItem::_marketplacePublicKey, getStaticCertificateHash(), QByteArray::fromBase64(getCertificateID().toUtf8()));
}

@ -300,6 +300,7 @@ public:
    DEFINE_PROPERTY_REF(PROP_JOINT_TRANSLATIONS, JointTranslations, jointTranslations, QVector<glm::vec3>, ENTITY_ITEM_DEFAULT_EMPTY_VEC3_QVEC);
    DEFINE_PROPERTY(PROP_RELAY_PARENT_JOINTS, RelayParentJoints, relayParentJoints, bool, ENTITY_ITEM_DEFAULT_RELAY_PARENT_JOINTS);
    DEFINE_PROPERTY_REF(PROP_GROUP_CULLED, GroupCulled, groupCulled, bool, false);
    DEFINE_PROPERTY_REF(PROP_BLENDSHAPE_COEFFICIENTS, BlendshapeCoefficients, blendshapeCoefficients, QString, "");
    DEFINE_PROPERTY_GROUP(Animation, animation, AnimationPropertyGroup);

    // Light

@ -216,16 +216,17 @@ enum EntityPropertyList {
    PROP_JOINT_TRANSLATIONS = PROP_DERIVED_5,
    PROP_RELAY_PARENT_JOINTS = PROP_DERIVED_6,
    PROP_GROUP_CULLED = PROP_DERIVED_7,
    PROP_BLENDSHAPE_COEFFICIENTS = PROP_DERIVED_8,
    // Animation
    PROP_ANIMATION_URL = PROP_DERIVED_8,
    PROP_ANIMATION_ALLOW_TRANSLATION = PROP_DERIVED_9,
    PROP_ANIMATION_FPS = PROP_DERIVED_10,
    PROP_ANIMATION_FRAME_INDEX = PROP_DERIVED_11,
    PROP_ANIMATION_PLAYING = PROP_DERIVED_12,
    PROP_ANIMATION_LOOP = PROP_DERIVED_13,
    PROP_ANIMATION_FIRST_FRAME = PROP_DERIVED_14,
    PROP_ANIMATION_LAST_FRAME = PROP_DERIVED_15,
    PROP_ANIMATION_HOLD = PROP_DERIVED_16,
    PROP_ANIMATION_URL = PROP_DERIVED_9,
    PROP_ANIMATION_ALLOW_TRANSLATION = PROP_DERIVED_10,
    PROP_ANIMATION_FPS = PROP_DERIVED_11,
    PROP_ANIMATION_FRAME_INDEX = PROP_DERIVED_12,
    PROP_ANIMATION_PLAYING = PROP_DERIVED_13,
    PROP_ANIMATION_LOOP = PROP_DERIVED_14,
    PROP_ANIMATION_FIRST_FRAME = PROP_DERIVED_15,
    PROP_ANIMATION_LAST_FRAME = PROP_DERIVED_16,
    PROP_ANIMATION_HOLD = PROP_DERIVED_17,

    // Light
    PROP_IS_SPOTLIGHT = PROP_DERIVED_0,

@ -3194,21 +3194,30 @@ glm::vec3 EntityTree::getUnscaledDimensionsForID(const QUuid& id) {
    return glm::vec3(1.0f);
}

void EntityTree::updateEntityQueryAACubeWorker(SpatiallyNestablePointer object, EntityEditPacketSender* packetSender,
AACube EntityTree::updateEntityQueryAACubeWorker(SpatiallyNestablePointer object, EntityEditPacketSender* packetSender,
        MovingEntitiesOperator& moveOperator, bool force, bool tellServer) {
    glm::vec3 min(FLT_MAX);
    glm::vec3 max(-FLT_MAX);

    // if the queryBox has changed, tell the entity-server
    EntityItemPointer entity = std::dynamic_pointer_cast<EntityItem>(object);
    if (entity) {
        bool queryAACubeChanged = false;
        if (!entity->hasChildren()) {
            // updateQueryAACube will also update all ancestors' AACubes, so we only need to call this for leaf nodes
            queryAACubeChanged = entity->updateQueryAACube();
            queryAACubeChanged = entity->updateQueryAACube(false);
            AACube entityAACube = entity->getQueryAACube();
            min = glm::min(min, entityAACube.getMinimumPoint());
            max = glm::max(max, entityAACube.getMaximumPoint());
        } else {
            AACube oldCube = entity->getQueryAACube();
            object->forEachChild([&](SpatiallyNestablePointer descendant) {
                updateEntityQueryAACubeWorker(descendant, packetSender, moveOperator, force, tellServer);
                AACube entityAACube = updateEntityQueryAACubeWorker(descendant, packetSender, moveOperator, force, tellServer);
                min = glm::min(min, entityAACube.getMinimumPoint());
                max = glm::max(max, entityAACube.getMaximumPoint());
            });
            queryAACubeChanged = oldCube != entity->getQueryAACube();
            queryAACubeChanged = entity->updateQueryAACubeWithDescendantAACube(AACube(Extents(min, max)), false);
            AACube newCube = entity->getQueryAACube();
            min = glm::min(min, newCube.getMinimumPoint());
            max = glm::max(max, newCube.getMaximumPoint());
        }

        if (queryAACubeChanged || force) {

@ -3217,9 +3226,10 @@ void EntityTree::updateEntityQueryAACubeWorker(SpatiallyNestablePointer object,
            if (success) {
                moveOperator.addEntityToMoveList(entity, newCube);
            }
            // send an edit packet to update the entity-server about the queryAABox. We do this for domain-hosted
            // entities as well as for avatar-entities; the packet-sender will route the update accordingly
            if (tellServer && packetSender && (entity->isDomainEntity() || entity->isAvatarEntity())) {
            // send an edit packet to update the entity-server about the queryAABox. We only do this for domain-hosted
            // entities, as we don't want to flood the update pipeline with AvatarEntity updates, so we assume
            // others have all info required to properly update queryAACube of AvatarEntities on their end
            if (tellServer && packetSender && entity->isDomainEntity()) {
                quint64 now = usecTimestampNow();
                EntityItemProperties properties = entity->getProperties();
                properties.setQueryAACubeDirty();

@ -3234,7 +3244,16 @@ void EntityTree::updateEntityQueryAACubeWorker(SpatiallyNestablePointer object,
            entity->markDirtyFlags(Simulation::DIRTY_POSITION);
            entityChanged(entity);
        }
    } else {
        // if we're called on a non-entity, we might still have entity descendants
        object->forEachChild([&](SpatiallyNestablePointer descendant) {
            AACube entityAACube = updateEntityQueryAACubeWorker(descendant, packetSender, moveOperator, force, tellServer);
            min = glm::min(min, entityAACube.getMinimumPoint());
            max = glm::max(max, entityAACube.getMaximumPoint());
        });
    }

    return AACube(Extents(min, max));
}

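The recursion above boils down to a component-wise min/max reduction over every descendant's query cube, wrapped back into a cube at the end. A sketch of that reduction with glm, using a simplified corner-plus-scale stand-in for AACube/Extents:

#include <cfloat>
#include <vector>
#include <glm/glm.hpp>

struct Cube {
    glm::vec3 corner; // minimum corner
    float scale;      // edge length
};

Cube enclosingCube(const std::vector<Cube>& cubes) {
    glm::vec3 min(FLT_MAX);
    glm::vec3 max(-FLT_MAX);
    for (const Cube& c : cubes) {
        min = glm::min(min, c.corner);
        max = glm::max(max, c.corner + glm::vec3(c.scale));
    }
    // a cube must be square, so take the largest edge of the enclosing box
    glm::vec3 size = max - min;
    float edge = glm::max(size.x, glm::max(size.y, size.z));
    return { min, edge };
}
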
void EntityTree::updateEntityQueryAACube(SpatiallyNestablePointer object, EntityEditPacketSender* packetSender,

@ -400,8 +400,9 @@ private:

    std::map<QString, QString> _namedPaths;

    void updateEntityQueryAACubeWorker(SpatiallyNestablePointer object, EntityEditPacketSender* packetSender,
        MovingEntitiesOperator& moveOperator, bool force, bool tellServer);
    // Return an AACube containing object and all its entity descendants
    AACube updateEntityQueryAACubeWorker(SpatiallyNestablePointer object, EntityEditPacketSender* packetSender,
        MovingEntitiesOperator& moveOperator, bool force, bool tellServer);
};

void convertGrabUserDataToProperties(EntityItemProperties& properties);

@ -33,7 +33,8 @@ EntityItemPointer ModelEntityItem::factory(const EntityItemID& entityID, const E
    return entity;
}

ModelEntityItem::ModelEntityItem(const EntityItemID& entityItemID) : EntityItem(entityItemID)
ModelEntityItem::ModelEntityItem(const EntityItemID& entityItemID) : EntityItem(entityItemID),
    _blendshapeCoefficientsVector((int)Blendshapes::BlendshapeCount, 0.0f)
{
    _lastAnimated = usecTimestampNow();
    // set the last animated when interface (re)starts

@ -71,6 +72,7 @@ EntityItemProperties ModelEntityItem::getProperties(const EntityPropertyFlags& d
    COPY_ENTITY_PROPERTY_TO_PROPERTIES(jointTranslations, getJointTranslations);
    COPY_ENTITY_PROPERTY_TO_PROPERTIES(relayParentJoints, getRelayParentJoints);
    COPY_ENTITY_PROPERTY_TO_PROPERTIES(groupCulled, getGroupCulled);
    COPY_ENTITY_PROPERTY_TO_PROPERTIES(blendshapeCoefficients, getBlendshapeCoefficients);
    withReadLock([&] {
        _animationProperties.getProperties(properties);
    });

@ -94,6 +96,7 @@ bool ModelEntityItem::setProperties(const EntityItemProperties& properties) {
    SET_ENTITY_PROPERTY_FROM_PROPERTIES(jointTranslations, setJointTranslations);
    SET_ENTITY_PROPERTY_FROM_PROPERTIES(relayParentJoints, setRelayParentJoints);
    SET_ENTITY_PROPERTY_FROM_PROPERTIES(groupCulled, setGroupCulled);
    SET_ENTITY_PROPERTY_FROM_PROPERTIES(blendshapeCoefficients, setBlendshapeCoefficients);

    withWriteLock([&] {
        AnimationPropertyGroup animationProperties = _animationProperties;

@ -138,6 +141,7 @@ int ModelEntityItem::readEntitySubclassDataFromBuffer(const unsigned char* data,
    READ_ENTITY_PROPERTY(PROP_JOINT_TRANSLATIONS, QVector<glm::vec3>, setJointTranslations);
    READ_ENTITY_PROPERTY(PROP_RELAY_PARENT_JOINTS, bool, setRelayParentJoints);
    READ_ENTITY_PROPERTY(PROP_GROUP_CULLED, bool, setGroupCulled);
    READ_ENTITY_PROPERTY(PROP_BLENDSHAPE_COEFFICIENTS, QString, setBlendshapeCoefficients);

    // grab a local copy of _animationProperties to avoid multiple locks
    int bytesFromAnimation;

@ -176,6 +180,7 @@ EntityPropertyFlags ModelEntityItem::getEntityProperties(EncodeBitstreamParams&
    requestedProperties += PROP_JOINT_TRANSLATIONS;
    requestedProperties += PROP_RELAY_PARENT_JOINTS;
    requestedProperties += PROP_GROUP_CULLED;
    requestedProperties += PROP_BLENDSHAPE_COEFFICIENTS;
    requestedProperties += _animationProperties.getEntityProperties(params);

    return requestedProperties;

@ -204,6 +209,7 @@ void ModelEntityItem::appendSubclassData(OctreePacketData* packetData, EncodeBit
    APPEND_ENTITY_PROPERTY(PROP_JOINT_TRANSLATIONS, getJointTranslations());
    APPEND_ENTITY_PROPERTY(PROP_RELAY_PARENT_JOINTS, getRelayParentJoints());
    APPEND_ENTITY_PROPERTY(PROP_GROUP_CULLED, getGroupCulled());
    APPEND_ENTITY_PROPERTY(PROP_BLENDSHAPE_COEFFICIENTS, getBlendshapeCoefficients());

    withReadLock([&] {
        _animationProperties.appendSubclassData(packetData, params, entityTreeElementExtraEncodeData, requestedProperties,

@ -256,6 +262,7 @@ void ModelEntityItem::debugDump() const {
    qCDebug(entities) << "    dimensions:" << getScaledDimensions();
    qCDebug(entities) << "    model URL:" << getModelURL();
    qCDebug(entities) << "    compound shape URL:" << getCompoundShapeURL();
    qCDebug(entities) << "    blendshapeCoefficients:" << getBlendshapeCoefficients();
}

void ModelEntityItem::setShapeType(ShapeType type) {

@ -743,3 +750,39 @@ void ModelEntityItem::setModelScale(const glm::vec3& modelScale) {
        _modelScale = modelScale;
    });
}

QString ModelEntityItem::getBlendshapeCoefficients() const {
    return resultWithReadLock<QString>([&] {
        return QJsonDocument::fromVariant(_blendshapeCoefficientsMap).toJson();
    });
}

void ModelEntityItem::setBlendshapeCoefficients(const QString& blendshapeCoefficients) {
    QJsonParseError error;
    QJsonDocument newCoefficientsJSON = QJsonDocument::fromJson(blendshapeCoefficients.toUtf8(), &error);
    if (error.error != QJsonParseError::NoError) {
        qWarning() << "Could not evaluate blendshapeCoefficients property value:" << newCoefficientsJSON;
        return;
    }

    QVariantMap newCoefficientsMap = newCoefficientsJSON.toVariant().toMap();
    withWriteLock([&] {
        for (auto& blendshape : newCoefficientsMap.keys()) {
            auto newCoefficient = newCoefficientsMap[blendshape];
            auto blendshapeIter = BLENDSHAPE_LOOKUP_MAP.find(blendshape);
            if (newCoefficient.canConvert<float>() && blendshapeIter != BLENDSHAPE_LOOKUP_MAP.end()) {
                float newCoefficientValue = newCoefficient.toFloat();
                _blendshapeCoefficientsVector[blendshapeIter.value()] = newCoefficientValue;
                _blendshapeCoefficientsMap[blendshape] = newCoefficientValue;
                _blendshapesChanged = true;
            }
        }
    });
}

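A quick usage sketch for the setter above -- the property value is a JSON object string, parsed with QJsonDocument; unknown blendshape names and non-numeric values are ignored, and coefficients not mentioned keep their previous values (the names in the example string are illustrative):

#include <QJsonDocument>
#include <QString>
#include <QVariantMap>

QVariantMap parseBlendshapeCoefficients(const QString& json) {
    QJsonParseError error;
    QJsonDocument doc = QJsonDocument::fromJson(json.toUtf8(), &error);
    // an empty map signals a parse failure to the caller
    return (error.error == QJsonParseError::NoError) ? doc.toVariant().toMap() : QVariantMap();
}

// e.g. parseBlendshapeCoefficients("{\"EyeBlink_L\": 1.0, \"JawOpen\": 0.5}")
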
QVector<float> ModelEntityItem::getBlendshapeCoefficientVector() {
    return resultWithReadLock<QVector<float>>([&] {
        _blendshapesChanged = false; // ok to change this within read lock here
        return _blendshapeCoefficientsVector;
    });
}

@ -17,6 +17,7 @@
#include <ThreadSafeValueCache.h>
#include "AnimationPropertyGroup.h"

#include "BlendshapeConstants.h"

class ModelEntityItem : public EntityItem {
public:

@ -133,6 +134,11 @@ public:
    glm::vec3 getModelScale() const;
    void setModelScale(const glm::vec3& modelScale);

    QString getBlendshapeCoefficients() const;
    void setBlendshapeCoefficients(const QString& blendshapeCoefficients);
    bool blendshapesChanged() const { return _blendshapesChanged; }
    QVector<float> getBlendshapeCoefficientVector();

private:
    void setAnimationSettings(const QString& value); // only called for old bitstream format
    bool applyNewAnimationProperties(AnimationPropertyGroup newProperties);

@ -166,6 +172,7 @@ protected:
    QString _modelURL;
    bool _relayParentJoints;
    bool _groupCulled { false };
    QVariantMap _blendshapeCoefficientsMap;

    ThreadSafeValueCache<QString> _compoundShapeURL;

@ -178,6 +185,9 @@ protected:
private:
    uint64_t _lastAnimated{ 0 };
    float _currentFrame{ -1.0f };

    QVector<float> _blendshapeCoefficientsVector;
    bool _blendshapesChanged { false };
};

#endif // hifi_ModelEntityItem_h

@ -350,7 +350,7 @@ bool ZoneEntityItem::findDetailedParabolaIntersection(const glm::vec3& origin, c
}

bool ZoneEntityItem::contains(const glm::vec3& point) const {
    ModelResource::Pointer resource = _shapeResource;
    GeometryResource::Pointer resource = _shapeResource;
    if (_shapeType == SHAPE_TYPE_COMPOUND && resource) {
        if (resource->isLoaded()) {
            const HFMModel& hfmModel = resource->getHFMModel();

@ -467,7 +467,7 @@ void ZoneEntityItem::fetchCollisionGeometryResource() {
    if (hullURL.isEmpty()) {
        _shapeResource.reset();
    } else {
        _shapeResource = DependencyManager::get<ModelCache>()->getCollisionModelResource(hullURL);
        _shapeResource = DependencyManager::get<ModelCache>()->getCollisionGeometryResource(hullURL);
    }
}

@ -173,7 +173,7 @@ protected:
    static bool _zonesArePickable;

    void fetchCollisionGeometryResource();
    ModelResource::Pointer _shapeResource;
    GeometryResource::Pointer _shapeResource;

};

@ -20,7 +20,6 @@
#include <BlendshapeConstants.h>

#include <hfm/ModelFormatLogging.h>
#include <hfm/HFMModelMath.h>

// TOOL: Uncomment the following line to enable the filtering of all the unknown fields of a node so we can breakpoint easily while loading a model with problems...
//#define DEBUG_FBXSERIALIZER

@ -146,9 +145,8 @@ public:
    bool isLimbNode; // is this FBXModel transform a "LimbNode", i.e. a joint
};

glm::mat4 getGlobalTransform(const QMultiMap<QString, QString>& _connectionParentMap,
        const QHash<QString, FBXModel>& fbxModels, QString nodeID, bool mixamoHack, const QString& url) {
    const QHash<QString, FBXModel>& fbxModels, QString nodeID, bool mixamoHack, const QString& url) {
    glm::mat4 globalTransform;
    QVector<QString> visitedNodes; // Used to prevent following a cycle
    while (!nodeID.isNull()) {

@ -168,11 +166,12 @@ glm::mat4 getGlobalTransform(const QMultiMap<QString, QString>& _connectionParen
        }
        QList<QString> parentIDs = _connectionParentMap.values(nodeID);
        nodeID = QString();
        foreach(const QString& parentID, parentIDs) {
        foreach (const QString& parentID, parentIDs) {
            if (visitedNodes.contains(parentID)) {
                qCWarning(modelformat) << "Ignoring loop detected in FBX connection map for" << url;
                continue;
            }

            if (fbxModels.contains(parentID)) {
                nodeID = parentID;
                break;

@ -182,21 +181,6 @@ glm::mat4 getGlobalTransform(const QMultiMap<QString, QString>& _connectionParen
    return globalTransform;
}

std::vector<QString> getModelIDsForMeshID(const QString& meshID, const QHash<QString, FBXModel>& fbxModels, const QMultiMap<QString, QString>& _connectionParentMap) {
    std::vector<QString> modelsForMesh;
    if (fbxModels.contains(meshID)) {
        modelsForMesh.push_back(meshID);
    } else {
        // This mesh may have more than one parent model, with different material and transform, representing a different instance of the mesh
        for (const auto& parentID : _connectionParentMap.values(meshID)) {
            if (fbxModels.contains(parentID)) {
                modelsForMesh.push_back(parentID);
            }
        }
    }
    return modelsForMesh;
}

class ExtractedBlendshape {
public:
    QString id;

@ -420,7 +404,7 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
    QVector<ExtractedBlendshape> blendshapes;

    QHash<QString, FBXModel> fbxModels;
    QHash<QString, Cluster> fbxClusters;
    QHash<QString, Cluster> clusters;
    QHash<QString, AnimationCurve> animationCurves;

    QHash<QString, QString> typeFlags;

@ -531,8 +515,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
                if (object.properties.at(2) == "Mesh") {
                    meshes.insert(getID(object.properties), extractMesh(object, meshIndex, deduplicateIndices));
                } else { // object.properties.at(2) == "Shape"
                    ExtractedBlendshape blendshape = { getID(object.properties), extractBlendshape(object) };
                    blendshapes.append(blendshape);
                    ExtractedBlendshape extracted = { getID(object.properties), extractBlendshape(object) };
                    blendshapes.append(extracted);
                }
            } else if (object.name == "Model") {
                QString name = getModelName(object.properties);

@ -706,8 +690,8 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const

                // add the blendshapes included in the model, if any
                if (mesh) {
                    foreach (const ExtractedBlendshape& blendshape, blendshapes) {
                        addBlendshapes(blendshape, blendshapeIndices.values(blendshape.id.toLatin1()), *mesh);
                    foreach (const ExtractedBlendshape& extracted, blendshapes) {
                        addBlendshapes(extracted, blendshapeIndices.values(extracted.id.toLatin1()), *mesh);
                    }
                }

@ -1074,9 +1058,9 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
                    }
                }

                // skip empty fbxClusters
                // skip empty clusters
                if (cluster.indices.size() > 0 && cluster.weights.size() > 0) {
                    fbxClusters.insert(getID(object.properties), cluster);
                    clusters.insert(getID(object.properties), cluster);
                }

            } else if (object.properties.last() == "BlendShapeChannel") {

@ -1230,11 +1214,11 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
    }

    // assign the blendshapes to their corresponding meshes
    foreach (const ExtractedBlendshape& blendshape, blendshapes) {
        QString blendshapeChannelID = _connectionParentMap.value(blendshape.id);
    foreach (const ExtractedBlendshape& extracted, blendshapes) {
        QString blendshapeChannelID = _connectionParentMap.value(extracted.id);
        QString blendshapeID = _connectionParentMap.value(blendshapeChannelID);
        QString meshID = _connectionParentMap.value(blendshapeID);
        addBlendshapes(blendshape, blendshapeChannelIndices.values(blendshapeChannelID), meshes[meshID]);
        addBlendshapes(extracted, blendshapeChannelIndices.values(blendshapeChannelID), meshes[meshID]);
    }

    // get offset transform from mapping

@ -1249,13 +1233,13 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
    QVector<QString> modelIDs;
    QSet<QString> remainingFBXModels;
    for (QHash<QString, FBXModel>::const_iterator fbxModel = fbxModels.constBegin(); fbxModel != fbxModels.constEnd(); fbxModel++) {
        // models with fbxClusters must be parented to the cluster top
        // models with clusters must be parented to the cluster top
        // Unless the model is a root node.
        bool isARootNode = !modelIDs.contains(_connectionParentMap.value(fbxModel.key()));
        if (!isARootNode) {
            foreach(const QString& deformerID, _connectionChildMap.values(fbxModel.key())) {
                foreach(const QString& clusterID, _connectionChildMap.values(deformerID)) {
                    if (!fbxClusters.contains(clusterID)) {
                    if (!clusters.contains(clusterID)) {
                        continue;
                    }
                    QString topID = getTopModelID(_connectionParentMap, fbxModels, _connectionChildMap.value(clusterID), url);

@ -1299,18 +1283,12 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const

    // convert the models to joints
    hfmModel.hasSkeletonJoints = false;

    bool needMixamoHack = hfmModel.applicationName == "mixamo.com";

    std::vector<glm::mat4> transformForClusters;
    transformForClusters.reserve((size_t)modelIDs.size());
    for (const QString& modelID : modelIDs) {
    foreach (const QString& modelID, modelIDs) {
        const FBXModel& fbxModel = fbxModels[modelID];
        HFMJoint joint;
        joint.parentIndex = fbxModel.parentIndex;
        uint32_t jointIndex = (uint32_t)hfmModel.joints.size();

        // Copy default joint parameters from model
        int jointIndex = hfmModel.joints.size();

        joint.translation = fbxModel.translation; // these are usually in centimeters
        joint.preTransform = fbxModel.preTransform;

@ -1321,62 +1299,35 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
        joint.rotationMin = fbxModel.rotationMin;
        joint.rotationMax = fbxModel.rotationMax;

        if (fbxModel.hasGeometricOffset) {
            joint.geometricOffset = createMatFromScaleQuatAndPos(fbxModel.geometricScaling, fbxModel.geometricRotation, fbxModel.geometricTranslation);
        }
        joint.hasGeometricOffset = fbxModel.hasGeometricOffset;
        joint.geometricTranslation = fbxModel.geometricTranslation;
        joint.geometricRotation = fbxModel.geometricRotation;
        joint.geometricScaling = fbxModel.geometricScaling;
        joint.isSkeletonJoint = fbxModel.isLimbNode;
        hfmModel.hasSkeletonJoints = (hfmModel.hasSkeletonJoints || joint.isSkeletonJoint);

        joint.name = fbxModel.name;

        joint.bindTransformFoundInCluster = false;

        // With the basic joint information, we can start to calculate compound transform information
        // modelIDs is ordered from parent to children, so we can safely get parent transforms from earlier joints as we iterate

        // Make adjustments to the static joint properties, and pre-calculate static transforms

        if (applyUpAxisZRotation && joint.parentIndex == -1) {
            joint.rotation *= upAxisZRotation;
            joint.translation = upAxisZRotation * joint.translation;
        }

        glm::quat combinedRotation = joint.preRotation * joint.rotation * joint.postRotation;
        joint.localTransform = glm::translate(joint.translation) * joint.preTransform * glm::mat4_cast(combinedRotation) * joint.postTransform;

        if (joint.parentIndex == -1) {
            joint.transform = joint.localTransform;
            joint.globalTransform = hfmModel.offset * joint.localTransform;
            joint.transform = hfmModel.offset * glm::translate(joint.translation) * joint.preTransform *
                glm::mat4_cast(combinedRotation) * joint.postTransform;
            joint.inverseDefaultRotation = glm::inverse(combinedRotation);
            joint.distanceToParent = 0.0f;

        } else {
            const HFMJoint& parentJoint = hfmModel.joints.at(joint.parentIndex);
            joint.transform = parentJoint.transform * joint.localTransform;
            joint.globalTransform = parentJoint.globalTransform * joint.localTransform;
            joint.transform = parentJoint.transform * glm::translate(joint.translation) *
                joint.preTransform * glm::mat4_cast(combinedRotation) * joint.postTransform;
            joint.inverseDefaultRotation = glm::inverse(combinedRotation) * parentJoint.inverseDefaultRotation;
            joint.distanceToParent = glm::distance(extractTranslation(parentJoint.transform), extractTranslation(joint.transform));
            joint.distanceToParent = glm::distance(extractTranslation(parentJoint.transform),
                extractTranslation(joint.transform));
        }
        joint.inverseBindRotation = joint.inverseDefaultRotation;
        joint.name = fbxModel.name;

        // If needed, separately calculate the FBX-specific transform used for inverse bind transform calculations

        glm::mat4 transformForCluster;
        if (applyUpAxisZRotation) {
            const glm::quat jointBindCombinedRotation = fbxModel.preRotation * fbxModel.rotation * fbxModel.postRotation;
            const glm::mat4 localTransformForCluster = glm::translate(fbxModel.translation) * fbxModel.preTransform * glm::mat4_cast(jointBindCombinedRotation) * fbxModel.postTransform;
            if (fbxModel.parentIndex != -1 && fbxModel.parentIndex < (int)jointIndex && !needMixamoHack) {
                const glm::mat4& parenttransformForCluster = transformForClusters[fbxModel.parentIndex];
                transformForCluster = parenttransformForCluster * localTransformForCluster;
            } else {
                transformForCluster = localTransformForCluster;
            }
        } else {
            transformForCluster = joint.transform;
        }
        transformForClusters.push_back(transformForCluster);

        // Initialize animation information next
        // And also get the joint poses from the first frame of the animation, if present
        joint.bindTransformFoundInCluster = false;

        QString rotationID = localRotations.value(modelID);
        AnimationCurve xRotCurve = animationCurves.value(xComponents.value(rotationID));

@ -1404,11 +1355,14 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
                joint.translation = hfmModel.animationFrames[i].translations[jointIndex];
                joint.rotation = hfmModel.animationFrames[i].rotations[jointIndex];
            }
        }

        hfmModel.joints.push_back(joint);
    }
        hfmModel.joints.append(joint);
    }
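
The per-joint math above composes each local transform as translate(T) * preTransform * mat4(preRotation * rotation * postRotation) * postTransform and then chains it through the parent. A compact standalone sketch of that chain with glm (the struct and function are illustrative, not the HFM types):

#include <vector>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/quaternion.hpp>

struct JointLocal {
    int parentIndex;  // -1 for a root joint
    glm::vec3 translation;
    glm::mat4 preTransform { 1.0f };
    glm::mat4 postTransform { 1.0f };
    glm::quat preRotation, rotation, postRotation;
};

// joints must be ordered parents-before-children, as modelIDs is above
std::vector<glm::mat4> worldTransforms(const std::vector<JointLocal>& joints) {
    std::vector<glm::mat4> world;
    world.reserve(joints.size());
    for (const JointLocal& j : joints) {
        glm::quat combined = j.preRotation * j.rotation * j.postRotation;
        glm::mat4 local = glm::translate(glm::mat4(1.0f), j.translation) *
                          j.preTransform * glm::mat4_cast(combined) * j.postTransform;
        world.push_back(j.parentIndex == -1 ? local : world[j.parentIndex] * local);
    }
    return world;
}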

    // NOTE: shapeVertices are in joint-frame
    hfmModel.shapeVertices.resize(std::max(1, hfmModel.joints.size()) );

    hfmModel.bindExtents.reset();
    hfmModel.meshExtents.reset();

@ -1446,202 +1400,233 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
        }
    }
#endif

    std::unordered_map<std::string, uint32_t> materialNameToID;
    for (auto materialIt = _hfmMaterials.cbegin(); materialIt != _hfmMaterials.cend(); ++materialIt) {
        materialNameToID[materialIt.key().toStdString()] = (uint32_t)hfmModel.materials.size();
        hfmModel.materials.push_back(materialIt.value());
    }
    hfmModel.materials = _hfmMaterials;

    // see if any materials have texture children
    bool materialsHaveTextures = checkMaterialsHaveTextures(_hfmMaterials, _textureFilenames, _connectionChildMap);

    for (QMap<QString, ExtractedMesh>::iterator it = meshes.begin(); it != meshes.end(); it++) {
        const QString& meshID = it.key();
        const ExtractedMesh& extracted = it.value();
        const auto& partMaterialTextures = extracted.partMaterialTextures;
        ExtractedMesh& extracted = it.value();

        uint32_t meshIndex = (uint32_t)hfmModel.meshes.size();
        meshIDsToMeshIndices.insert(meshID, meshIndex);
        hfmModel.meshes.push_back(extracted.mesh);
        hfm::Mesh& mesh = hfmModel.meshes.back();
        extracted.mesh.meshExtents.reset();

        std::vector<QString> instanceModelIDs = getModelIDsForMeshID(meshID, fbxModels, _connectionParentMap);
        // meshShapes will be added to hfmModel at the very end
        std::vector<hfm::Shape> meshShapes;
        meshShapes.reserve(instanceModelIDs.size() * mesh.parts.size());
        for (const QString& modelID : instanceModelIDs) {
            // The transform node has the same indexing order as the joints
            int indexOfModelID = modelIDs.indexOf(modelID);
            if (indexOfModelID == -1) {
                qCDebug(modelformat) << "Model not in model list: " << modelID;
            }
            const uint32_t transformIndex = (indexOfModelID == -1) ? 0 : (uint32_t)indexOfModelID;
        // accumulate local transforms
        QString modelID = fbxModels.contains(it.key()) ? it.key() : _connectionParentMap.value(it.key());
        glm::mat4 modelTransform = getGlobalTransform(_connectionParentMap, fbxModels, modelID, hfmModel.applicationName == "mixamo.com", url);

            // partShapes will be added to meshShapes at the very end
            std::vector<hfm::Shape> partShapes { mesh.parts.size() };
            for (uint32_t i = 0; i < (uint32_t)partShapes.size(); ++i) {
                hfm::Shape& shape = partShapes[i];
                shape.mesh = meshIndex;
                shape.meshPart = i;
                shape.joint = transformIndex;
            }
        // compute the mesh extents from the transformed vertices
        foreach (const glm::vec3& vertex, extracted.mesh.vertices) {
            glm::vec3 transformedVertex = glm::vec3(modelTransform * glm::vec4(vertex, 1.0f));
            hfmModel.meshExtents.minimum = glm::min(hfmModel.meshExtents.minimum, transformedVertex);
            hfmModel.meshExtents.maximum = glm::max(hfmModel.meshExtents.maximum, transformedVertex);

            // For FBX_DRACO_MESH_VERSION < 2, or unbaked models, get materials from the partMaterialTextures
            if (!partMaterialTextures.empty()) {
                int materialIndex = 0;
                int textureIndex = 0;
                QList<QString> children = _connectionChildMap.values(modelID);
                for (int i = children.size() - 1; i >= 0; i--) {
                    const QString& childID = children.at(i);
                    if (_hfmMaterials.contains(childID)) {
                        // the pure material associated with this part
                        const HFMMaterial& material = _hfmMaterials.value(childID);
                        for (int j = 0; j < partMaterialTextures.size(); j++) {
                            if (partMaterialTextures.at(j).first == materialIndex) {
                                hfm::Shape& shape = partShapes[j];
                                shape.material = materialNameToID[material.materialID.toStdString()];
                            }
                        }
                        materialIndex++;
                    } else if (_textureFilenames.contains(childID)) {
                        // NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale")
                        // I'm leaving the second parameter blank right now as this code may never be used.
                        HFMTexture texture = getTexture(childID, "");
                        for (int j = 0; j < partMaterialTextures.size(); j++) {
                            int partTexture = partMaterialTextures.at(j).second;
                            if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) {
                                // TODO: DO something here that replaces this legacy code
                                // Maybe create a material just for this part with the correct textures?
                                // material.albedoTexture = texture;
                                // partShapes[j].material = materialIndex;
                            }
                        }
                        textureIndex++;
                    }
                }
            }
            // For baked models with FBX_DRACO_MESH_VERSION >= 2, get materials from extracted.materialIDPerMeshPart
            if (!extracted.materialIDPerMeshPart.empty()) {
                assert(partShapes.size() == extracted.materialIDPerMeshPart.size());
                for (uint32_t i = 0; i < (uint32_t)extracted.materialIDPerMeshPart.size(); ++i) {
                    hfm::Shape& shape = partShapes[i];
                    const std::string& materialID = extracted.materialIDPerMeshPart[i];
                    auto materialIt = materialNameToID.find(materialID);
                    if (materialIt != materialNameToID.end()) {
                        shape.material = materialIt->second;
                    }
                }
            }

            // find the clusters with which the mesh is associated
            QVector<QString> clusterIDs;
            for (const QString& childID : _connectionChildMap.values(meshID)) {
                for (const QString& clusterID : _connectionChildMap.values(childID)) {
                    if (!fbxClusters.contains(clusterID)) {
                        continue;
                    }
                    clusterIDs.append(clusterID);
                }
            }

            // whether we're skinned depends on how many clusters are attached
            if (clusterIDs.size() > 0) {
                hfm::SkinDeformer skinDeformer;
                auto& clusters = skinDeformer.clusters;
                for (const auto& clusterID : clusterIDs) {
                    HFMCluster hfmCluster;
                    const Cluster& fbxCluster = fbxClusters[clusterID];

                    // see http://stackoverflow.com/questions/13566608/loading-skinning-information-from-fbx for a discussion
                    // of skinning information in FBX
                    QString jointID = _connectionChildMap.value(clusterID);
                    int indexOfJointID = modelIDs.indexOf(jointID);
                    if (indexOfJointID == -1) {
                        qCDebug(modelformat) << "Joint not in model list: " << jointID;
                        hfmCluster.jointIndex = 0;
                    } else {
                        hfmCluster.jointIndex = (uint32_t)indexOfJointID;
                    }

                    const glm::mat4& transformForCluster = transformForClusters[transformIndex];
                    hfmCluster.inverseBindMatrix = glm::inverse(fbxCluster.transformLink) * transformForCluster;

                    // slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and
                    // sometimes floating point fuzz can be introduced after the inverse.
                    hfmCluster.inverseBindMatrix[0][3] = 0.0f;
                    hfmCluster.inverseBindMatrix[1][3] = 0.0f;
                    hfmCluster.inverseBindMatrix[2][3] = 0.0f;
                    hfmCluster.inverseBindMatrix[3][3] = 1.0f;

                    hfmCluster.inverseBindTransform = Transform(hfmCluster.inverseBindMatrix);

                    clusters.push_back(hfmCluster);

                    // override the bind rotation with the transform link
                    HFMJoint& joint = hfmModel.joints[hfmCluster.jointIndex];
                    joint.inverseBindRotation = glm::inverse(extractRotation(fbxCluster.transformLink));
                    joint.bindTransform = fbxCluster.transformLink;
                    joint.bindTransformFoundInCluster = true;

                    // update the bind pose extents
                    glm::vec3 bindTranslation = extractTranslation(hfmModel.offset * joint.bindTransform);
                    hfmModel.bindExtents.addPoint(bindTranslation);
                }

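For context, the inverse bind matrix assembled above is consumed at skinning time: each vertex is deformed by a weighted sum of (currentJointTransform * inverseBindMatrix) over its influencing clusters. A sketch of that linear-blend sum with glm:

#include <vector>
#include <glm/glm.hpp>

glm::vec3 skinPosition(const glm::vec3& restPosition,
                       const std::vector<glm::mat4>& jointTransforms,
                       const std::vector<glm::mat4>& inverseBindMatrices,
                       const std::vector<int>& jointIndices,   // influences for this vertex
                       const std::vector<float>& weights) {    // same length, sums to 1
    glm::vec4 skinned(0.0f);
    for (size_t i = 0; i < jointIndices.size(); ++i) {
        glm::mat4 skinMatrix = jointTransforms[jointIndices[i]] * inverseBindMatrices[jointIndices[i]];
        skinned += weights[i] * (skinMatrix * glm::vec4(restPosition, 1.0f));
    }
    return glm::vec3(skinned);
}
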
                // the last cluster is the root cluster
                HFMCluster cluster;
                cluster.jointIndex = transformIndex;
                clusters.push_back(cluster);

                // Skinned mesh instances have an hfm::SkinDeformer
                std::vector<hfm::SkinCluster> skinClusters;
                for (const auto& clusterID : clusterIDs) {
                    const Cluster& fbxCluster = fbxClusters[clusterID];
                    skinClusters.emplace_back();
                    hfm::SkinCluster& skinCluster = skinClusters.back();
                    size_t indexWeightPairs = (size_t)std::min(fbxCluster.indices.size(), fbxCluster.weights.size());
                    skinCluster.indices.reserve(indexWeightPairs);
                    skinCluster.weights.reserve(indexWeightPairs);

                    for (int j = 0; j < fbxCluster.indices.size(); j++) {
                        int oldIndex = fbxCluster.indices.at(j);
                        float weight = fbxCluster.weights.at(j);
                        for (QMultiHash<int, int>::const_iterator it = extracted.newIndices.constFind(oldIndex);
                                it != extracted.newIndices.end() && it.key() == oldIndex; it++) {
                            int newIndex = it.value();

                            skinCluster.indices.push_back(newIndex);
                            skinCluster.weights.push_back(weight);
                        }
                    }
                }
                // It seems odd that this mesh-related code should be inside of the for loop for instanced model IDs.
                // However, in practice, skinned FBX models appear to not be instanced, as the skinning includes both the weights and joints.
                {
                    hfm::ReweightedDeformers reweightedDeformers = hfm::getReweightedDeformers(mesh.vertices.size(), skinClusters);
                    if (reweightedDeformers.trimmedToMatch) {
                        qDebug(modelformat) << "FBXSerializer -- The number of indices and weights for a skinning deformer had different sizes and have been trimmed to match";
                    }
                    mesh.clusterIndices = std::move(reweightedDeformers.indices);
                    mesh.clusterWeights = std::move(reweightedDeformers.weights);
                    mesh.clusterWeightsPerVertex = reweightedDeformers.weightsPerVertex;
                }

                // Store the model's dynamic transform, and put its ID in the shapes
                uint32_t skinDeformerID = (uint32_t)hfmModel.skinDeformers.size();
                hfmModel.skinDeformers.push_back(skinDeformer);
                for (hfm::Shape& shape : partShapes) {
                    shape.skinDeformer = skinDeformerID;
                }
            }

            // Store the parts for this mesh (or instance of this mesh, as the case may be)
            meshShapes.insert(meshShapes.cend(), partShapes.cbegin(), partShapes.cend());
            extracted.mesh.meshExtents.minimum = glm::min(extracted.mesh.meshExtents.minimum, transformedVertex);
            extracted.mesh.meshExtents.maximum = glm::max(extracted.mesh.meshExtents.maximum, transformedVertex);
            extracted.mesh.modelTransform = modelTransform;
        }

        // Store the shapes for the mesh (or multiple instances of the mesh, as the case may be)
        hfmModel.shapes.insert(hfmModel.shapes.cend(), meshShapes.cbegin(), meshShapes.cend());
        // look for textures, material properties
        // allocate the Part material library
        // NOTE: extracted.partMaterialTextures is empty for FBX_DRACO_MESH_VERSION >= 2. In that case, the mesh part's materialID string is already defined.
        int materialIndex = 0;
        int textureIndex = 0;
        QList<QString> children = _connectionChildMap.values(modelID);
        for (int i = children.size() - 1; i >= 0; i--) {

            const QString& childID = children.at(i);
            if (_hfmMaterials.contains(childID)) {
                // the pure material associated with this part
                HFMMaterial material = _hfmMaterials.value(childID);

                for (int j = 0; j < extracted.partMaterialTextures.size(); j++) {
                    if (extracted.partMaterialTextures.at(j).first == materialIndex) {
|
||||
HFMMeshPart& part = extracted.mesh.parts[j];
|
||||
part.materialID = material.materialID;
|
||||
}
|
||||
}
|
||||
|
||||
materialIndex++;
|
||||
} else if (_textureFilenames.contains(childID)) {
|
||||
// NOTE (Sabrina 2019/01/11): getTextures now takes in the materialID as a second parameter, because FBX material nodes can sometimes have uv transform information (ex: "Maya|uv_scale")
|
||||
// I'm leaving the second parameter blank right now as this code may never be used.
|
||||
HFMTexture texture = getTexture(childID, "");
|
||||
for (int j = 0; j < extracted.partMaterialTextures.size(); j++) {
|
||||
int partTexture = extracted.partMaterialTextures.at(j).second;
|
||||
if (partTexture == textureIndex && !(partTexture == 0 && materialsHaveTextures)) {
|
||||
// TODO: DO something here that replaces this legacy code
|
||||
// Maybe create a material just for this part with the correct textures?
|
||||
// extracted.mesh.parts[j].diffuseTexture = texture;
|
||||
}
|
||||
}
|
||||
textureIndex++;
|
||||
}
|
||||
}
|
||||
|
||||
// find the clusters with which the mesh is associated
|
||||
QVector<QString> clusterIDs;
|
||||
foreach (const QString& childID, _connectionChildMap.values(it.key())) {
|
||||
foreach (const QString& clusterID, _connectionChildMap.values(childID)) {
|
||||
if (!clusters.contains(clusterID)) {
|
||||
continue;
|
||||
}
|
||||
HFMCluster hfmCluster;
|
||||
const Cluster& cluster = clusters[clusterID];
|
||||
clusterIDs.append(clusterID);
|
||||
|
||||
// see http://stackoverflow.com/questions/13566608/loading-skinning-information-from-fbx for a discussion
|
||||
// of skinning information in FBX
|
||||
QString jointID = _connectionChildMap.value(clusterID);
|
||||
hfmCluster.jointIndex = modelIDs.indexOf(jointID);
|
||||
if (hfmCluster.jointIndex == -1) {
|
||||
qCDebug(modelformat) << "Joint not in model list: " << jointID;
|
||||
hfmCluster.jointIndex = 0;
|
||||
}
|
||||
|
||||
hfmCluster.inverseBindMatrix = glm::inverse(cluster.transformLink) * modelTransform;
|
||||
|
||||
// slam bottom row to (0, 0, 0, 1), we KNOW this is not a perspective matrix and
|
||||
// sometimes floating point fuzz can be introduced after the inverse.
|
||||
hfmCluster.inverseBindMatrix[0][3] = 0.0f;
|
||||
hfmCluster.inverseBindMatrix[1][3] = 0.0f;
|
||||
hfmCluster.inverseBindMatrix[2][3] = 0.0f;
|
||||
hfmCluster.inverseBindMatrix[3][3] = 1.0f;
|
||||
|
||||
hfmCluster.inverseBindTransform = Transform(hfmCluster.inverseBindMatrix);
|
||||
|
||||
extracted.mesh.clusters.append(hfmCluster);
|
||||
|
||||
// override the bind rotation with the transform link
|
||||
HFMJoint& joint = hfmModel.joints[hfmCluster.jointIndex];
|
||||
joint.inverseBindRotation = glm::inverse(extractRotation(cluster.transformLink));
|
||||
joint.bindTransform = cluster.transformLink;
|
||||
joint.bindTransformFoundInCluster = true;
|
||||
|
||||
// update the bind pose extents
|
||||
glm::vec3 bindTranslation = extractTranslation(hfmModel.offset * joint.bindTransform);
|
||||
hfmModel.bindExtents.addPoint(bindTranslation);
|
||||
}
|
||||
}
|
||||
|
||||
// the last cluster is the root cluster
|
||||
{
|
||||
HFMCluster cluster;
|
||||
cluster.jointIndex = modelIDs.indexOf(modelID);
|
||||
if (cluster.jointIndex == -1) {
|
||||
qCDebug(modelformat) << "Model not in model list: " << modelID;
|
||||
cluster.jointIndex = 0;
|
||||
}
|
||||
extracted.mesh.clusters.append(cluster);
|
||||
}
|
||||
|
||||
// whether we're skinned depends on how many clusters are attached
|
||||
if (clusterIDs.size() > 1) {
|
||||
// this is a multi-mesh joint
|
||||
const int WEIGHTS_PER_VERTEX = 4;
|
||||
int numClusterIndices = extracted.mesh.vertices.size() * WEIGHTS_PER_VERTEX;
|
||||
extracted.mesh.clusterIndices.fill(extracted.mesh.clusters.size() - 1, numClusterIndices);
|
||||
QVector<float> weightAccumulators;
|
||||
weightAccumulators.fill(0.0f, numClusterIndices);
|
||||
|
||||
for (int i = 0; i < clusterIDs.size(); i++) {
|
||||
QString clusterID = clusterIDs.at(i);
|
||||
const Cluster& cluster = clusters[clusterID];
|
||||
const HFMCluster& hfmCluster = extracted.mesh.clusters.at(i);
|
||||
int jointIndex = hfmCluster.jointIndex;
|
||||
HFMJoint& joint = hfmModel.joints[jointIndex];
|
||||
|
||||
glm::mat4 meshToJoint = glm::inverse(joint.bindTransform) * modelTransform;
|
||||
ShapeVertices& points = hfmModel.shapeVertices.at(jointIndex);
|
||||
|
||||
for (int j = 0; j < cluster.indices.size(); j++) {
|
||||
int oldIndex = cluster.indices.at(j);
|
||||
float weight = cluster.weights.at(j);
|
||||
for (QMultiHash<int, int>::const_iterator it = extracted.newIndices.constFind(oldIndex);
|
||||
it != extracted.newIndices.end() && it.key() == oldIndex; it++) {
|
||||
int newIndex = it.value();
|
||||
|
||||
// remember vertices with at least 1/4 weight
|
||||
// FIXME: vertices with no weightpainting won't get recorded here
|
||||
const float EXPANSION_WEIGHT_THRESHOLD = 0.25f;
|
||||
if (weight >= EXPANSION_WEIGHT_THRESHOLD) {
|
||||
// transform to joint-frame and save for later
|
||||
const glm::mat4 vertexTransform = meshToJoint * glm::translate(extracted.mesh.vertices.at(newIndex));
|
||||
points.push_back(extractTranslation(vertexTransform));
|
||||
}
|
||||
|
||||
// look for an unused slot in the weights vector
|
||||
int weightIndex = newIndex * WEIGHTS_PER_VERTEX;
|
||||
int lowestIndex = -1;
|
||||
float lowestWeight = FLT_MAX;
|
||||
int k = 0;
|
||||
for (; k < WEIGHTS_PER_VERTEX; k++) {
|
||||
if (weightAccumulators[weightIndex + k] == 0.0f) {
|
||||
extracted.mesh.clusterIndices[weightIndex + k] = i;
|
||||
weightAccumulators[weightIndex + k] = weight;
|
||||
break;
|
||||
}
|
||||
if (weightAccumulators[weightIndex + k] < lowestWeight) {
|
||||
lowestIndex = k;
|
||||
lowestWeight = weightAccumulators[weightIndex + k];
|
||||
}
|
||||
}
|
||||
if (k == WEIGHTS_PER_VERTEX && weight > lowestWeight) {
|
||||
// no space for an additional weight; we must replace the lowest
|
||||
weightAccumulators[weightIndex + lowestIndex] = weight;
|
||||
extracted.mesh.clusterIndices[weightIndex + lowestIndex] = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// now that we've accumulated the most relevant weights for each vertex
|
||||
// normalize and compress to 16-bits
|
||||
extracted.mesh.clusterWeights.fill(0, numClusterIndices);
|
||||
int numVertices = extracted.mesh.vertices.size();
|
||||
for (int i = 0; i < numVertices; ++i) {
|
||||
int j = i * WEIGHTS_PER_VERTEX;
|
||||
|
||||
// normalize weights into uint16_t
|
||||
float totalWeight = 0.0f;
|
||||
for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) {
|
||||
totalWeight += weightAccumulators[k];
|
||||
}
|
||||
|
||||
const float ALMOST_HALF = 0.499f;
|
||||
if (totalWeight > 0.0f) {
|
||||
float weightScalingFactor = (float)(UINT16_MAX) / totalWeight;
|
||||
for (int k = j; k < j + WEIGHTS_PER_VERTEX; ++k) {
|
||||
extracted.mesh.clusterWeights[k] = (uint16_t)(weightScalingFactor * weightAccumulators[k] + ALMOST_HALF);
|
||||
}
|
||||
} else {
|
||||
extracted.mesh.clusterWeights[j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// this is a single-joint mesh
|
||||
const HFMCluster& firstHFMCluster = extracted.mesh.clusters.at(0);
|
||||
int jointIndex = firstHFMCluster.jointIndex;
|
||||
HFMJoint& joint = hfmModel.joints[jointIndex];
|
||||
|
||||
// transform cluster vertices to joint-frame and save for later
|
||||
glm::mat4 meshToJoint = glm::inverse(joint.bindTransform) * modelTransform;
|
||||
ShapeVertices& points = hfmModel.shapeVertices.at(jointIndex);
|
||||
foreach (const glm::vec3& vertex, extracted.mesh.vertices) {
|
||||
const glm::mat4 vertexTransform = meshToJoint * glm::translate(vertex);
|
||||
points.push_back(extractTranslation(vertexTransform));
|
||||
}
|
||||
|
||||
// Apply geometric offset, if present, by transforming the vertices directly
|
||||
if (joint.hasGeometricOffset) {
|
||||
glm::mat4 geometricOffset = createMatFromScaleQuatAndPos(joint.geometricScaling, joint.geometricRotation, joint.geometricTranslation);
|
||||
for (int i = 0; i < extracted.mesh.vertices.size(); i++) {
|
||||
extracted.mesh.vertices[i] = transformPoint(geometricOffset, extracted.mesh.vertices[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
hfmModel.meshes.append(extracted.mesh);
|
||||
int meshIndex = hfmModel.meshes.size() - 1;
|
||||
meshIDsToMeshIndices.insert(it.key(), meshIndex);
|
||||
}
|
||||
|
||||
// attempt to map any meshes to a named model
|
||||
|
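Editor's note: both variants of the skinning code above implement the same scheme: each vertex keeps at most four influences, a new weight evicts the smallest one once all slots are full, and the surviving weights are normalized so they sum to UINT16_MAX. The following is a minimal standalone sketch of that scheme, for illustration only; the helper names accumulateWeight and quantizeWeights are invented for this note and are not part of the codebase.

#include <cstdint>
#include <vector>

constexpr int WEIGHTS_PER_VERTEX = 4;

// Record (clusterIndex, weight) for a vertex: fill an empty slot if one
// exists, otherwise evict the smallest weight when the new one is larger.
void accumulateWeight(std::vector<uint16_t>& indices, std::vector<float>& weights,
                      size_t vertex, uint16_t clusterIndex, float weight) {
    size_t base = vertex * WEIGHTS_PER_VERTEX;
    size_t lowest = base;
    for (size_t k = base; k < base + WEIGHTS_PER_VERTEX; ++k) {
        if (weights[k] == 0.0f) {          // unused slot
            indices[k] = clusterIndex;
            weights[k] = weight;
            return;
        }
        if (weights[k] < weights[lowest]) {
            lowest = k;
        }
    }
    if (weight > weights[lowest]) {        // replace the least significant influence
        indices[lowest] = clusterIndex;
        weights[lowest] = weight;
    }
}

// Normalize each vertex's accumulated weights so they sum to UINT16_MAX,
// rounding to nearest; an unpainted vertex gets full weight on slot 0.
void quantizeWeights(const std::vector<float>& weights, std::vector<uint16_t>& out) {
    const float ALMOST_HALF = 0.499f;
    for (size_t base = 0; base + WEIGHTS_PER_VERTEX <= weights.size(); base += WEIGHTS_PER_VERTEX) {
        float total = 0.0f;
        for (int k = 0; k < WEIGHTS_PER_VERTEX; ++k) {
            total += weights[base + k];
        }
        if (total > 0.0f) {
            float scale = float(UINT16_MAX) / total;
            for (int k = 0; k < WEIGHTS_PER_VERTEX; ++k) {
                out[base + k] = uint16_t(scale * weights[base + k] + ALMOST_HALF);
            }
        } else {
            out[base] = UINT16_MAX;
        }
    }
}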
@@ -1660,6 +1645,14 @@ HFMModel* FBXSerializer::extractHFMModel(const hifi::VariantHash& mapping, const
        }
    }

    if (applyUpAxisZRotation) {
        hfmModelPtr->meshExtents.transform(glm::mat4_cast(upAxisZRotation));
        hfmModelPtr->bindExtents.transform(glm::mat4_cast(upAxisZRotation));
        for (auto& mesh : hfmModelPtr->meshes) {
            mesh.modelTransform *= glm::mat4_cast(upAxisZRotation);
            mesh.meshExtents.transform(glm::mat4_cast(upAxisZRotation));
        }
    }
    return hfmModelPtr;
}

@@ -100,15 +100,7 @@ public:
    {}
};

class ExtractedMesh {
public:
    hfm::Mesh mesh;
    std::vector<std::string> materialIDPerMeshPart;
    QMultiHash<int, int> newIndices;
    QVector<QHash<int, int> > blendshapeIndexMaps;
    QVector<QPair<int, int> > partMaterialTextures;
    QHash<QString, size_t> texcoordSetMap;
};
class ExtractedMesh;

class FBXSerializer : public HFMSerializer {
public:

@@ -355,7 +355,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me

    // Check for additional metadata
    unsigned int dracoMeshNodeVersion = 1;
    std::vector<std::string> dracoMaterialList;
    std::vector<QString> dracoMaterialList;
    for (const auto& dracoChild : child.children) {
        if (dracoChild.name == "FBXDracoMeshVersion") {
            if (!dracoChild.properties.isEmpty()) {

@@ -364,7 +364,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me
        } else if (dracoChild.name == "MaterialList") {
            dracoMaterialList.reserve(dracoChild.properties.size());
            for (const auto& materialID : dracoChild.properties) {
                dracoMaterialList.push_back(materialID.toString().toStdString());
                dracoMaterialList.push_back(materialID.toString());
            }
        }
    }

@@ -486,20 +486,21 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me
    // grab or setup the HFMMeshPart for the part this face belongs to
    int& partIndexPlusOne = materialTextureParts[materialTexture];
    if (partIndexPlusOne == 0) {
        data.extracted.mesh.parts.emplace_back();
        data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1);
        HFMMeshPart& part = data.extracted.mesh.parts.back();

        // Figure out if this is the older way of defining the per-part material for baked FBX
        // Figure out what material this part is
        if (dracoMeshNodeVersion >= 2) {
            // Define the materialID for this mesh part index
            uint16_t safeMaterialID = materialID < dracoMaterialList.size() ? materialID : 0;
            data.extracted.materialIDPerMeshPart.push_back(dracoMaterialList[safeMaterialID].c_str());
            // Define the materialID now
            if (materialID < dracoMaterialList.size()) {
                part.materialID = dracoMaterialList[materialID];
            }
        } else {
            // Define the materialID later, based on the order of first appearance of the materials in the _connectionChildMap
            data.extracted.partMaterialTextures.append(materialTexture);
        }
        // in dracoMeshNodeVersion >= 2, fbx meshes have their per-part materials already defined in data.extracted.materialIDPerMeshPart

        partIndexPlusOne = (int)data.extracted.mesh.parts.size();
        partIndexPlusOne = data.extracted.mesh.parts.size();
    }

    // give the mesh part this index

@@ -534,7 +535,7 @@ ExtractedMesh FBXSerializer::extractMesh(const FBXNode& object, unsigned int& me
    if (partIndex == 0) {
        data.extracted.partMaterialTextures.append(materialTexture);
        data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1);
        partIndex = (int)data.extracted.mesh.parts.size();
        partIndex = data.extracted.mesh.parts.size();
    }
    HFMMeshPart& part = data.extracted.mesh.parts[partIndex - 1];

@@ -77,7 +77,7 @@ FST* FST::createFSTFromModel(const QString& fstPath, const QString& modelFilePat
    mapping.insert(JOINT_FIELD, joints);

    QVariantHash jointIndices;
    for (size_t i = 0; i < (size_t)hfmModel.joints.size(); i++) {
    for (int i = 0; i < hfmModel.joints.size(); i++) {
        jointIndices.insert(hfmModel.joints.at(i).name, QString::number(i));
    }
    mapping.insert(JOINT_INDEX_FIELD, jointIndices);

File diff suppressed because it is too large
@@ -38,15 +38,15 @@ struct GLTFAsset {

struct GLTFNode {
    QString name;
    int camera{ -1 };
    int mesh{ -1 };
    int camera;
    int mesh;
    QVector<int> children;
    QVector<double> translation;
    QVector<double> rotation;
    QVector<double> scale;
    QVector<double> matrix;
    glm::mat4 transform;
    int skin { -1 };
    QVector<glm::mat4> transforms;
    int skin;
    QVector<int> skeletons;
    QString jointName;
    QMap<QString, bool> defined;

@@ -85,8 +85,6 @@ struct GLTFNode {
        qCDebug(modelformat) << "skeletons: " << skeletons;
    }
}

void normalizeTransform();
};

// Meshes

@@ -460,56 +458,15 @@ struct GLTFMaterial {
// Accessors

namespace GLTFAccessorType {
    enum Value {
        SCALAR = 1,
        VEC2 = 2,
        VEC3 = 3,
        VEC4 = 4,
        MAT2 = 5,
        MAT3 = 9,
        MAT4 = 16
    enum Values {
        SCALAR = 0,
        VEC2,
        VEC3,
        VEC4,
        MAT2,
        MAT3,
        MAT4
    };

    inline int count(Value value) {
        if (value == MAT2) {
            return 4;
        }
        return (int)value;
    }
}

namespace GLTFVertexAttribute {
    enum Value {
        UNKNOWN = -1,
        POSITION = 0,
        NORMAL,
        TANGENT,
        TEXCOORD_0,
        TEXCOORD_1,
        COLOR_0,
        JOINTS_0,
        WEIGHTS_0,
    };
    inline Value fromString(const QString& key) {
        if (key == "POSITION") {
            return POSITION;
        } else if (key == "NORMAL") {
            return NORMAL;
        } else if (key == "TANGENT") {
            return TANGENT;
        } else if (key == "TEXCOORD_0") {
            return TEXCOORD_0;
        } else if (key == "TEXCOORD_1") {
            return TEXCOORD_1;
        } else if (key == "COLOR_0") {
            return COLOR_0;
        } else if (key == "JOINTS_0") {
            return JOINTS_0;
        } else if (key == "WEIGHTS_0") {
            return WEIGHTS_0;
        }
        return UNKNOWN;
    }
}
namespace GLTFAccessorComponentType {
    enum Values {

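Editor's note: the hunk above replaces an enum whose numeric values doubled as element counts (with count() special-casing MAT2, whose tag 5 is not its component count of 4) by a plain sequential enum. With sequential values, the count has to come from an explicit table. A hedged sketch of such a lookup follows, using the per-type component counts from the glTF 2.0 specification; elementCount is an invented name for this note, not the project's actual helper.

namespace GLTFAccessorType {
    enum Values { SCALAR = 0, VEC2, VEC3, VEC4, MAT2, MAT3, MAT4 };

    // Components per element for each accessor type, per the glTF 2.0 spec:
    // SCALAR=1, VEC2=2, VEC3=3, VEC4=4, MAT2=4, MAT3=9, MAT4=16.
    inline int elementCount(Values value) {
        static const int counts[] = { 1, 2, 3, 4, 4, 9, 16 };
        return counts[value];
    }
}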
@@ -801,13 +758,6 @@ struct GLTFFile {
        foreach(auto tex, textures) tex.dump();
    }
}


void populateMaterialNames();
void sortNodes();
void normalizeNodeTransforms();
private:
void reorderNodes(const std::unordered_map<int, int>& reorderMap);
};

class GLTFSerializer : public QObject, public HFMSerializer {

@@ -822,7 +772,7 @@ private:
    hifi::URL _url;
    hifi::ByteArray _glbBinary;

    const glm::mat4& getModelTransform(const GLTFNode& node);
    glm::mat4 getModelTransform(const GLTFNode& node);
    void getSkinInverseBindMatrices(std::vector<std::vector<float>>& inverseBindMatrixValues);
    void generateTargetData(int index, float weight, QVector<glm::vec3>& returnVector);


@@ -891,9 +841,6 @@ private:
    template <typename T>
    bool addArrayFromAccessor(GLTFAccessor& accessor, QVector<T>& outarray);

    template <typename T>
    bool addArrayFromAttribute(GLTFVertexAttribute::Value vertexAttribute, GLTFAccessor& accessor, QVector<T>& outarray);

    void retriangulate(const QVector<int>& in_indices, const QVector<glm::vec3>& in_vertices,
        const QVector<glm::vec3>& in_normals, QVector<int>& out_indices,
        QVector<glm::vec3>& out_vertices, QVector<glm::vec3>& out_normals);

@@ -174,6 +174,11 @@ glm::vec2 OBJTokenizer::getVec2() {
    return v;
}


void setMeshPartDefaults(HFMMeshPart& meshPart, QString materialID) {
    meshPart.materialID = materialID;
}

// OBJFace
// NOTE (trent, 7/20/17): The vertexColors vector being passed-in isn't necessary here, but I'm just
// pairing it with the vertices vector for consistency.

@@ -487,7 +492,8 @@ bool OBJSerializer::parseOBJGroup(OBJTokenizer& tokenizer, const hifi::VariantHa
    float& scaleGuess, bool combineParts) {
    FaceGroup faces;
    HFMMesh& mesh = hfmModel.meshes[0];
    mesh.parts.push_back(HFMMeshPart());
    mesh.parts.append(HFMMeshPart());
    HFMMeshPart& meshPart = mesh.parts.last();
    bool sawG = false;
    bool result = true;
    int originalFaceCountForDebugging = 0;

@@ -495,6 +501,8 @@ bool OBJSerializer::parseOBJGroup(OBJTokenizer& tokenizer, const hifi::VariantHa
    bool anyVertexColor { false };
    int vertexCount { 0 };

    setMeshPartDefaults(meshPart, QString("dontknow") + QString::number(mesh.parts.count()));

    while (true) {
        int tokenType = tokenizer.nextToken();
        if (tokenType == OBJTokenizer::COMMENT_TOKEN) {

@@ -667,19 +675,17 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V

    _url = url;
    bool combineParts = mapping.value("combineParts").toBool();
    hfmModel.meshes.push_back(HFMMesh());
    hfmModel.meshExtents.reset();
    hfmModel.meshes.append(HFMMesh());

    std::vector<QString> materialNamePerShape;
    try {
        // call parseOBJGroup as long as it's returning true. Each successful call will
        // add a new meshPart to the model's single mesh.
        while (parseOBJGroup(tokenizer, mapping, hfmModel, scaleGuess, combineParts)) {}

        uint32_t meshIndex = 0;
        HFMMesh& mesh = hfmModel.meshes[meshIndex];
        mesh.meshIndex = meshIndex;
        HFMMesh& mesh = hfmModel.meshes[0];
        mesh.meshIndex = 0;

        uint32_t jointIndex = 0;
        hfmModel.joints.resize(1);
        hfmModel.joints[0].parentIndex = -1;
        hfmModel.joints[0].distanceToParent = 0;

@@ -691,11 +697,19 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V

        hfmModel.jointIndices["x"] = 1;

        HFMCluster cluster;
        cluster.jointIndex = 0;
        cluster.inverseBindMatrix = glm::mat4(1, 0, 0, 0,
                                              0, 1, 0, 0,
                                              0, 0, 1, 0,
                                              0, 0, 0, 1);
        mesh.clusters.append(cluster);

        QMap<QString, int> materialMeshIdMap;
        std::vector<HFMMeshPart> hfmMeshParts;
        for (uint32_t meshPartIndex = 0; meshPartIndex < (uint32_t)mesh.parts.size(); ++meshPartIndex) {
            HFMMeshPart& meshPart = mesh.parts[meshPartIndex];
            FaceGroup faceGroup = faceGroups[meshPartIndex];
        QVector<HFMMeshPart> hfmMeshParts;
        for (int i = 0, meshPartCount = 0; i < mesh.parts.count(); i++, meshPartCount++) {
            HFMMeshPart& meshPart = mesh.parts[i];
            FaceGroup faceGroup = faceGroups[meshPartCount];
            bool specifiesUV = false;
            foreach(OBJFace face, faceGroup) {
                // Go through all of the OBJ faces and determine the number of different materials necessary (each different material will be a unique mesh).

@@ -704,13 +718,12 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
                    // Create a new HFMMesh for this material mapping.
                    materialMeshIdMap.insert(face.materialName, materialMeshIdMap.count());

                    uint32_t partIndex = (int)hfmMeshParts.size();
                    hfmMeshParts.push_back(HFMMeshPart());
                    HFMMeshPart& meshPartNew = hfmMeshParts.back();
                    hfmMeshParts.append(HFMMeshPart());
                    HFMMeshPart& meshPartNew = hfmMeshParts.last();
                    meshPartNew.quadIndices = QVector<int>(meshPart.quadIndices); // Copy over quad indices [NOTE (trent/mittens, 4/3/17): Likely unnecessary since they go unused anyway].
                    meshPartNew.quadTrianglesIndices = QVector<int>(meshPart.quadTrianglesIndices); // Copy over quad triangulated indices [NOTE (trent/mittens, 4/3/17): Likely unnecessary since they go unused anyway].
                    meshPartNew.triangleIndices = QVector<int>(meshPart.triangleIndices); // Copy over triangle indices.


                    // Do some of the material logic (which previously lived below) now.
                    // All the faces in the same group will have the same name and material.
                    QString groupMaterialName = face.materialName;

@@ -732,26 +745,19 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
                        needsMaterialLibrary = groupMaterialName != SMART_DEFAULT_MATERIAL_NAME;
                    }
                    materials[groupMaterialName] = material;
                    meshPartNew.materialID = groupMaterialName;
                }
                materialNamePerShape.push_back(groupMaterialName);


                hfm::Shape shape;
                shape.mesh = meshIndex;
                shape.joint = jointIndex;
                shape.meshPart = partIndex;
                hfmModel.shapes.push_back(shape);
            }
        }
    }

        // clean up old mesh parts.
        auto unmodifiedMeshPartCount = (uint32_t)mesh.parts.size();
        int unmodifiedMeshPartCount = mesh.parts.count();
        mesh.parts.clear();
        mesh.parts = hfmMeshParts;
        mesh.parts = QVector<HFMMeshPart>(hfmMeshParts);

        for (uint32_t meshPartIndex = 0; meshPartIndex < unmodifiedMeshPartCount; meshPartIndex++) {
            FaceGroup faceGroup = faceGroups[meshPartIndex];
        for (int i = 0, meshPartCount = 0; i < unmodifiedMeshPartCount; i++, meshPartCount++) {
            FaceGroup faceGroup = faceGroups[meshPartCount];

            // Now that each mesh has been created with its own unique material mappings, fill them with data (vertex data is duplicated, face data is not).
            foreach(OBJFace face, faceGroup) {

@@ -817,13 +823,18 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
                }
            }
        }

        mesh.meshExtents.reset();
        foreach(const glm::vec3& vertex, mesh.vertices) {
            mesh.meshExtents.addPoint(vertex);
            hfmModel.meshExtents.addPoint(vertex);
        }

        // hfmDebugDump(hfmModel);
    } catch(const std::exception& e) {
        qCDebug(modelformat) << "OBJSerializer fail: " << e.what();
    }

    // At this point, the hfmModel joints, meshes, parts and shapes have been defined;
    // only the materials are still unassigned.

    QString queryPart = _url.query();
    bool suppressMaterialsHack = queryPart.contains("hifiusemat"); // If this appears in query string, don't fetch mtl even if used.
    OBJMaterial& preDefinedMaterial = materials[SMART_DEFAULT_MATERIAL_NAME];

@@ -875,23 +886,17 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
        }
    }

    // As we are populating the material list in the hfmModel, let's also create the reverse map (from materialName to index)
    QMap<QString, uint32_t> materialNameToIndex;
    foreach (QString materialID, materials.keys()) {
        OBJMaterial& objMaterial = materials[materialID];
        if (!objMaterial.used) {
            continue;
        }

        // capture the name to index map
        materialNameToIndex[materialID] = (uint32_t) hfmModel.materials.size();

        hfmModel.materials.emplace_back(objMaterial.diffuseColor,
                                        objMaterial.specularColor,
                                        objMaterial.emissiveColor,
                                        objMaterial.shininess,
                                        objMaterial.opacity);
        HFMMaterial& hfmMaterial = hfmModel.materials.back();
        HFMMaterial& hfmMaterial = hfmModel.materials[materialID] = HFMMaterial(objMaterial.diffuseColor,
                                                                                objMaterial.specularColor,
                                                                                objMaterial.emissiveColor,
                                                                                objMaterial.shininess,
                                                                                objMaterial.opacity);

        hfmMaterial.name = materialID;
        hfmMaterial.materialID = materialID;

@@ -991,16 +996,77 @@ HFMModel::Pointer OBJSerializer::read(const hifi::ByteArray& data, const hifi::V
        modelMaterial->setOpacity(hfmMaterial.opacity);
    }

    // Go over the shapes once more to assign the material index correctly
    for (uint32_t i = 0; i < (uint32_t)hfmModel.shapes.size(); ++i) {
        const auto& materialName = materialNamePerShape[i];
        if (!materialName.isEmpty()) {
            auto foundMaterialIndex = materialNameToIndex.find(materialName);
            if (foundMaterialIndex != materialNameToIndex.end()) {
                hfmModel.shapes[i].material = foundMaterialIndex.value();
    return hfmModelPtr;
            }
        }

void hfmDebugDump(const HFMModel& hfmModel) {
    qCDebug(modelformat) << "---------------- hfmModel ----------------";
    qCDebug(modelformat) << "  hasSkeletonJoints =" << hfmModel.hasSkeletonJoints;
    qCDebug(modelformat) << "  offset =" << hfmModel.offset;
    qCDebug(modelformat) << "  meshes.count() =" << hfmModel.meshes.count();
    foreach (HFMMesh mesh, hfmModel.meshes) {
        qCDebug(modelformat) << "    vertices.count() =" << mesh.vertices.count();
        qCDebug(modelformat) << "    colors.count() =" << mesh.colors.count();
        qCDebug(modelformat) << "    normals.count() =" << mesh.normals.count();
        /*if (mesh.normals.count() == mesh.vertices.count()) {
            for (int i = 0; i < mesh.normals.count(); i++) {
                qCDebug(modelformat) << "    " << mesh.vertices[ i ] << mesh.normals[ i ];
            }
        }*/
        qCDebug(modelformat) << "    tangents.count() =" << mesh.tangents.count();
        qCDebug(modelformat) << "    colors.count() =" << mesh.colors.count();
        qCDebug(modelformat) << "    texCoords.count() =" << mesh.texCoords.count();
        qCDebug(modelformat) << "    texCoords1.count() =" << mesh.texCoords1.count();
        qCDebug(modelformat) << "    clusterIndices.count() =" << mesh.clusterIndices.count();
        qCDebug(modelformat) << "    clusterWeights.count() =" << mesh.clusterWeights.count();
        qCDebug(modelformat) << "    meshExtents =" << mesh.meshExtents;
        qCDebug(modelformat) << "    modelTransform =" << mesh.modelTransform;
        qCDebug(modelformat) << "    parts.count() =" << mesh.parts.count();
        foreach (HFMMeshPart meshPart, mesh.parts) {
            qCDebug(modelformat) << "        quadIndices.count() =" << meshPart.quadIndices.count();
            qCDebug(modelformat) << "        triangleIndices.count() =" << meshPart.triangleIndices.count();
            /*
            qCDebug(modelformat) << "        diffuseColor =" << meshPart.diffuseColor << "mat =" << meshPart._material->getDiffuse();
            qCDebug(modelformat) << "        specularColor =" << meshPart.specularColor << "mat =" << meshPart._material->getMetallic();
            qCDebug(modelformat) << "        emissiveColor =" << meshPart.emissiveColor << "mat =" << meshPart._material->getEmissive();
            qCDebug(modelformat) << "        emissiveParams =" << meshPart.emissiveParams;
            qCDebug(modelformat) << "        gloss =" << meshPart.shininess << "mat =" << meshPart._material->getRoughness();
            qCDebug(modelformat) << "        opacity =" << meshPart.opacity << "mat =" << meshPart._material->getOpacity();
            */
            qCDebug(modelformat) << "        materialID =" << meshPart.materialID;
            /*  qCDebug(modelformat) << "        diffuse texture =" << meshPart.diffuseTexture.filename;
                qCDebug(modelformat) << "        specular texture =" << meshPart.specularTexture.filename;
            */
        }
        qCDebug(modelformat) << "    clusters.count() =" << mesh.clusters.count();
        foreach (HFMCluster cluster, mesh.clusters) {
            qCDebug(modelformat) << "        jointIndex =" << cluster.jointIndex;
            qCDebug(modelformat) << "        inverseBindMatrix =" << cluster.inverseBindMatrix;
        }
    }

    return hfmModelPtr;
    qCDebug(modelformat) << "  jointIndices =" << hfmModel.jointIndices;
    qCDebug(modelformat) << "  joints.count() =" << hfmModel.joints.count();

    foreach (HFMJoint joint, hfmModel.joints) {

        qCDebug(modelformat) << "    parentIndex" << joint.parentIndex;
        qCDebug(modelformat) << "    distanceToParent" << joint.distanceToParent;
        qCDebug(modelformat) << "    translation" << joint.translation;
        qCDebug(modelformat) << "    preTransform" << joint.preTransform;
        qCDebug(modelformat) << "    preRotation" << joint.preRotation;
        qCDebug(modelformat) << "    rotation" << joint.rotation;
        qCDebug(modelformat) << "    postRotation" << joint.postRotation;
        qCDebug(modelformat) << "    postTransform" << joint.postTransform;
        qCDebug(modelformat) << "    transform" << joint.transform;
        qCDebug(modelformat) << "    rotationMin" << joint.rotationMin;
        qCDebug(modelformat) << "    rotationMax" << joint.rotationMax;
        qCDebug(modelformat) << "    inverseDefaultRotation" << joint.inverseDefaultRotation;
        qCDebug(modelformat) << "    inverseBindRotation" << joint.inverseBindRotation;
        qCDebug(modelformat) << "    bindTransform" << joint.bindTransform;
        qCDebug(modelformat) << "    name" << joint.name;
        qCDebug(modelformat) << "    isSkeletonJoint" << joint.isSkeletonJoint;
    }

    qCDebug(modelformat) << "\n";
}

@@ -120,5 +120,6 @@ private:

// What are these utilities doing here? One is used by fbx loading code in VHACD Utils, and the other a general debugging utility.
void setMeshPartDefaults(HFMMeshPart& meshPart, QString materialID);
void hfmDebugDump(const HFMModel& hfmModel);

#endif // hifi_OBJSerializer_h

@@ -76,7 +76,7 @@ QStringList HFMModel::getJointNames() const {
}

bool HFMModel::hasBlendedMeshes() const {
    if (!meshes.empty()) {
    if (!meshes.isEmpty()) {
        foreach (const HFMMesh& mesh, meshes) {
            if (!mesh.blendshapes.isEmpty()) {
                return true;

@@ -166,16 +166,16 @@ void HFMModel::computeKdops() {
        glm::vec3(INV_SQRT_3, INV_SQRT_3, -INV_SQRT_3),
        glm::vec3(INV_SQRT_3, -INV_SQRT_3, -INV_SQRT_3)
    };
    if (joints.size() != shapeVertices.size()) {
    if (joints.size() != (int)shapeVertices.size()) {
        return;
    }
    // now that all joints have been scanned compute a k-Dop bounding volume of mesh
    for (size_t i = 0; i < joints.size(); ++i) {
    for (int i = 0; i < joints.size(); ++i) {
        HFMJoint& joint = joints[i];

        // NOTE: points are in joint-frame
        ShapeVertices& points = shapeVertices.at(i);
        glm::quat rotOffset = jointRotationOffsets.contains((int)i) ? glm::inverse(jointRotationOffsets[(int)i]) : quat();
        glm::quat rotOffset = jointRotationOffsets.contains(i) ? glm::inverse(jointRotationOffsets[i]) : quat();
        if (points.size() > 0) {
            // compute average point
            glm::vec3 avgPoint = glm::vec3(0.0f);

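Editor's note: computeKdops() bounds each joint's vertices with a k-DOP, that is, the point set's extreme projections along a fixed set of directions; the INV_SQRT_3 vectors above are the diagonal directions of such a volume. The following is a standalone sketch of the projection step, assuming nothing about the engine beyond GLM; KDopSlab and computeKDop are invented names for this note.

#include <algorithm>
#include <cfloat>
#include <vector>
#include <glm/glm.hpp>

// One slab per direction: the min/max projections of the point set.
struct KDopSlab { float min { FLT_MAX }; float max { -FLT_MAX }; };

// A point is inside the k-DOP iff, for every direction d, its dot product
// with d lies within that direction's [min, max] interval.
std::vector<KDopSlab> computeKDop(const std::vector<glm::vec3>& points,
                                  const std::vector<glm::vec3>& directions) {
    std::vector<KDopSlab> slabs(directions.size());
    for (const glm::vec3& p : points) {
        for (size_t d = 0; d < directions.size(); ++d) {
            float dot = glm::dot(p, directions[d]);
            slabs[d].min = std::min(slabs[d].min, dot);
            slabs[d].max = std::max(slabs[d].max, dot);
        }
    }
    return slabs;
}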
@@ -208,164 +208,3 @@ void HFMModel::computeKdops() {
        }
    }
}

void hfm::Model::debugDump() const {
    qCDebug(modelformat) << "---------------- hfmModel ----------------";
    qCDebug(modelformat) << "  hasSkeletonJoints =" << hasSkeletonJoints;
    qCDebug(modelformat) << "  offset =" << offset;

    qCDebug(modelformat) << "  neckPivot = " << neckPivot;

    qCDebug(modelformat) << "  bindExtents.size() = " << bindExtents.size();
    qCDebug(modelformat) << "  meshExtents.size() = " << meshExtents.size();

    qCDebug(modelformat) << "---------------- Shapes ----------------";
    qCDebug(modelformat) << "  shapes.size() =" << shapes.size();
    for (const hfm::Shape& shape : shapes) {
        qCDebug(modelformat) << "\n";
        qCDebug(modelformat) << "    mesh =" << shape.mesh;
        qCDebug(modelformat) << "    meshPart =" << shape.meshPart;
        qCDebug(modelformat) << "    material =" << shape.material;
        qCDebug(modelformat) << "    joint =" << shape.joint;
        qCDebug(modelformat) << "    transformedExtents =" << shape.transformedExtents;
        qCDebug(modelformat) << "    skinDeformer =" << shape.skinDeformer;
    }

    qCDebug(modelformat) << "  jointIndices.size() =" << jointIndices.size();
    qCDebug(modelformat) << "  joints.size() =" << joints.size();
    qCDebug(modelformat) << "---------------- Meshes ----------------";
    qCDebug(modelformat) << "  meshes.size() =" << meshes.size();
    qCDebug(modelformat) << "  blendshapeChannelNames = " << blendshapeChannelNames;
    for (const HFMMesh& mesh : meshes) {
        qCDebug(modelformat) << "\n";
        qCDebug(modelformat) << "    meshpointer =" << mesh._mesh.get();
        qCDebug(modelformat) << "    meshindex =" << mesh.meshIndex;
        qCDebug(modelformat) << "    vertices.size() =" << mesh.vertices.size();
        qCDebug(modelformat) << "    colors.size() =" << mesh.colors.size();
        qCDebug(modelformat) << "    normals.size() =" << mesh.normals.size();
        qCDebug(modelformat) << "    tangents.size() =" << mesh.tangents.size();
        qCDebug(modelformat) << "    colors.size() =" << mesh.colors.size();
        qCDebug(modelformat) << "    texCoords.size() =" << mesh.texCoords.size();
        qCDebug(modelformat) << "    texCoords1.size() =" << mesh.texCoords1.size();
        qCDebug(modelformat) << "    clusterIndices.size() =" << mesh.clusterIndices.size();
        qCDebug(modelformat) << "    clusterWeights.size() =" << mesh.clusterWeights.size();
        qCDebug(modelformat) << "    modelTransform =" << mesh.modelTransform;
        qCDebug(modelformat) << "    parts.size() =" << mesh.parts.size();
        qCDebug(modelformat) << "---------------- Meshes (blendshapes)--------";
        for (HFMBlendshape bshape : mesh.blendshapes) {
            qCDebug(modelformat) << "\n";
            qCDebug(modelformat) << "    bshape.indices.size() =" << bshape.indices.size();
            qCDebug(modelformat) << "    bshape.vertices.size() =" << bshape.vertices.size();
            qCDebug(modelformat) << "    bshape.normals.size() =" << bshape.normals.size();
            qCDebug(modelformat) << "\n";
        }
        qCDebug(modelformat) << "---------------- Meshes (meshparts)--------";
        for (HFMMeshPart meshPart : mesh.parts) {
            qCDebug(modelformat) << "\n";
            qCDebug(modelformat) << "    quadIndices.size() =" << meshPart.quadIndices.size();
            qCDebug(modelformat) << "    triangleIndices.size() =" << meshPart.triangleIndices.size();
            qCDebug(modelformat) << "\n";
        }
    }
    qCDebug(modelformat) << "---------------- AnimationFrames ----------------";
    for (HFMAnimationFrame anim : animationFrames) {
        qCDebug(modelformat) << "  anim.translations = " << anim.translations;
        qCDebug(modelformat) << "  anim.rotations = " << anim.rotations;
    }
    QList<int> mitomona_keys = meshIndicesToModelNames.keys();
    for (int key : mitomona_keys) {
        qCDebug(modelformat) << "    meshIndicesToModelNames key =" << key << "  val =" << meshIndicesToModelNames[key];
    }

    qCDebug(modelformat) << "---------------- Materials ----------------";

    for (HFMMaterial mat : materials) {
        qCDebug(modelformat) << "\n";
        qCDebug(modelformat) << "  mat.materialID =" << mat.materialID;
        qCDebug(modelformat) << "  diffuseColor =" << mat.diffuseColor;
        qCDebug(modelformat) << "  diffuseFactor =" << mat.diffuseFactor;
        qCDebug(modelformat) << "  specularColor =" << mat.specularColor;
        qCDebug(modelformat) << "  specularFactor =" << mat.specularFactor;
        qCDebug(modelformat) << "  emissiveColor =" << mat.emissiveColor;
        qCDebug(modelformat) << "  emissiveFactor =" << mat.emissiveFactor;
        qCDebug(modelformat) << "  shininess =" << mat.shininess;
        qCDebug(modelformat) << "  opacity =" << mat.opacity;
        qCDebug(modelformat) << "  metallic =" << mat.metallic;
        qCDebug(modelformat) << "  roughness =" << mat.roughness;
        qCDebug(modelformat) << "  emissiveIntensity =" << mat.emissiveIntensity;
        qCDebug(modelformat) << "  ambientFactor =" << mat.ambientFactor;

        qCDebug(modelformat) << "  materialID =" << mat.materialID;
        qCDebug(modelformat) << "  name =" << mat.name;
        qCDebug(modelformat) << "  shadingModel =" << mat.shadingModel;
        qCDebug(modelformat) << "  _material =" << mat._material.get();

        qCDebug(modelformat) << "  normalTexture =" << mat.normalTexture.filename;
        qCDebug(modelformat) << "  albedoTexture =" << mat.albedoTexture.filename;
        qCDebug(modelformat) << "  opacityTexture =" << mat.opacityTexture.filename;

        qCDebug(modelformat) << "  lightmapParams =" << mat.lightmapParams;

        qCDebug(modelformat) << "  isPBSMaterial =" << mat.isPBSMaterial;
        qCDebug(modelformat) << "  useNormalMap =" << mat.useNormalMap;
        qCDebug(modelformat) << "  useAlbedoMap =" << mat.useAlbedoMap;
        qCDebug(modelformat) << "  useOpacityMap =" << mat.useOpacityMap;
        qCDebug(modelformat) << "  useRoughnessMap =" << mat.useRoughnessMap;
        qCDebug(modelformat) << "  useSpecularMap =" << mat.useSpecularMap;
        qCDebug(modelformat) << "  useMetallicMap =" << mat.useMetallicMap;
        qCDebug(modelformat) << "  useEmissiveMap =" << mat.useEmissiveMap;
        qCDebug(modelformat) << "  useOcclusionMap =" << mat.useOcclusionMap;
        qCDebug(modelformat) << "\n";
    }

    qCDebug(modelformat) << "---------------- Joints ----------------";

    for (const HFMJoint& joint : joints) {
        qCDebug(modelformat) << "\n";
        qCDebug(modelformat) << "    shapeInfo.avgPoint =" << joint.shapeInfo.avgPoint;
        qCDebug(modelformat) << "    shapeInfo.debugLines =" << joint.shapeInfo.debugLines;
        qCDebug(modelformat) << "    shapeInfo.dots =" << joint.shapeInfo.dots;
        qCDebug(modelformat) << "    shapeInfo.points =" << joint.shapeInfo.points;

        qCDebug(modelformat) << "    ---";

        qCDebug(modelformat) << "    parentIndex" << joint.parentIndex;
        qCDebug(modelformat) << "    distanceToParent" << joint.distanceToParent;
        qCDebug(modelformat) << "    localTransform" << joint.localTransform;
        qCDebug(modelformat) << "    transform" << joint.transform;
        qCDebug(modelformat) << "    globalTransform" << joint.globalTransform;

        qCDebug(modelformat) << "    ---";

        qCDebug(modelformat) << "    translation" << joint.translation;
        qCDebug(modelformat) << "    preTransform" << joint.preTransform;
        qCDebug(modelformat) << "    preRotation" << joint.preRotation;
        qCDebug(modelformat) << "    rotation" << joint.rotation;
        qCDebug(modelformat) << "    postRotation" << joint.postRotation;
        qCDebug(modelformat) << "    postTransform" << joint.postTransform;
        qCDebug(modelformat) << "    rotationMin" << joint.rotationMin;
        qCDebug(modelformat) << "    rotationMax" << joint.rotationMax;
        qCDebug(modelformat) << "    inverseDefaultRotation" << joint.inverseDefaultRotation;
        qCDebug(modelformat) << "    inverseBindRotation" << joint.inverseBindRotation;
        qCDebug(modelformat) << "    bindTransformFoundInCluster" << joint.bindTransformFoundInCluster;
        qCDebug(modelformat) << "    bindTransform" << joint.bindTransform;
        qCDebug(modelformat) << "    name" << joint.name;
        qCDebug(modelformat) << "    isSkeletonJoint" << joint.isSkeletonJoint;
        qCDebug(modelformat) << "    geometricOffset" << joint.geometricOffset;
        qCDebug(modelformat) << "\n";
    }

    qCDebug(modelformat) << "------------- SkinDeformers ------------";
    qCDebug(modelformat) << "  skinDeformers.size() =" << skinDeformers.size();
    for (const hfm::SkinDeformer& skinDeformer : skinDeformers) {
        qCDebug(modelformat) << "------- SkinDeformers (Clusters) -------";
        for (const hfm::Cluster& cluster : skinDeformer.clusters) {
            qCDebug(modelformat) << "\n";
            qCDebug(modelformat) << "    jointIndex =" << cluster.jointIndex;
            qCDebug(modelformat) << "    inverseBindMatrix =" << cluster.inverseBindMatrix;
            qCDebug(modelformat) << "\n";
        }
    }
    qCDebug(modelformat) << "\n";
}

@@ -66,8 +66,6 @@ static const int DRACO_ATTRIBUTE_ORIGINAL_INDEX = DRACO_BEGIN_CUSTOM_HIFI_ATTRIB
// High Fidelity Model namespace
namespace hfm {

static const uint32_t UNDEFINED_KEY = (uint32_t)-1;

/// A single blendshape.
class Blendshape {
public:

@@ -113,22 +111,19 @@ public:
    bool isSkeletonJoint;
    bool bindTransformFoundInCluster;


    // geometric offset is applied in local space but does NOT affect children.
    // TODO: Apply hfm::Joint.geometricOffset to transforms in the model preparation step
    glm::mat4 geometricOffset;

    // globalTransform is the transform of the joint with all parent transforms applied, plus the geometric offset
    glm::mat4 localTransform;
    glm::mat4 globalTransform;
    bool hasGeometricOffset;
    glm::vec3 geometricTranslation;
    glm::quat geometricRotation;
    glm::vec3 geometricScaling;
};


/// A single binding to a joint.
class Cluster {
public:
    static const uint32_t INVALID_JOINT_INDEX { (uint32_t)-1 };
    uint32_t jointIndex { INVALID_JOINT_INDEX };

    int jointIndex;
    glm::mat4 inverseBindMatrix;
    Transform inverseBindTransform;
};

@@ -160,6 +155,8 @@ public:
    QVector<int> quadIndices; // original indices from the FBX mesh
    QVector<int> quadTrianglesIndices; // original indices from the FBX mesh of the quad converted as triangles
    QVector<int> triangleIndices; // original indices from the FBX mesh

    QString materialID;
};

class Material {

@@ -230,20 +227,11 @@ public:
    bool needTangentSpace() const;
};


/// Simple Triangle List Mesh
struct TriangleListMesh {
    std::vector<glm::vec3> vertices;
    std::vector<uint32_t> indices;
    std::vector<glm::ivec2> parts; // Offset in the indices, Number of indices
    std::vector<Extents> partExtents; // Extents of each part with no transform applied. Same length as parts.
};

/// A single mesh (with optional blendshapes).
class Mesh {
public:

    std::vector<MeshPart> parts;
    QVector<MeshPart> parts;

    QVector<glm::vec3> vertices;
    QVector<glm::vec3> normals;

@@ -251,27 +239,21 @@ public:
    QVector<glm::vec3> colors;
    QVector<glm::vec2> texCoords;
    QVector<glm::vec2> texCoords1;
    QVector<uint16_t> clusterIndices;
    QVector<uint16_t> clusterWeights;
    QVector<int32_t> originalIndices;

    Extents meshExtents; // DEPRECATED (see hfm::Shape::transformedExtents)
    glm::mat4 modelTransform; // DEPRECATED (see hfm::Joint::globalTransform, hfm::Shape::transform, hfm::Model::joints)
    QVector<Cluster> clusters;

    // Skinning cluster attributes
    std::vector<uint16_t> clusterIndices;
    std::vector<uint16_t> clusterWeights;
    uint16_t clusterWeightsPerVertex { 0 };
    Extents meshExtents;
    glm::mat4 modelTransform;

    // Blendshape attributes
    QVector<Blendshape> blendshapes;

    // Simple Triangle List Mesh generated during baking
    hfm::TriangleListMesh triangleListMesh;

    QVector<int32_t> originalIndices; // Original indices of the vertices
    unsigned int meshIndex; // the order the meshes appeared in the object file

    graphics::MeshPointer _mesh;
    bool wasCompressed { false };

};

/// A single animation frame.

@@ -308,30 +290,6 @@ public:
    bool shouldInitCollisions() const { return _collisionsConfig.size() > 0; }
};

// A different skinning representation, used by FBXSerializer. We convert this to our graphics-optimized runtime representation contained within the mesh.
class SkinCluster {
public:
    std::vector<uint32_t> indices;
    std::vector<float> weights;
};

class SkinDeformer {
public:
    std::vector<Cluster> clusters;
};

// The lightweight model part description.
class Shape {
public:
    uint32_t mesh { UNDEFINED_KEY };
    uint32_t meshPart { UNDEFINED_KEY };
    uint32_t material { UNDEFINED_KEY };
    uint32_t joint { UNDEFINED_KEY }; // The hfm::Joint associated with this shape, containing transform information
    // TODO: Have all serializers calculate hfm::Shape::transformedExtents in world space where they previously calculated hfm::Mesh::meshExtents. Change all code that uses hfm::Mesh::meshExtents to use this instead.
    Extents transformedExtents; // The precise extents of the meshPart vertices in world space, after transform information is applied, while not taking into account rigging/skinning
    uint32_t skinDeformer { UNDEFINED_KEY };
};

/// The runtime model format.
class Model {
public:

@@ -342,18 +300,15 @@ public:
    QString author;
    QString applicationName; ///< the name of the application that generated the model

    std::vector<Shape> shapes;

    std::vector<Mesh> meshes;
    std::vector<Material> materials;

    std::vector<SkinDeformer> skinDeformers;

    std::vector<Joint> joints;
    QVector<Joint> joints;
    QHash<QString, int> jointIndices; ///< 1-based, so as to more easily detect missing indices
    bool hasSkeletonJoints;

    QVector<Mesh> meshes;
    QVector<QString> scripts;

    QHash<QString, Material> materials;

    glm::mat4 offset; // This includes offset, rotation, and scale as specified by the FST file

    glm::vec3 neckPivot;

@@ -385,12 +340,19 @@ public:
    QMap<int, glm::quat> jointRotationOffsets;
    std::vector<ShapeVertices> shapeVertices;
    FlowData flowData;

    void debugDump() const;
};

};

class ExtractedMesh {
public:
    hfm::Mesh mesh;
    QMultiHash<int, int> newIndices;
    QVector<QHash<int, int> > blendshapeIndexMaps;
    QVector<QPair<int, int> > partMaterialTextures;
    QHash<QString, size_t> texcoordSetMap;
};

typedef hfm::Blendshape HFMBlendshape;
typedef hfm::JointShapeInfo HFMJointShapeInfo;
typedef hfm::Joint HFMJoint;

@@ -399,10 +361,8 @@ typedef hfm::Texture HFMTexture;
typedef hfm::MeshPart HFMMeshPart;
typedef hfm::Material HFMMaterial;
typedef hfm::Mesh HFMMesh;
typedef hfm::SkinDeformer HFMSkinDeformer;
typedef hfm::AnimationFrame HFMAnimationFrame;
typedef hfm::Light HFMLight;
typedef hfm::Shape HFMShape;
typedef hfm::Model HFMModel;
typedef hfm::FlowData FlowData;

@@ -1,212 +0,0 @@
//
//  HFMModelMath.cpp
//  model-baker/src/model-baker
//
//  Created by Sabrina Shanman on 2019/10/04.
//  Copyright 2019 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "HFMModelMath.h"

#include <LogHandler.h>

#include <unordered_map>

#include <GLMHelpers.h>
#include <glm/gtx/hash.hpp>

namespace hfm {

void forEachIndex(const hfm::MeshPart& meshPart, std::function<void(uint32_t)> func) {
    for (int i = 0; i <= meshPart.quadIndices.size() - 4; i += 4) {
        func((uint32_t)meshPart.quadIndices[i]);
        func((uint32_t)meshPart.quadIndices[i+1]);
        func((uint32_t)meshPart.quadIndices[i+2]);
        func((uint32_t)meshPart.quadIndices[i+3]);
    }
    for (int i = 0; i <= meshPart.triangleIndices.size() - 3; i += 3) {
        func((uint32_t)meshPart.triangleIndices[i]);
        func((uint32_t)meshPart.triangleIndices[i+1]);
        func((uint32_t)meshPart.triangleIndices[i+2]);
    }
}

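Editor's note: because forEachIndex() above walks both the quad and the triangle index lists through a single callback, callers can treat parts uniformly regardless of primitive type. A hedged usage sketch follows; computePartExtents is an invented name for this note, while Extents and hfm::forEachIndex are the types and helper shown in this diff.

// Accumulate the bounding extents of one mesh part, visiting quads and
// triangles alike via the forEachIndex callback.
Extents computePartExtents(const hfm::MeshPart& meshPart, const QVector<glm::vec3>& vertices) {
    Extents extents;
    extents.reset();
    hfm::forEachIndex(meshPart, [&](uint32_t index) {
        extents.addPoint(vertices[(int)index]);
    });
    return extents;
}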
||||
void thickenFlatExtents(Extents& extents) {
|
||||
// Add epsilon to extents to compensate for flat plane
|
||||
extents.minimum -= glm::vec3(EPSILON, EPSILON, EPSILON);
|
||||
extents.maximum += glm::vec3(EPSILON, EPSILON, EPSILON);
|
||||
}
|
||||
|
||||
void calculateExtentsForTriangleListMesh(TriangleListMesh& triangleListMesh) {
|
||||
triangleListMesh.partExtents.resize(triangleListMesh.parts.size());
|
||||
for (size_t partIndex = 0; partIndex < triangleListMesh.parts.size(); ++partIndex) {
|
||||
const auto& part = triangleListMesh.parts[partIndex];
|
||||
auto& extents = triangleListMesh.partExtents[partIndex];
|
||||
int partEnd = part.x + part.y;
|
||||
for (int i = part.x; i < partEnd; ++i) {
|
||||
auto index = triangleListMesh.indices[i];
|
||||
const auto& position = triangleListMesh.vertices[index];
|
||||
extents.addPoint(position);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void calculateExtentsForShape(hfm::Shape& shape, const std::vector<hfm::TriangleListMesh>& triangleListMeshes, const std::vector<hfm::Joint>& joints) {
|
||||
auto& shapeExtents = shape.transformedExtents;
|
||||
shapeExtents.reset();
|
||||
|
||||
const auto& triangleListMesh = triangleListMeshes[shape.mesh];
|
||||
const auto& partExtent = triangleListMesh.partExtents[shape.meshPart];
|
||||
|
||||
const glm::mat4& transform = joints[shape.joint].transform;
|
||||
shapeExtents = partExtent;
|
||||
shapeExtents.transform(transform);
|
||||
|
||||
thickenFlatExtents(shapeExtents);
|
||||
}
|
||||
|
||||
void calculateExtentsForModel(Extents& modelExtents, const std::vector<hfm::Shape>& shapes) {
|
||||
modelExtents.reset();
|
||||
|
||||
for (size_t i = 0; i < shapes.size(); ++i) {
|
||||
const auto& shape = shapes[i];
|
||||
const auto& shapeExtents = shape.transformedExtents;
|
||||
modelExtents.addExtents(shapeExtents);
|
||||
}
|
||||
}
|
||||
|
||||
ReweightedDeformers getReweightedDeformers(const size_t numMeshVertices, const std::vector<hfm::SkinCluster> skinClusters, const uint16_t weightsPerVertex) {
    ReweightedDeformers reweightedDeformers;
    if (skinClusters.size() == 0) {
        return reweightedDeformers;
    }

    size_t numClusterIndices = numMeshVertices * weightsPerVertex;
    reweightedDeformers.indices.resize(numClusterIndices, (uint16_t)(skinClusters.size() - 1));
    reweightedDeformers.weights.resize(numClusterIndices, 0);
    reweightedDeformers.weightsPerVertex = weightsPerVertex;

    std::vector<float> weightAccumulators;
    weightAccumulators.resize(numClusterIndices, 0.0f);
    for (uint16_t i = 0; i < (uint16_t)skinClusters.size(); ++i) {
        const hfm::SkinCluster& skinCluster = skinClusters[i];

        if (skinCluster.indices.size() != skinCluster.weights.size()) {
            reweightedDeformers.trimmedToMatch = true;
        }
        size_t numIndicesOrWeights = std::min(skinCluster.indices.size(), skinCluster.weights.size());
        for (size_t j = 0; j < numIndicesOrWeights; ++j) {
            uint32_t index = skinCluster.indices[j];
            float weight = skinCluster.weights[j];

            // look for an unused slot in the weights vector
            uint32_t weightIndex = index * weightsPerVertex;
            uint32_t lowestIndex = -1;
            float lowestWeight = FLT_MAX;
            uint16_t k = 0;
            for (; k < weightsPerVertex; k++) {
                if (weightAccumulators[weightIndex + k] == 0.0f) {
                    reweightedDeformers.indices[weightIndex + k] = i;
                    weightAccumulators[weightIndex + k] = weight;
                    break;
                }
                if (weightAccumulators[weightIndex + k] < lowestWeight) {
                    lowestIndex = k;
                    lowestWeight = weightAccumulators[weightIndex + k];
                }
            }
            if (k == weightsPerVertex && weight > lowestWeight) {
                // no space for an additional weight; we must replace the lowest
                weightAccumulators[weightIndex + lowestIndex] = weight;
                reweightedDeformers.indices[weightIndex + lowestIndex] = i;
            }
        }
    }

    // now that we've accumulated the most relevant weights for each vertex
    // normalize and compress to 16-bits
    for (size_t i = 0; i < numMeshVertices; ++i) {
        size_t j = i * weightsPerVertex;

        // normalize weights into uint16_t
        float totalWeight = 0.0f;
        for (size_t k = j; k < j + weightsPerVertex; ++k) {
            totalWeight += weightAccumulators[k];
        }

        const float ALMOST_HALF = 0.499f;
        if (totalWeight > 0.0f) {
            float weightScalingFactor = (float)(UINT16_MAX) / totalWeight;
            for (size_t k = j; k < j + weightsPerVertex; ++k) {
                reweightedDeformers.weights[k] = (uint16_t)(weightScalingFactor * weightAccumulators[k] + ALMOST_HALF);
            }
        } else {
            reweightedDeformers.weights[j] = (uint16_t)((float)(UINT16_MAX) + ALMOST_HALF);
        }
    }

    return reweightedDeformers;
}
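
A small usage sketch of the reweighting above (the sample values are hypothetical, and it assumes hfm::SkinCluster exposes the indices/weights containers the function reads):

    hfm::SkinCluster clusterA;
    clusterA.indices = { 0 };     // vertex 0 is influenced by this cluster...
    clusterA.weights = { 0.6f };
    hfm::SkinCluster clusterB;
    clusterB.indices = { 0 };     // ...and by this one
    clusterB.weights = { 0.2f };

    // The raw weights (0.6, 0.2) sum to 0.8, so after normalization vertex 0
    // carries 49151 and 16384 in the packed 16-bit weights; they sum to UINT16_MAX.
    auto reweighted = getReweightedDeformers(1, { clusterA, clusterB }, 4);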
const TriangleListMesh generateTriangleListMesh(const std::vector<glm::vec3>& srcVertices, const std::vector<HFMMeshPart>& srcParts) {

    TriangleListMesh dest;

    // copy vertices for now
    dest.vertices = srcVertices;

    std::vector<uint32_t> oldToNewIndex(srcVertices.size());
    {
        std::unordered_map<glm::vec3, uint32_t> uniqueVertexToNewIndex;
        int oldIndex = 0;
        int newIndex = 0;
        for (const auto& srcVertex : srcVertices) {
            auto foundIndex = uniqueVertexToNewIndex.find(srcVertex);
            if (foundIndex != uniqueVertexToNewIndex.end()) {
                oldToNewIndex[oldIndex] = foundIndex->second;
            } else {
                uniqueVertexToNewIndex[srcVertex] = newIndex;
                oldToNewIndex[oldIndex] = newIndex;
                dest.vertices[newIndex] = srcVertex;
                ++newIndex;
            }
            ++oldIndex;
        }
        if (uniqueVertexToNewIndex.size() < srcVertices.size()) {
            dest.vertices.resize(uniqueVertexToNewIndex.size());
            dest.vertices.shrink_to_fit();
        }
    }

    auto newIndicesCount = 0;
    for (const auto& part : srcParts) {
        newIndicesCount += part.triangleIndices.size() + part.quadTrianglesIndices.size();
    }

    {
        dest.indices.resize(newIndicesCount);
        int i = 0;
        for (const auto& part : srcParts) {
            glm::ivec2 spart(i, 0);
            for (const auto& qti : part.quadTrianglesIndices) {
                dest.indices[i] = oldToNewIndex[qti];
                ++i;
            }
            for (const auto& ti : part.triangleIndices) {
                dest.indices[i] = oldToNewIndex[ti];
                ++i;
            }
            spart.y = i - spart.x;
            dest.parts.push_back(spart);
        }
    }

    calculateExtentsForTriangleListMesh(dest);

    return dest;
}

};
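
The deduplication in generateTriangleListMesh hinges on hashing glm::vec3 keys. A standalone sketch of the same pattern (glm ships a std::hash specialization in <glm/gtx/hash.hpp>, gated behind GLM_ENABLE_EXPERIMENTAL in recent glm versions; the helper below is illustrative, not the engine's):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/hash.hpp>
    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    // Collapse duplicate positions in place and return an old-index -> new-index remap.
    std::vector<uint32_t> deduplicateVertices(std::vector<glm::vec3>& vertices) {
        std::unordered_map<glm::vec3, uint32_t> seen;
        std::vector<uint32_t> remap(vertices.size());
        uint32_t next = 0;
        for (uint32_t old = 0; old < (uint32_t)vertices.size(); ++old) {
            auto it = seen.find(vertices[old]);
            if (it != seen.end()) {
                remap[old] = it->second;        // duplicate: reuse the earlier slot
            } else {
                seen[vertices[old]] = next;
                vertices[next] = vertices[old]; // compact toward the front
                remap[old] = next++;
            }
        }
        vertices.resize(next);
        return remap;
    }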
@@ -1,45 +0,0 @@
//
//  HFMModelMath.h
//  model-baker/src/model-baker
//
//  Created by Sabrina Shanman on 2019/10/04.
//  Copyright 2019 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_hfm_ModelMath_h
#define hifi_hfm_ModelMath_h

#include "HFM.h"

namespace hfm {

void forEachIndex(const hfm::MeshPart& meshPart, std::function<void(uint32_t)> func);

void initializeExtents(Extents& extents);

void calculateExtentsForTriangleListMesh(TriangleListMesh& triangleListMesh);

// This can't be moved to model-baker until
void calculateExtentsForShape(hfm::Shape& shape, const std::vector<hfm::TriangleListMesh>& triangleListMeshes, const std::vector<hfm::Joint>& joints);

void calculateExtentsForModel(Extents& modelExtents, const std::vector<hfm::Shape>& shapes);

struct ReweightedDeformers {
    std::vector<uint16_t> indices;
    std::vector<uint16_t> weights;
    uint16_t weightsPerVertex { 0 };
    bool trimmedToMatch { false };
};

const uint16_t DEFAULT_SKINNING_WEIGHTS_PER_VERTEX = 4;

ReweightedDeformers getReweightedDeformers(const size_t numMeshVertices, const std::vector<hfm::SkinCluster> skinClusters, const uint16_t weightsPerVertex = DEFAULT_SKINNING_WEIGHTS_PER_VERTEX);

const TriangleListMesh generateTriangleListMesh(const std::vector<glm::vec3>& srcVertices, const std::vector<HFMMeshPart>& srcParts);

};

#endif // #define hifi_hfm_ModelMath_h
@@ -1,5 +1,5 @@
//
//  HFMSerializer.h
//  FBXSerializer.h
//  libraries/hfm/src/hfm
//
//  Created by Sabrina Shanman on 2018/11/07.
@@ -33,7 +33,7 @@ namespace TextureUsage {
/**jsdoc
 * <p>Describes the type of texture.</p>
 * <p>See also: {@link Material} and
 * {@link https://docs.projectathena.dev/create/3d-models/pbr-materials-guide.html|PBR Materials Guide}.</p>
 * {@link https://docs.vircadia.dev/create/3d-models/pbr-materials-guide.html|PBR Materials Guide}.</p>
 * <table>
 *   <thead>
 *     <tr><th>Value</th><th>Name</th><th>Description</th></tr>
@@ -11,6 +11,7 @@
#define khronos_khr_hpp

#include <unordered_map>
#include <stdexcept>

namespace khronos {

@@ -23,7 +23,7 @@
/**jsdoc
 * The <code>Midi</code> API provides the ability to connect Interface with musical instruments and other external or virtual
 * devices via the MIDI protocol. For further information and examples, see the tutorial:
 * <a href="https://docs.projectathena.dev/script/midi-tutorial.html">Use MIDI to Control Your Environment</a>.
 * <a href="https://docs.vircadia.dev/script/midi-tutorial.html">Use MIDI to Control Your Environment</a>.
 *
 * <p><strong>Note:</strong> Only works on Windows.</p>
 *
@@ -13,61 +13,34 @@

#include "BakerTypes.h"
#include "ModelMath.h"
#include "CollectShapeVerticesTask.h"
#include "BuildGraphicsMeshTask.h"
#include "CalculateMeshNormalsTask.h"
#include "CalculateMeshTangentsTask.h"
#include "CalculateBlendshapeNormalsTask.h"
#include "CalculateBlendshapeTangentsTask.h"
#include "PrepareJointsTask.h"
#include "CalculateTransformedExtentsTask.h"
#include "BuildDracoMeshTask.h"
#include "ParseFlowDataTask.h"
#include <hfm/HFMModelMath.h>

namespace baker {

    class GetModelPartsTask {
    public:
        using Input = hfm::Model::Pointer;
        using Output = VaryingSet9<std::vector<hfm::Mesh>, hifi::URL, baker::MeshIndicesToModelNames, baker::BlendshapesPerMesh, std::vector<hfm::Joint>, std::vector<hfm::Shape>, std::vector<hfm::SkinDeformer>, Extents, std::vector<hfm::Material>>;
        using Output = VaryingSet5<std::vector<hfm::Mesh>, hifi::URL, baker::MeshIndicesToModelNames, baker::BlendshapesPerMesh, std::vector<hfm::Joint>>;
        using JobModel = Job::ModelIO<GetModelPartsTask, Input, Output>;

        void run(const BakeContextPointer& context, const Input& input, Output& output) {
            const auto& hfmModelIn = input;
            output.edit0() = hfmModelIn->meshes;
            output.edit0() = hfmModelIn->meshes.toStdVector();
            output.edit1() = hfmModelIn->originalURL;
            output.edit2() = hfmModelIn->meshIndicesToModelNames;
            auto& blendshapesPerMesh = output.edit3();
            blendshapesPerMesh.reserve(hfmModelIn->meshes.size());
            for (size_t i = 0; i < hfmModelIn->meshes.size(); i++) {
            for (int i = 0; i < hfmModelIn->meshes.size(); i++) {
                blendshapesPerMesh.push_back(hfmModelIn->meshes[i].blendshapes.toStdVector());
            }
            output.edit4() = hfmModelIn->joints;
            output.edit5() = hfmModelIn->shapes;
            output.edit6() = hfmModelIn->skinDeformers;
            output.edit7() = hfmModelIn->meshExtents;
            output.edit8() = hfmModelIn->materials;
        }
    };

    class BuildMeshTriangleListTask {
    public:
        using Input = std::vector<hfm::Mesh>;
        using Output = std::vector<hfm::TriangleListMesh>;
        using JobModel = Job::ModelIO<BuildMeshTriangleListTask, Input, Output>;

        void run(const BakeContextPointer& context, const Input& input, Output& output) {
            const auto& meshesIn = input;
            auto& indexedTrianglesMeshOut = output;
            indexedTrianglesMeshOut.clear();
            indexedTrianglesMeshOut.resize(meshesIn.size());

            for (size_t i = 0; i < meshesIn.size(); i++) {
                auto& mesh = meshesIn[i];
                const auto verticesStd = mesh.vertices.toStdVector();
                indexedTrianglesMeshOut[i] = hfm::generateTriangleListMesh(verticesStd, mesh.parts);
            }
            output.edit4() = hfmModelIn->joints.toStdVector();
        }
    };

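
Every task in this pipeline follows the same shape: declare Input/Output types plus a JobModel alias, then implement run(). A minimal sketch inside namespace baker, using the engine types seen above (the task itself is hypothetical):

    class CountMeshPartsTask {
    public:
        using Input = std::vector<hfm::Mesh>;
        using Output = std::vector<size_t>;
        using JobModel = Job::ModelIO<CountMeshPartsTask, Input, Output>;

        void run(const BakeContextPointer& context, const Input& input, Output& output) {
            output.clear();
            output.reserve(input.size());
            for (const auto& mesh : input) {
                output.push_back(mesh.parts.size());   // one part count per hfm::Mesh
            }
        }
    };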
@@ -102,23 +75,21 @@ namespace baker {

    class BuildMeshesTask {
    public:
        using Input = VaryingSet6<std::vector<hfm::Mesh>, std::vector<hfm::TriangleListMesh>, std::vector<graphics::MeshPointer>, NormalsPerMesh, TangentsPerMesh, BlendshapesPerMesh>;
        using Input = VaryingSet5<std::vector<hfm::Mesh>, std::vector<graphics::MeshPointer>, NormalsPerMesh, TangentsPerMesh, BlendshapesPerMesh>;
        using Output = std::vector<hfm::Mesh>;
        using JobModel = Job::ModelIO<BuildMeshesTask, Input, Output>;

        void run(const BakeContextPointer& context, const Input& input, Output& output) {
            auto& meshesIn = input.get0();
            int numMeshes = (int)meshesIn.size();
            auto& triangleListMeshesIn = input.get1();
            auto& graphicsMeshesIn = input.get2();
            auto& normalsPerMeshIn = input.get3();
            auto& tangentsPerMeshIn = input.get4();
            auto& blendshapesPerMeshIn = input.get5();
            auto& graphicsMeshesIn = input.get1();
            auto& normalsPerMeshIn = input.get2();
            auto& tangentsPerMeshIn = input.get3();
            auto& blendshapesPerMeshIn = input.get4();

            auto meshesOut = meshesIn;
            for (int i = 0; i < numMeshes; i++) {
                auto& meshOut = meshesOut[i];
                meshOut.triangleListMesh = triangleListMeshesIn[i];
                meshOut._mesh = safeGet(graphicsMeshesIn, i);
                meshOut.normals = QVector<glm::vec3>::fromStdVector(safeGet(normalsPerMeshIn, i));
                meshOut.tangents = QVector<glm::vec3>::fromStdVector(safeGet(tangentsPerMeshIn, i));
@@ -130,22 +101,17 @@ namespace baker {

    class BuildModelTask {
    public:
        using Input = VaryingSet9<hfm::Model::Pointer, std::vector<hfm::Mesh>, std::vector<hfm::Joint>, QMap<int, glm::quat>, QHash<QString, int>, FlowData, std::vector<ShapeVertices>, std::vector<hfm::Shape>, Extents>;
        using Input = VaryingSet6<hfm::Model::Pointer, std::vector<hfm::Mesh>, std::vector<hfm::Joint>, QMap<int, glm::quat>, QHash<QString, int>, FlowData>;
        using Output = hfm::Model::Pointer;
        using JobModel = Job::ModelIO<BuildModelTask, Input, Output>;

        void run(const BakeContextPointer& context, const Input& input, Output& output) {
            auto hfmModelOut = input.get0();
            hfmModelOut->meshes = input.get1();
            hfmModelOut->joints = input.get2();
            hfmModelOut->meshes = QVector<hfm::Mesh>::fromStdVector(input.get1());
            hfmModelOut->joints = QVector<hfm::Joint>::fromStdVector(input.get2());
            hfmModelOut->jointRotationOffsets = input.get3();
            hfmModelOut->jointIndices = input.get4();
            hfmModelOut->flowData = input.get5();
            hfmModelOut->shapeVertices = input.get6();
            hfmModelOut->shapes = input.get7();
            hfmModelOut->meshExtents = input.get8();
            // These depend on the ShapeVertices
            // TODO: Create a task for this rather than calculating it here
            hfmModelOut->computeKdops();
            output = hfmModelOut;
        }
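
BuildModelTask round-trips between std::vector on the baker side and QVector on the hfm::Model side. In Qt 5.12 the two conversions look like this (both copy element by element, which is presumably why the pipeline converts only at the model boundary):

    std::vector<hfm::Joint> stdJoints = hfmModelOut->joints.toStdVector();   // QVector -> std::vector
    hfmModelOut->joints = QVector<hfm::Joint>::fromStdVector(stdJoints);     // std::vector -> QVector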
@@ -168,10 +134,6 @@ namespace baker {
        const auto meshIndicesToModelNames = modelPartsIn.getN<GetModelPartsTask::Output>(2);
        const auto blendshapesPerMeshIn = modelPartsIn.getN<GetModelPartsTask::Output>(3);
        const auto jointsIn = modelPartsIn.getN<GetModelPartsTask::Output>(4);
        const auto shapesIn = modelPartsIn.getN<GetModelPartsTask::Output>(5);
        const auto skinDeformersIn = modelPartsIn.getN<GetModelPartsTask::Output>(6);
        const auto modelExtentsIn = modelPartsIn.getN<GetModelPartsTask::Output>(7);
        const auto materialsIn = modelPartsIn.getN<GetModelPartsTask::Output>(8);

        // Calculate normals and tangents for meshes and blendshapes if they do not exist
        // Note: Normals are never calculated here for OBJ models. OBJ files optionally define normals on a per-face basis, so for consistency normals are calculated beforehand in OBJSerializer.
@@ -183,15 +145,8 @@ namespace baker {
        const auto calculateBlendshapeTangentsInputs = CalculateBlendshapeTangentsTask::Input(normalsPerBlendshapePerMesh, blendshapesPerMeshIn, meshesIn).asVarying();
        const auto tangentsPerBlendshapePerMesh = model.addJob<CalculateBlendshapeTangentsTask>("CalculateBlendshapeTangents", calculateBlendshapeTangentsInputs);

        // Calculate shape vertices. These rely on the weight-normalized clusterIndices/clusterWeights in the mesh, and are used later for computing the joint kdops
        const auto collectShapeVerticesInputs = CollectShapeVerticesTask::Input(meshesIn, shapesIn, jointsIn, skinDeformersIn).asVarying();
        const auto shapeVerticesPerJoint = model.addJob<CollectShapeVerticesTask>("CollectShapeVertices", collectShapeVerticesInputs);

        // Build the slim triangle list mesh for each hfm::mesh
        const auto triangleListMeshes = model.addJob<BuildMeshTriangleListTask>("BuildMeshTriangleListTask", meshesIn);

        // Build the graphics::MeshPointer for each hfm::Mesh
        const auto buildGraphicsMeshInputs = BuildGraphicsMeshTask::Input(meshesIn, url, meshIndicesToModelNames, normalsPerMesh, tangentsPerMesh, shapesIn, skinDeformersIn).asVarying();
        const auto buildGraphicsMeshInputs = BuildGraphicsMeshTask::Input(meshesIn, url, meshIndicesToModelNames, normalsPerMesh, tangentsPerMesh).asVarying();
        const auto graphicsMeshes = model.addJob<BuildGraphicsMeshTask>("BuildGraphicsMesh", buildGraphicsMeshInputs);

        // Prepare joint information
@@ -201,12 +156,6 @@ namespace baker {
        const auto jointRotationOffsets = jointInfoOut.getN<PrepareJointsTask::Output>(1);
        const auto jointIndices = jointInfoOut.getN<PrepareJointsTask::Output>(2);

        // Use transform information to compute extents
        const auto calculateExtentsInputs = CalculateTransformedExtentsTask::Input(modelExtentsIn, triangleListMeshes, shapesIn, jointsOut).asVarying();
        const auto calculateExtentsOutputs = model.addJob<CalculateTransformedExtentsTask>("CalculateExtents", calculateExtentsInputs);
        const auto modelExtentsOut = calculateExtentsOutputs.getN<CalculateTransformedExtentsTask::Output>(0);
        const auto shapesOut = calculateExtentsOutputs.getN<CalculateTransformedExtentsTask::Output>(1);

        // Parse material mapping
        const auto parseMaterialMappingInputs = ParseMaterialMappingTask::Input(mapping, materialMappingBaseURL).asVarying();
        const auto materialMapping = model.addJob<ParseMaterialMappingTask>("ParseMaterialMapping", parseMaterialMappingInputs);
@@ -216,7 +165,7 @@ namespace baker {
        // TODO: Tangent support (Needs changes to FBXSerializer_Mesh as well)
        // NOTE: Due to an unresolved linker error, BuildDracoMeshTask is not functional on Android
        // TODO: Figure out why BuildDracoMeshTask.cpp won't link with draco on Android
        const auto buildDracoMeshInputs = BuildDracoMeshTask::Input(shapesOut, meshesIn, materialsIn, normalsPerMesh, tangentsPerMesh).asVarying();
        const auto buildDracoMeshInputs = BuildDracoMeshTask::Input(meshesIn, normalsPerMesh, tangentsPerMesh).asVarying();
        const auto buildDracoMeshOutputs = model.addJob<BuildDracoMeshTask>("BuildDracoMesh", buildDracoMeshInputs);
        const auto dracoMeshes = buildDracoMeshOutputs.getN<BuildDracoMeshTask::Output>(0);
        const auto dracoErrors = buildDracoMeshOutputs.getN<BuildDracoMeshTask::Output>(1);
@@ -228,9 +177,9 @@ namespace baker {
        // Combine the outputs into a new hfm::Model
        const auto buildBlendshapesInputs = BuildBlendshapesTask::Input(blendshapesPerMeshIn, normalsPerBlendshapePerMesh, tangentsPerBlendshapePerMesh).asVarying();
        const auto blendshapesPerMeshOut = model.addJob<BuildBlendshapesTask>("BuildBlendshapes", buildBlendshapesInputs);
        const auto buildMeshesInputs = BuildMeshesTask::Input(meshesIn, triangleListMeshes, graphicsMeshes, normalsPerMesh, tangentsPerMesh, blendshapesPerMeshOut).asVarying();
        const auto buildMeshesInputs = BuildMeshesTask::Input(meshesIn, graphicsMeshes, normalsPerMesh, tangentsPerMesh, blendshapesPerMeshOut).asVarying();
        const auto meshesOut = model.addJob<BuildMeshesTask>("BuildMeshes", buildMeshesInputs);
        const auto buildModelInputs = BuildModelTask::Input(hfmModelIn, meshesOut, jointsOut, jointRotationOffsets, jointIndices, flowData, shapeVerticesPerJoint, shapesOut, modelExtentsOut).asVarying();
        const auto buildModelInputs = BuildModelTask::Input(hfmModelIn, meshesOut, jointsOut, jointRotationOffsets, jointIndices, flowData).asVarying();
        const auto hfmModelOut = model.addJob<BuildModelTask>("BuildModel", buildModelInputs);

        output = Output(hfmModelOut, materialMapping, dracoMeshes, dracoErrors, materialList);
@@ -36,14 +36,6 @@ namespace baker {
    using TangentsPerBlendshape = std::vector<std::vector<glm::vec3>>;

    using MeshIndicesToModelNames = QHash<int, QString>;

    class ReweightedDeformers {
    public:
        std::vector<uint16_t> indices;
        std::vector<uint16_t> weights;
        uint16_t weightsPerVertex { 0 };
        bool trimmedToMatch { false };
    };
};

#endif // hifi_BakerTypes_h
@@ -39,47 +39,19 @@
#include "ModelMath.h"

#ifndef Q_OS_ANDROID

void reindexMaterials(const std::vector<uint32_t>& originalMaterialIndices, std::vector<uint32_t>& materials, std::vector<uint16_t>& materialIndices) {
    materialIndices.resize(originalMaterialIndices.size());
    for (size_t i = 0; i < originalMaterialIndices.size(); ++i) {
        uint32_t material = originalMaterialIndices[i];
        auto foundMaterial = std::find(materials.cbegin(), materials.cend(), material);
        if (foundMaterial == materials.cend()) {
            materials.push_back(material);
            materialIndices[i] = (uint16_t)(materials.size() - 1);
        } else {
            materialIndices[i] = (uint16_t)(foundMaterial - materials.cbegin());
std::vector<hifi::ByteArray> createMaterialList(const hfm::Mesh& mesh) {
    std::vector<hifi::ByteArray> materialList;
    for (const auto& meshPart : mesh.parts) {
        auto materialID = QVariant(meshPart.materialID).toByteArray();
        const auto materialIt = std::find(materialList.cbegin(), materialList.cend(), materialID);
        if (materialIt == materialList.cend()) {
            materialList.push_back(materialID);
        }
    }
    return materialList;
}

void createMaterialLists(const std::vector<hfm::Shape>& shapes, const std::vector<hfm::Mesh>& meshes, const std::vector<hfm::Material>& hfmMaterials, std::vector<std::vector<hifi::ByteArray>>& materialIndexLists, std::vector<std::vector<uint16_t>>& partMaterialIndicesPerMesh) {
    std::vector<std::vector<uint32_t>> materialsPerMesh;
    for (const auto& mesh : meshes) {
        materialsPerMesh.emplace_back(mesh.parts.size(), hfm::UNDEFINED_KEY);
    }
    for (const auto& shape : shapes) {
        materialsPerMesh[shape.mesh][shape.meshPart] = shape.material;
    }

    materialIndexLists.resize(materialsPerMesh.size());
    partMaterialIndicesPerMesh.resize(materialsPerMesh.size());
    for (size_t i = 0; i < materialsPerMesh.size(); ++i) {
        const std::vector<uint32_t>& materials = materialsPerMesh[i];
        std::vector<uint32_t> uniqueMaterials;

        reindexMaterials(materials, uniqueMaterials, partMaterialIndicesPerMesh[i]);

        materialIndexLists[i].reserve(uniqueMaterials.size());
        for (const uint32_t material : uniqueMaterials) {
            const auto& hfmMaterial = hfmMaterials[material];
            materialIndexLists[i].push_back(QVariant(hfmMaterial.materialID).toByteArray());
        }
    }
}

std::tuple<std::unique_ptr<draco::Mesh>, bool> createDracoMesh(const hfm::Mesh& mesh, const std::vector<glm::vec3>& normals, const std::vector<glm::vec3>& tangents, const std::vector<uint16_t>& partMaterialIndices) {
std::tuple<std::unique_ptr<draco::Mesh>, bool> createDracoMesh(const hfm::Mesh& mesh, const std::vector<glm::vec3>& normals, const std::vector<glm::vec3>& tangents, const std::vector<hifi::ByteArray>& materialList) {
    Q_ASSERT(normals.size() == 0 || (int)normals.size() == mesh.vertices.size());
    Q_ASSERT(mesh.colors.size() == 0 || mesh.colors.size() == mesh.vertices.size());
    Q_ASSERT(mesh.texCoords.size() == 0 || mesh.texCoords.size() == mesh.vertices.size());
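
createMaterialList above deduplicates per-part material IDs in first-seen order, while the removed reindexMaterials additionally records, for each part, its slot in the deduplicated list. A compact standalone sketch of that reindexing pattern (plain std::string IDs instead of the engine's types):

    #include <algorithm>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Dedup 'ids' into 'unique' (first-seen order) and map each part to its slot.
    std::vector<uint16_t> reindex(const std::vector<std::string>& ids,
                                  std::vector<std::string>& unique) {
        std::vector<uint16_t> slots(ids.size());
        for (size_t i = 0; i < ids.size(); ++i) {
            auto it = std::find(unique.cbegin(), unique.cend(), ids[i]);
            if (it == unique.cend()) {
                unique.push_back(ids[i]);
                slots[i] = (uint16_t)(unique.size() - 1);
            } else {
                slots[i] = (uint16_t)(it - unique.cbegin());
            }
        }
        return slots;
    }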
@@ -150,9 +122,11 @@ std::tuple<std::unique_ptr<draco::Mesh>, bool> createDracoMesh(const hfm::Mesh&

    auto partIndex = 0;
    draco::FaceIndex face;
    uint16_t materialID;

    for (auto& part : mesh.parts) {
        uint16_t materialID = partMaterialIndices[partIndex];
        auto materialIt = std::find(materialList.cbegin(), materialList.cend(), QVariant(part.materialID).toByteArray());
        materialID = (uint16_t)(materialIt - materialList.cbegin());

        auto addFace = [&](const QVector<int>& indices, int index, draco::FaceIndex face) {
            int32_t idx0 = indices[index];
@@ -240,33 +214,30 @@ void BuildDracoMeshTask::run(const baker::BakeContextPointer& context, const Inp
#ifdef Q_OS_ANDROID
    qCWarning(model_baker) << "BuildDracoMesh is disabled on Android. Output meshes will be empty.";
#else
    const auto& shapes = input.get0();
    const auto& meshes = input.get1();
    const auto& materials = input.get2();
    const auto& normalsPerMesh = input.get3();
    const auto& tangentsPerMesh = input.get4();
    const auto& meshes = input.get0();
    const auto& normalsPerMesh = input.get1();
    const auto& tangentsPerMesh = input.get2();
    auto& dracoBytesPerMesh = output.edit0();
    auto& dracoErrorsPerMesh = output.edit1();

    auto& materialLists = output.edit2();
    std::vector<std::vector<uint16_t>> partMaterialIndicesPerMesh;
    createMaterialLists(shapes, meshes, materials, materialLists, partMaterialIndicesPerMesh);

    dracoBytesPerMesh.reserve(meshes.size());
    // vector<bool> is an exception to the std::vector conventions as it is a bit field
    // So a bool reference to an element doesn't work
    dracoErrorsPerMesh.resize(meshes.size());
    materialLists.reserve(meshes.size());
    for (size_t i = 0; i < meshes.size(); i++) {
        const auto& mesh = meshes[i];
        const auto& normals = baker::safeGet(normalsPerMesh, i);
        const auto& tangents = baker::safeGet(tangentsPerMesh, i);
        dracoBytesPerMesh.emplace_back();
        auto& dracoBytes = dracoBytesPerMesh.back();
        const auto& partMaterialIndices = partMaterialIndicesPerMesh[i];
        materialLists.push_back(createMaterialList(mesh));
        const auto& materialList = materialLists.back();

        bool dracoError;
        std::unique_ptr<draco::Mesh> dracoMesh;
        std::tie(dracoMesh, dracoError) = createDracoMesh(mesh, normals, tangents, partMaterialIndices);
        std::tie(dracoMesh, dracoError) = createDracoMesh(mesh, normals, tangents, materialList);
        dracoErrorsPerMesh[i] = dracoError;

        if (dracoMesh) {
@@ -33,7 +33,7 @@ public:
class BuildDracoMeshTask {
public:
    using Config = BuildDracoMeshConfig;
    using Input = baker::VaryingSet5<std::vector<hfm::Shape>, std::vector<hfm::Mesh>, std::vector<hfm::Material>, baker::NormalsPerMesh, baker::TangentsPerMesh>;
    using Input = baker::VaryingSet3<std::vector<hfm::Mesh>, baker::NormalsPerMesh, baker::TangentsPerMesh>;
    using Output = baker::VaryingSet3<std::vector<hifi::ByteArray>, std::vector<bool>, std::vector<std::vector<hifi::ByteArray>>>;
    using JobModel = baker::Job::ModelIO<BuildDracoMeshTask, Input, Output, Config>;

@@ -2,8 +2,8 @@
//  BuildGraphicsMeshTask.h
//  model-baker/src/model-baker
//
//  Created by Sabrina Shanman on 2019/09/16.
//  Copyright 2019 High Fidelity, Inc.
//  Created by Sabrina Shanman on 2018/12/06.
//  Copyright 2018 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -15,7 +15,6 @@

#include <LogHandler.h>
#include "ModelBakerLogging.h"
#include <hfm/HFMModelMath.h>
#include "ModelMath.h"

using vec2h = glm::tvec2<glm::detail::hdata>;
@@ -28,7 +27,7 @@ glm::vec3 normalizeDirForPacking(const glm::vec3& dir) {
    return dir;
}

void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphicsMeshPointer, const baker::MeshNormals& meshNormals, const baker::MeshTangents& meshTangentsIn, uint16_t numDeformerControllers) {
void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphicsMeshPointer, const baker::MeshNormals& meshNormals, const baker::MeshTangents& meshTangentsIn) {
    auto graphicsMesh = std::make_shared<graphics::Mesh>();

    // Fill tangents with a dummy value to force tangents to be present if there are normals
@@ -87,24 +86,25 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics

    // Support for 4 skinning clusters:
    // 4 Indices are uint8 ideally, uint16 if more than 256.
    const auto clusterIndiceElement = ((numDeformerControllers < (uint16_t)UINT8_MAX) ? gpu::Element(gpu::VEC4, gpu::UINT8, gpu::XYZW) : gpu::Element(gpu::VEC4, gpu::UINT16, gpu::XYZW));
    const auto clusterIndiceElement = (hfmMesh.clusters.size() < UINT8_MAX ? gpu::Element(gpu::VEC4, gpu::UINT8, gpu::XYZW) : gpu::Element(gpu::VEC4, gpu::UINT16, gpu::XYZW));
    // 4 Weights are normalized 16bits
    const auto clusterWeightElement = gpu::Element(gpu::VEC4, gpu::NUINT16, gpu::XYZW);

    // Record cluster sizes
    const size_t numVertClusters = hfmMesh.clusterWeightsPerVertex == 0 ? 0 : hfmMesh.clusterIndices.size() / hfmMesh.clusterWeightsPerVertex;
    const size_t clusterIndicesSize = numVertClusters * clusterIndiceElement.getSize();
    const size_t clusterWeightsSize = numVertClusters * clusterWeightElement.getSize();
    // Cluster indices and weights must be the same sizes
    const int NUM_CLUSTERS_PER_VERT = 4;
    const int numVertClusters = (hfmMesh.clusterIndices.size() == hfmMesh.clusterWeights.size() ? hfmMesh.clusterIndices.size() / NUM_CLUSTERS_PER_VERT : 0);
    const int clusterIndicesSize = numVertClusters * clusterIndiceElement.getSize();
    const int clusterWeightsSize = numVertClusters * clusterWeightElement.getSize();

    // Decide on where to put what sequentially in a big buffer:
    const size_t positionsOffset = 0;
    const size_t normalsAndTangentsOffset = positionsOffset + positionsSize;
    const size_t colorsOffset = normalsAndTangentsOffset + normalsAndTangentsSize;
    const size_t texCoordsOffset = colorsOffset + colorsSize;
    const size_t texCoords1Offset = texCoordsOffset + texCoordsSize;
    const size_t clusterIndicesOffset = texCoords1Offset + texCoords1Size;
    const size_t clusterWeightsOffset = clusterIndicesOffset + clusterIndicesSize;
    const size_t totalVertsSize = clusterWeightsOffset + clusterWeightsSize;
    const int positionsOffset = 0;
    const int normalsAndTangentsOffset = positionsOffset + positionsSize;
    const int colorsOffset = normalsAndTangentsOffset + normalsAndTangentsSize;
    const int texCoordsOffset = colorsOffset + colorsSize;
    const int texCoords1Offset = texCoordsOffset + texCoordsSize;
    const int clusterIndicesOffset = texCoords1Offset + texCoords1Size;
    const int clusterWeightsOffset = clusterIndicesOffset + clusterIndicesSize;
    const int totalVertsSize = clusterWeightsOffset + clusterWeightsSize;

    // Copy all vertex data in a single buffer
    auto vertBuffer = std::make_shared<gpu::Buffer>();
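
The size_t/int churn above all serves one layout pattern: every vertex attribute is packed into a single buffer, with each offset being the running total of everything placed before it. Reduced to its essentials (the sizes here are hypothetical byte counts):

    size_t positionsSize = 1024, normalsAndTangentsSize = 2048, colorsSize = 512;

    size_t positionsOffset = 0;
    size_t normalsAndTangentsOffset = positionsOffset + positionsSize;
    size_t colorsOffset = normalsAndTangentsOffset + normalsAndTangentsSize;
    size_t totalSize = colorsOffset + colorsSize;   // one allocation covers all attributes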
@@ -181,22 +181,22 @@ void buildGraphicsMesh(const hfm::Mesh& hfmMesh, graphics::MeshPointer& graphics

    // Clusters data
    if (clusterIndicesSize > 0) {
        if (numDeformerControllers < (uint16_t)UINT8_MAX) {
        if (hfmMesh.clusters.size() < UINT8_MAX) {
            // yay! we can fit the clusterIndices within 8-bits
            int32_t numIndices = (int32_t)hfmMesh.clusterIndices.size();
            std::vector<uint8_t> packedDeformerIndices;
            packedDeformerIndices.resize(numIndices);
            int32_t numIndices = hfmMesh.clusterIndices.size();
            QVector<uint8_t> clusterIndices;
            clusterIndices.resize(numIndices);
            for (int32_t i = 0; i < numIndices; ++i) {
                assert(hfmMesh.clusterIndices[i] <= UINT8_MAX);
                packedDeformerIndices[i] = (uint8_t)(hfmMesh.clusterIndices[i]);
                clusterIndices[i] = (uint8_t)(hfmMesh.clusterIndices[i]);
            }
            vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) packedDeformerIndices.data());
            vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) clusterIndices.constData());
        } else {
            vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) hfmMesh.clusterIndices.data());
            vertBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (const gpu::Byte*) hfmMesh.clusterIndices.constData());
        }
    }
    if (clusterWeightsSize > 0) {
        vertBuffer->setSubData(clusterWeightsOffset, clusterWeightsSize, (const gpu::Byte*) hfmMesh.clusterWeights.data());
        vertBuffer->setSubData(clusterWeightsOffset, clusterWeightsSize, (const gpu::Byte*) hfmMesh.clusterWeights.constData());
    }

|
|||
auto vertexBufferStream = std::make_shared<gpu::BufferStream>();
|
||||
|
||||
gpu::BufferPointer attribBuffer;
|
||||
size_t totalAttribBufferSize = totalVertsSize;
|
||||
int totalAttribBufferSize = totalVertsSize;
|
||||
gpu::uint8 posChannel = 0;
|
||||
gpu::uint8 tangentChannel = posChannel;
|
||||
gpu::uint8 attribChannel = posChannel;
|
||||
|
@@ -377,17 +377,6 @@ void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const
    const auto& meshIndicesToModelNames = input.get2();
    const auto& normalsPerMesh = input.get3();
    const auto& tangentsPerMesh = input.get4();
    const auto& shapes = input.get5();
    const auto& skinDeformers = input.get6();

    // Currently, there is only (at most) one skinDeformer per mesh
    // An undefined shape.skinDeformer has the value hfm::UNDEFINED_KEY
    std::vector<uint32_t> skinDeformerPerMesh;
    skinDeformerPerMesh.resize(meshes.size(), hfm::UNDEFINED_KEY);
    for (const auto& shape : shapes) {
        uint32_t skinDeformerIndex = shape.skinDeformer;
        skinDeformerPerMesh[shape.mesh] = skinDeformerIndex;
    }

    auto& graphicsMeshes = output;

@@ -395,16 +384,9 @@ void BuildGraphicsMeshTask::run(const baker::BakeContextPointer& context, const
    for (int i = 0; i < n; i++) {
        graphicsMeshes.emplace_back();
        auto& graphicsMesh = graphicsMeshes[i];

        uint16_t numDeformerControllers = 0;
        uint32_t skinDeformerIndex = skinDeformerPerMesh[i];
        if (skinDeformerIndex != hfm::UNDEFINED_KEY) {
            const hfm::SkinDeformer& skinDeformer = skinDeformers[skinDeformerIndex];
            numDeformerControllers = (uint16_t)skinDeformer.clusters.size();
        }

        // Try to create the graphics::Mesh
        buildGraphicsMesh(meshes[i], graphicsMesh, baker::safeGet(normalsPerMesh, i), baker::safeGet(tangentsPerMesh, i), numDeformerControllers);
        buildGraphicsMesh(meshes[i], graphicsMesh, baker::safeGet(normalsPerMesh, i), baker::safeGet(tangentsPerMesh, i));

        // Choose a name for the mesh
        if (graphicsMesh) {
@@ -2,8 +2,8 @@
//  BuildGraphicsMeshTask.h
//  model-baker/src/model-baker
//
//  Created by Sabrina Shanman on 2019/09/16.
//  Copyright 2019 High Fidelity, Inc.
//  Created by Sabrina Shanman on 2018/12/06.
//  Copyright 2018 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
@@ -20,7 +20,7 @@

class BuildGraphicsMeshTask {
public:
    using Input = baker::VaryingSet7<std::vector<hfm::Mesh>, hifi::URL, baker::MeshIndicesToModelNames, baker::NormalsPerMesh, baker::TangentsPerMesh, std::vector<hfm::Shape>, std::vector<hfm::SkinDeformer>>;
    using Input = baker::VaryingSet5<std::vector<hfm::Mesh>, hifi::URL, baker::MeshIndicesToModelNames, baker::NormalsPerMesh, baker::TangentsPerMesh>;
    using Output = std::vector<graphics::MeshPointer>;
    using JobModel = baker::Job::ModelIO<BuildGraphicsMeshTask, Input, Output>;

Some files were not shown because too many files have changed in this diff.