diff --git a/CODING_STANDARD.md b/CODING_STANDARD.md new file mode 100644 index 0000000000..17f7ecb2f2 --- /dev/null +++ b/CODING_STANDARD.md @@ -0,0 +1,1008 @@ +# Coding Standards + +Note that the current code base does not necessarily follow this with 100% consistency. It will be an ongoing process to try and sanitize the existing code to match these guidelines. + +Basically taken directly from [http://geosoft.no/development/cppstyle.html](http://geosoft.no/development/cppstyle.html) with some subtle changes and omissions. + +## [1] Naming + +### [1.1] General Naming Conventions + +#### [1.1.1] Names representing types must be in mixed case starting with upper case. + +```cpp +Coach, PenaltyBox + +``` + +#### [1.1.2] Private class variables must be in mixed case prefixed with an underscore. + +```cpp +_puck, _team + +``` + +#### [1.1.3] Local variables must be in mixed case (and NOT prefixed with an underscore). + +```cpp +redLine, icingFrequency + +``` + +#### [1.1.4] Constants must be all uppercase using underscore to separate words. + +```cpp +MAX_RINK_LENGTH, COLOR_RED_LINE + +``` + +#### [1.1.5] Methods or functions must be verbs and written in mixed case starting with lower case. + +```cpp +getPlayerNumber(), computeGoalsAgainstAverage() + +``` + +#### [1.1.6] Names representing namespaces should be all lowercase. + +```cpp +puck::geometry, ice::math + +``` + +#### [1.1.7] Names representing template types should be a single uppercase letter. + +```cpp +template, template, template + +``` + +This makes template names stand out relative to all other names used. + +#### [1.1.8] Abbreviations and acronyms must be uppercase when used in a name or lowercase when used at the beginning of a variable + +```cpp +showNHLStandings(); // not showNhlStandings(); +exportASCIIStanleyCup(); // not exportAsciiStanleyCup(); +UDPSocket udpSocket; // not UDPSocket uDPSocket; + +``` + +#### [1.1.9] Global variables should always be referred to using the :: operator. + +```cpp +::jumbotron.powerOn(); +::league.lockout(); + +``` + +#### [1.1.10] Generic variables should have the same name as their type. + +```cpp +void setPuckLogo(Logo* logo) // not void setPuckLogo(Logo* aLogo) + +``` + +These will be discernible from class member variables since they are not prefixed with an underscore. + +#### [1.1.11] All names should be written in English. + +```cpp +int hockeyStick; // NOT: bastonDeHockey + +``` + +#### [1.1.12] The name of the object is implicit, and should be avoided in a method name. + +```cpp +puck.getDensity(); // NOT: puck.getPuckDensity(); + +``` + +### [1.2] Specific Naming Conventions + +#### [1.2.1] The terms get/set must be used where an attribute is accessed directly. + +```cpp +player.getNumber(); +player.setNumber(number); +stick.getFlex(); +stick.setFlex(flex); + +``` + +There is an exception for boolean getters. Naming for boolean attributes should follow [section 1.2.10](https://wiki.highfidelity.com/wiki/Coding_Standards#1-2-10-the-prefix-is-should-be-used-for-boolean-variables-and-methods-). The getter for a boolean attribute does not need to be prefixed with 'get', and should simply match the name of the boolean attribute. The following example is for a member variable `_isCaptain` on the `crosby` object. + +```cpp +crosby.setIsCaptain(true); +crosby.isCaptain(); + +``` + +#### [1.2.2] The term compute can be used in methods where something is computed. 
+ +```cpp +team->computePowerPlayPercentage(); +player->computePointsPerGame(); + +``` + +Give the reader the immediate clue that this is a potentially time-consuming operation, and if used repeatedly, she might consider caching the result. Consistent use of the term enhances readability. + +#### [1.2.3] The term find can be used in methods where something is looked up. + +```cpp +net.findGoalLinePosition(); +team.findHeaviestPlayer(); + +``` + +Give the reader the immediate clue that this is a simple look up method with a minimum of computations involved. Consistent use of the term enhances readability. + +#### [1.2.4] The term initialize can be used where an object or a concept is established. + +``` +rink.initializePaintedLines(); +video.initializeOnScreenScore(); + +``` + +#### [1.2.5] Variables representing GUI components should be suffixed by the component type name. + +```cpp +scoreboardText, mainWindow, fileMenu + +``` + +#### [1.2.6] Plural form should be used on names representing a collection of objects. + +```cpp +std::vector players; +float savePercentages[]; + +``` + +#### [1.2.7] The prefix num should be used for variables representing a number of objects. + +```cpp +numGoals, numAssists + +``` + +#### [1.2.8] The suffix Num should be used for variables representing an entity number. + +```cpp +playerNum, teamNum + +``` + +#### [1.2.9] Iterator variables should be called i, j, k etc. + +```cpp +for (int i = 0; i < numGoals); i++) { + goals[i].playVideo(); +} + +``` + +#### [1.2.10] The prefix is should be used for boolean variables and methods. + +isGoodGoal, isRetired, isWinningTeam Occasionally the has, can, should, and want prefixes will be better choices. + +*Note: "want" should generally be used for optional items that are specified by some third party action, e.g. command line or menu options that enable additional functionality, or protocol versioning where negotiation occurs between client and server.* + +```cpp +hasWonStanleyCup, canPlay, shouldPass, wantDebugLogging + +``` + +#### [1.2.11] Complement names must be used for complement operations + +```cpp +get/set, add/remove, create/destroy, start/stop + +``` + +#### [1.2.12] Abbreviations in names should be avoided. + +```cpp +computeGoalsAgainstAverage(); // NOT: compGlsAgstAvg(); + +``` + +There are domain specific phrases that are more naturally known through their abbreviations/acronym. These phrases should be kept abbreviated. + +Use `html` instead of `hypertextMarkupLanguage`. + +#### [1.2.13] Naming pointers specifically should be avoided. + +```cpp +Puck* puck; // NOT: Puck * puckPtr; + +``` + +Many variables in a C/C++ environment are pointers, so a convention like this is almost impossible to follow. Also objects in C++ are often oblique types where the specific implementation should be ignored by the programmer. Only when the actual type of an object is of special significance, the name should emphasize the type. + +#### [1.2.14] Negated boolean variable names must be avoided. + +```cpp +bool isRetired; // NOT: isNotRetired or isNotPlaying + +``` + +This is done to avoid double negatives when used in conjunction with the logical negation operator. + +#### [1.2.15] Enumeration constants can be prefixed by a common type name. + +```cpp +enum Jersey { + JERSEY_HOME, + JERSEY_AWAY, + JERSEY_ALTERNATE +}; + +``` + +#### [1.2.16] Exception classes should be suffixed with Exception. + +```cpp +class GoalException { + ... 
+}; + +``` + +## [2] Files + +### [2.1] Source Files + +#### [2.1.1] C++ header files should have the extension .h. Source files should have the extension .cpp. + +```cpp +Puck.h, Puck.cpp + +``` + +#### [2.1.2] A class should always be declared in a header file and defined in a source file where the name of the files match the name of the class. + +`class Puck` defined in `Puck.h`, `Puck.cpp` + +#### [2.1.3] Most function implementations should reside in the source file. + +The header files should declare an interface, the source file should implement it. When looking for an implementation, the programmer should always know that it is found in the source file. + +- Simple getters and setters that just access private member variables should appear inline in the class definition in the header file. +- Simple methods like those making slight mutations (that can fit on the same line in the definition and don't require additional includes in the header file) can be inlined in the class definition. +- Methods that will be called multiple times in tight-loops or other high-performance situations and must be high performance can be included in the header file BELOW the class definition marked as inline. +- All other methods must be in a cpp file. + +```cpp +class Puck { +public: + // simple getters/setters should appear in the header file + int getRadius() const { return _radius; } + void setRadius(int radius) { _radius = radius; } + + // Allowed, ok to include this simple mutation in line + void addBlaToList(Blah* bla) { _blas.append(bla); } + + // Allowed, because this is a simple method + int calculateCircumference() { return PI * pow(_radius, 2.0); } + + // this routine needs to be fast, we'll inline it below + void doSomethingHighPerformance() const; + ... +private: + int _radius; +} + +inline void Puck::doSomethingHighPerformance() const { + ... +} + +``` + +#### [2.1.4] File content must be kept within 128 columns. + +#### [2.1.5] Special characters like TAB and page break must be avoided. + +Use four spaces for indentation. + +#### [2.1.6] The incompleteness of split lines must be made obvious. + +```cpp +teamGoals = iginlaGoals + crosbyGoals + + malkinGoals; + +addToScoreSheet(scorer, directAssister, + indirectAssister); + +setHeadline("Crosby scores 4" + " to force game 7."); + +for (int teamNum = 0; teamNum < numTeams; + teamNum++) { + ... +} + +``` + +Split lines occurs when a statement exceed the 128 column limit given above. It is difficult to provide rigid rules for how lines should be split, but the examples above should give a general hint. + +In general: Break after a comma. Break after an operator. Align the new line with the beginning of the expression on the previous line. + +### [2.2] Include Files and Include Statements + +#### [2.2.1] Header files must contain an include guard. + +Include guards should be in the following format: hifi_$BASENAME_h. + +```cpp +#ifndef hifi_SharedUtil_h +#define hifi_SharedUtil_h + +... + +#endif // hifi_SharedUtil_h + +``` + +#### [2.2.2] Include statements should be sorted and grouped. Sorted by their hierarchical position in the system with low level files included first. Leave an empty line between groups of include statements. + +```cpp +#include +#include + +#include +#include + +#include "Puck.h" +#include "PenaltyBox.h" + +``` + +#### [2.2.3] Include statements must be located at the top of a file only. + +## [3] Statements + +### [3.1] Types + +#### [3.1.1] The parts of a class must be sorted public, protected and private. 
All sections must be identified explicitly. Not applicable sections should be left out. + +The ordering is "most public first" so people who only wish to use the class can stop reading when they reach the protected/private sections. + +#### [3.1.2] Never rely on implicit type conversion. // NOT: floatValue = intValue; + +##### [3.1.2.1] Primitive types should use C style casting: + +```cpp +int foo = 1; +float bar = (float)foo; +// NOT this: float fubar = float(foo); + +uint8_t* barDataAt = (uint8_t*)&bar; // pointers to primitive types also use C style casting. + +``` + +##### [3.1.2.2] Class pointers must use C++ style casting: + +```cpp +Player* player = getPlayer("forward"); +Forward* forward = static_cast(player); + +``` + +For more info about C++ type casting: [http://stackoverflow.com/questions/1609163/what-is-the-difference-between-static-cast-and-c-style-casting](http://stackoverflow.com/questions/1609163/what-is-the-difference-between-static-cast-and-c-style-casting) + +#### [3.1.3] Use of *const* + +##### [3.1.3.1] Use const types for variables, parameters, return types, and methods whenever possible + +```cpp +void exampleBarAndFoo(const Bar& bar, const char* foo); // doesn't modify bar and foo, use const types +void ClassBar::spam() const { } // doesn't modify instance of ClassBar, use const method + +``` + +##### [3.1.3.2] Place the const keyword before the type + +```cpp +void foo(const Bar& bar); +// NOT: void foo(Bar const& bar); +void spam(const Foo* foo); +// NOT: void foo(Foo const* foo); + +``` + +##### [3.1.3.3] When implementing a getter for a class that returns a class member that is a complex data type, return a const& to that member. + +```cpp +const glm::vec3& AABox::getCorner() const; +// NOT: glm::vec3 AABox::getCorner() const; + +``` + +#### [3.1.4] Type aliases + +##### [3.1.4.1] When creating a type alias, prefer the using keyword. + +```cpp +template +using Vec = std::vector>; +using Nodes = Vec ; +// NOT: typedef std::vector Nodes; + +``` + +### [3.2] Variables + +#### [3.2.1] Variables should be initialized where they are declared. + +This ensures that variables are valid at any time. + +Sometimes it is impossible to initialize a variable to a valid value where it is declared: + +```cpp +Player crosby, dupuis, kunitz; +getLineStats(&crosby, &dupuis, &kunitz); + +``` + +In these cases it should be left uninitialized rather than initialized to some phony value. + +#### [3.2.2] Initialization of member variables with default values + +When possible, initialization of default values for class members should be included in the header file where the member variable is declared, as opposed to the constructor. Use the Universal Initializer format (brace initialization) rather than the assignment operator (equals). + +```cpp +private: + float _goalsPerGame { 0.0f }; // NOT float _goalsPerGame = 0.0f; + +``` + +However, brace initialization should be used with care when using container types that accept an initializer list as a constructor parameters. For instance, + +```cpp +std::vector _foo { 4, 100 } + +``` + +Might refer to `std::vector::vector(std::initializer_list)` or it might refer to `std::vector (size_type n, const T& val = value_type())`. Although the rules of precedence dictate that it will resolve to one of these, it's not immediately obvious to other developers which it is, so avoid such ambiguities. + +Classes that are forward declared and only known to the implementation may be initialized to a default value in the constructor initialization list. 
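+
+As a minimal sketch of both forms (the `Team` and `Roster` names below are illustrative only, not taken from the codebase): simple members receive brace-initialized defaults in the header, while a member whose type is only forward declared gets its default value in the constructor initialization list.
+
+```cpp
+// Team.h
+class Roster;                      // forward declared; the full definition is only known to Team.cpp
+
+class Team {
+public:
+    Team();
+private:
+    float _goalsPerGame { 0.0f };  // simple member: brace-initialized default in the header
+    int _numWins { 0 };
+    Roster* _roster;               // forward-declared type: defaulted in the constructor
+};
+
+// Team.cpp
+#include "Team.h"
+#include "Roster.h"
+
+Team::Team() :
+    _roster(nullptr) {
+}
+
+```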
+
+#### [3.2.3] Use of global variables should be minimized
+
+[http://stackoverflow.com/questions/484635/are-global-variables-bad](http://stackoverflow.com/questions/484635/are-global-variables-bad)
+
+#### [3.2.4] Class variables should never be declared public
+
+Use private variables and access functions instead.
+
+One exception to this rule is when the class is essentially a data structure, with no behavior (equivalent to a C struct). In this case it is appropriate to make the class' instance variables public.
+
+*Note that structs are kept in C++ for compatibility with C only, and avoiding them increases the readability of the code by reducing the number of constructs used. Use a class instead.*
+
+#### [3.2.5] C++ pointers and references should have their reference symbol next to the type rather than to the name.
+
+```cpp
+float* savePercentages;
+// NOT: float *savePercentages; or float * savePercentages;
+
+void checkCups(int& numCups);
+// NOT: int &numCups or int & numCups
+
+```
+
+The pointer-ness or reference-ness of a variable is a property of the type rather than the name. Also see [rule 3.1.3.2](https://wiki.highfidelity.com/wiki/Coding_Standards#constplacement) regarding placement of the const keyword before the type.
+
+#### [3.2.6] Implicit test for 0 should not be used other than for boolean variables or non-NULL pointers.
+
+```cpp
+if (numGoals != 0) // NOT: if (numGoals)
+if (savePercentage != 0.0) // NOT: if (savePercentage)
+
+// Testing pointers for non-NULL is preferred, e.g. where
+// childNode is Node* and you're testing for non-NULL
+if (childNode)
+
+// Testing for null is also preferred
+if (!childNode)
+
+```
+
+The C++ standard does not necessarily define that int and float values of 0 are implemented as binary 0.
+
+#### [3.2.7] Variables should be declared in the smallest scope possible.
+
+By keeping the operations on a variable within a small scope, it is easier to control the effects and side effects of the variable.
+
+### [3.3] Loops
+
+#### [3.3.1] Loop variables should be initialized immediately before the loop.
+
+#### [3.3.2] The form while (true) should be used for infinite loops.
+
+```cpp
+while (true) {
+    :
+}
+
+// NOT:
+
+for (;;) {
+    :
+}
+
+while (1) {
+    :
+}
+
+```
+
+### [3.4] Conditionals
+
+#### [3.4.1] The nominal case should be put in the if-part and the exception in the else-part of an if statement
+
+```cpp
+bool isGoal = pastGoalLine(position);
+
+if (isGoal) {
+    ...
+} else {
+    ...
+}
+
+```
+
+This makes sure that the exceptions don't obscure the normal path of execution, which is important for both readability and performance.
+
+#### [3.4.2] The conditional should be put on a separate line and wrapped in braces.
+
+```cpp
+if (isGoal) {
+    lightTheLamp();
+}
+
+// NOT: if (isGoal) lightTheLamp();
+
+```
+
+#### [3.4.3] Write the expression of a conditional similar to how you would speak it out loud.
+
+```cpp
+if (someVariable == 0) {
+    doSomething();
+}
+// NOT: if (0 == someVariable)
+
+```
+
+### [3.5] Miscellaneous
+
+#### [3.5.1] Constants and Magic Numbers
+
+##### [3.5.1.1] The use of magic numbers in the code should be avoided.
+
+- Numbers other than 0 and 1 should be declared as named constants instead.
+- If the number does not have an obvious meaning by itself, the readability is enhanced by introducing a named constant instead.
+- A different approach is to introduce a method from which the constant can be accessed.
+
+##### [3.5.1.2] Declare constants closest to the scope of their use.
+ +```cpp +bool checkValueLimit(int value) { + const int ValueLimit = 10; // I only use this constant here, define it here in context + return (value > ValueLimit); +} + +``` + +##### [3.5.1.3] Use const typed variables instead of #define + +```cpp +const float LARGEST_VALUE = 10000.0f; +// NOT: #define LARGEST_VALUE 10000.0f + +``` + +#### [3.5.2] Floating point constants should always be written with decimal point and at least one decimal. + +```cpp +double stickLength = 0.0; // NOT: double stickLength = 0; + +double penaltyMinutes; +... +penaltyMinutes = (minor + misconduct) * 2.0; + +``` + +#### [3.5.3] Floating point constants should always be written with a digit before the decimal point. + +```cpp +double penaltyMinutes = 0.5; // NOT: double penaltyMinutes = .5; + +``` + +#### [3.5.4] When using a single precision float type, include the trailing f. + +```cpp +float penaltyMinutes = 0.5f; // NOT: float penaltyMinutes = 0.5; + +``` + +## [4] Layout and Comments + +### [4.1] Layout + +#### [4.1.1] Basic indentation should be 4. + +```cpp +if (player.isCaptain) { + player.yellAtReferee(); +} + +``` + +#### [4.1.2] Use inline braces for block layout + +```cpp +while (!puckHeld) { + lookForRebound(); +} + +// NOT: +// while (!puckHeld) +// { +// lookForRebound(); +// } + +``` + +#### [4.1.3] The class declarations should have the following form: + +```cpp +class GoalieStick : public HockeyStick { +public: + ... +protected: + ... +private: + ... +}; + +``` + +#### [4.1.4] Method definitions should have the following form: + +```cpp +void goalCelebration() { + ... +} + +``` + +#### [4.1.5] The if-else class of statements should have the following form: + +```cpp +if (isScorer) { + scoreGoal(); +} + +if (isScorer) { + scoreGoal(); +} else { + saucerPass(); +} + +if (isScorer) { + scoreGoal(); +} else if (isPlaymaker) { + saucerPass(); +} else { + startFight(); +} + +``` + +#### [4.1.6] A for statement should have the following form: + +```cpp +for (int i = 0; i < GRETZKY_NUMBER; i++) { + getActivePlayerWithNumber(i); +} + +``` + +#### [4.1.7] A while statement should have the following form: + +```cpp +while (!whistle) { + keepPlaying(); +} + +``` + +#### [4.1.8] A do-while statement should have the following form: + +```cpp +do { + skate(); +} while (!tired); + +``` + +#### [4.1.9] Switch/Case Statements: + +A switch statements should follow the following basic formatting rules: + +- The case statements are indented one indent (4 spaces) from the switch. +- The code for each case should be indented one indent (4 spaces) from the case statement. +- Each separate case should have a break statement, unless it is explicitly intended for the case to fall through to the subsequent cases. In the event that a case statement executes some code, then falls through to the next case, you must include an explicit comment noting that this is intentional. +- Break statements should be aligned with the code of the case, e.g. indented 4 spaces from the case statement. +- In the event that brackets are required to create local scope, the open bracket should appear on the same line as the case, and the close bracket should appear on the line immediately following the break aligned with the case statement. 
+ +Examples of acceptable form are: + +```cpp +switch (foo) { + case BAR: + doBar(); + break; + + // notice brackets below follow the standard bracket placement for other control structures + case SPAM: { + int spam = 0; + doSomethingElse(spam); + break; + } + + case SPAZZ: + case BAZZ: + doSomething(); + // fall through to next case + + case RAZZ: + default: + doSomethingElseEntirely(); + break; +} + +// or in cases where returns occur at each case, this form is also accpetable +switch (jerseyNumber) { + case 87: + return crosby; + case 66: + return lemieux; + case 99: + return gretzky; + default: + return NULL; +} + +``` + +#### [4.1.10] A try-catch statement should have the following form: + +```cpp +try { + tradePlayer(); +} catch (const NoTradeClauseException& exception) { + negotiateNoTradeClause(); +} + +``` + +#### [4.1.11] Single statement if-else, for or while statements must be written with braces. + +```cpp +// GOOD: +for (int i = 0; i < numItems; i++) { + item[i].manipulate(); +} + +// BAD: braces are missing +for (int i = 0; i < numItems; i++) + item[i].manipulate(); +``` + +### [4.2] White space + +#### [4.2.1] Conventional operators should be surrounded by a space character, except in cases like mathematical expressions where it is easier to visually parse when spaces are used to enhance the grouping. + +```cpp +potential = (age + skill) * injuryChance; +// NOT: potential = (age+skill)*injuryChance; + +// Assignment operators always have spaces around them. +x = 0; + +// Other binary operators usually have spaces around them, but it's +// OK to remove spaces around factors. Parentheses should have no +// internal padding. +v = w * x + y / z; +v = w*x + y/z; +v = w * (x + z); + +``` + +#### [4.2.2] C++ reserved words should be followed by a white space. + +```cpp +setLine(leftWing, center, rightWing, leftDefense, rightDefense); +// NOT: setLine(leftWing,center,rightWing,leftDefense,rightDefense); + +``` + +#### [4.2.3] Semicolons in for statments should be followed by a space character. + +```cpp +for (i = 0; i < 10; i++) { // NOT: for(i=0;i<10;i++){ + +``` + +#### [4.2.4] Declaring and Calling Functions + +- Function names should not be followed by a white space. +- And there should be no space between the open parenthesis and the first parameter, and no space between the last parameter and the close parenthesis. + +Examples: + +```cpp +setCaptain(ovechkin); +// NOT: setCaptain (ovechkin); +// NOT: doSomething( int foo, float bar ); + +``` + +#### [4.2.6] Logical units within a block should be separated by one blank line. + +```cpp +Team penguins = new Team(); + +Player crosby = new Player(); +Player fleury = new Player(); + +penguins.setCaptain(crosby); +penguins.setGoalie(fleury); + +penguins.hireCoach(); + +``` + +#### [4.2.6] Avoid adding optional spaces across multi-line statements and adjacent statements. + +Avoid the following: + +``` +oddsToWin = (averageAge * veteranWeight) + + (numStarPlayers * starPlayerWeight) + + (goalieOverall * goalieWeight); + +theGreatOneSlapShotSpeed = computeShot(stickFlex, chara); +charaSlapShotSpeed = computeShot(stickFlex, weber); + +``` + +A change to the length of a variable in these sections causes unnecessary changes to the other lines. + +#### [4.2.7] Multi-line statements must have all n+1 lines indented at least one level (four spaces). + +Align all n+2 lines with the indentation of the n+1 line. 
+ +When the multiple lines are bound by parentheses (as in arguments to a function call), the prefered style has no whitespace after the opening parenthesis or before the closing parenthesis. The n+1 lines are generally indented to the column immediately after the opening parenthesis (following the style for split expressions in 2.1.6). + +When the multiple lines are bound by braces (as in C++ initializers or JavaScript object notation), the preferred style has a newline after the opening brace and newline before the closing brace. The final line should not end in a comma, and no line should begin with a comma. The closing brace should begin in the same colum as the line that has the opening brace (following the style for split control statements in 4.1). + +Expressions, including C++ initializers and JavaScript object notation literals, can be placed on a single line if they are not deeply nested and end well within the column limit (2.1.4). + +The following are all acceptable: + +```cpp +shootOnNet(puckVelocity, + playerStrength, + randomChance); + +shootOnNet(puckVelocty, + playerStrength, + randomChance); + +if (longBooleanThatHasToDoWithHockey + && anotherBooleanOnANewLine); + +isGoodGoal = playerSlapShotVelocity > 100 + ? true + : false; + +var foo = { + spam: 1.0, + bar: "bar", + complex: { + red: 1, + white: 'blue' + }, + blah: zed +}; + +aJavascriptFunctionOfTwoFunctions(function (entity) { + print(entity); + foo(entity, 3); +}, function (entity) { + print('in second function'); + bar(entity, 4); +}); + +aCPlusPlusFunctionOfTwoLambdas([](gpu::Batch& batch) { + batch.setFramebuffer(nullptr); +}, [this](int count, float amount) { + frob(count, amount); +}); + +``` + +### [4.3] Comments + +#### [4.3.1] All comments should be written in English + +In an international environment English is the preferred language. + +#### [4.3.2] Use // for all comments, including multi-line comments. + +An exception to this rule applies for jsdoc or Doxygen comments. + +```cpp +// Comment spanning +// more than one line. + +``` + +There should be a space between the "//" and the actual comment + +#### [4.3.3] Comments should be included relative to their position in the code + +```cpp +while (true) { + // crosby is always injured + crosbyInjury(); +} + +// NOT: +// crosby is always injured +while (true) { + crosbyInjury(); +} + +``` + +#### [4.3.4] Source files (header and implementation) must include a boilerplate. + +Boilerplates should include the filename, location, creator, copyright, and Apache 2.0 License information and be placed at the top of the file. + +```cpp +// +// NodeList.h +// libraries/shared/src +// +// Created by Stephen Birarda on 2/15/13. +// Copyright 2013 High Fidelity, Inc. +// +// This is where you could place an optional one line comment about the file. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +``` + +#### [4.3.5] Never include Horizontal "line break" style comment blocks + +These types of comments are explicitly not allowed. If you need to break up sections of code, just leave an extra blank line. 
+ +```cpp +////////////////////////////////////////////////////////////////////////////////// + +/********************************************************************************/ + +//-------------------------------------------------------------------------------- +``` + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4654c311cc..f9a54f1adc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -16,7 +16,7 @@ Contributing git checkout -b new_branch_name ``` 4. Code - * Follow the [coding standard](https://docs.highfidelity.com/build-guide/coding-standards) + * Follow the [coding standard](CODING_STANDARD.md) 5. Commit * Use [well formed commit messages](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) 6. Update your branch diff --git a/domain-server/src/DomainGatekeeper.cpp b/domain-server/src/DomainGatekeeper.cpp index e23d9e57a8..8c7beaa614 100644 --- a/domain-server/src/DomainGatekeeper.cpp +++ b/domain-server/src/DomainGatekeeper.cpp @@ -1010,7 +1010,7 @@ void DomainGatekeeper::refreshGroupsCache() { nodeList->eachNode([this](const SharedNodePointer& node) { if (!node->getPermissions().isAssignment) { // this node is an agent - const QString& verifiedUserName = node->getPermissions().getVerifiedUserName(); + QString verifiedUserName = node->getPermissions().getVerifiedUserName(); if (!verifiedUserName.isEmpty()) { getGroupMemberships(verifiedUserName); } diff --git a/interface/resources/avatar/avatar-animation.json b/interface/resources/avatar/avatar-animation.json index 27e45daa7b..8a212a16da 100644 --- a/interface/resources/avatar/avatar-animation.json +++ b/interface/resources/avatar/avatar-animation.json @@ -197,260 +197,100 @@ "id": "rightHandStateMachine", "type": "stateMachine", "data": { - "currentState": "rightHandGrasp", + "currentState": "rightHandAnimNone", "states": [ { - "id": "rightHandGrasp", - "interpTarget": 3, + "id": "rightHandAnimNone", + "interpTarget": 1, "interpDuration": 3, "transitions": [ - { "var": "isRightIndexPoint", "state": "rightIndexPoint" }, - { "var": "isRightThumbRaise", "state": "rightThumbRaise" }, - { "var": "isRightIndexPointAndThumbRaise", "state": "rightIndexPointAndThumbRaise" } + { "var": "rightHandAnimA", "state": "rightHandAnimA" }, + { "var": "rightHandAnimB", "state": "rightHandAnimB" } ] }, { - "id": "rightIndexPoint", - "interpTarget": 15, + "id": "rightHandAnimA", + "interpTarget": 1, "interpDuration": 3, "transitions": [ - { "var": "isRightHandGrasp", "state": "rightHandGrasp" }, - { "var": "isRightThumbRaise", "state": "rightThumbRaise" }, - { "var": "isRightIndexPointAndThumbRaise", "state": "rightIndexPointAndThumbRaise" } + { "var": "rightHandAnimNone", "state": "rightHandAnimNone" }, + { "var": "rightHandAnimB", "state": "rightHandAnimB" } ] }, { - "id": "rightThumbRaise", - "interpTarget": 15, + "id": "rightHandAnimB", + "interpTarget": 1, "interpDuration": 3, "transitions": [ - { "var": "isRightHandGrasp", "state": "rightHandGrasp" }, - { "var": "isRightIndexPoint", "state": "rightIndexPoint" }, - { "var": "isRightIndexPointAndThumbRaise", "state": "rightIndexPointAndThumbRaise" } - ] - }, - { - "id": "rightIndexPointAndThumbRaise", - "interpTarget": 15, - "interpDuration": 3, - "transitions": [ - { "var": "isRightHandGrasp", "state": "rightHandGrasp" }, - { "var": "isRightIndexPoint", "state": "rightIndexPoint" }, - { "var": "isRightThumbRaise", "state": "rightThumbRaise" } + { "var": "rightHandAnimNone", "state": "rightHandAnimNone" }, + { "var": "rightHandAnimA", "state": "rightHandAnimA" } ] } ] 
}, "children": [ { - "id": "rightHandGrasp", - "type": "blendLinear", - "data": { - "alpha": 0.0, - "alphaVar": "rightHandGraspAlpha" - }, - "children": [ - { - "id": "rightHandGraspOpen", - "type": "clip", - "data": { - "url": "qrc:///avatar/animations/hydra_pose_open_right.fbx", - "startFrame": 0.0, - "endFrame": 0.0, - "timeScale": 1.0, - "loopFlag": true - }, - "children": [] - }, - { - "id": "rightHandGraspClosed", - "type": "clip", - "data": { - "url": "qrc:///avatar/animations/hydra_pose_closed_right.fbx", - "startFrame": 0.0, - "endFrame": 0.0, - "timeScale": 1.0, - "loopFlag": true - }, - "children": [] - } - ] - }, - { - "id": "rightIndexPoint", - "type": "blendLinear", - "data": { - "alpha": 0.0, - "alphaVar": "rightHandGraspAlpha" - }, - "children": [ - { - "id": "rightIndexPointOpen", - "type": "clip", - "data": { - "url": "qrc:///avatar/animations/touch_point_open_right.fbx", - "startFrame": 15.0, - "endFrame": 15.0, - "timeScale": 1.0, - "loopFlag": true - }, - "children": [] - }, - { - "id": "rightIndexPointClosed", - "type": "clip", - "data": { - "url": "qrc:///avatar/animations/touch_point_closed_right.fbx", - "startFrame": 15.0, - "endFrame": 15.0, - "timeScale": 1.0, - "loopFlag": true - }, - "children": [] - } - ] - }, - { - "id": "rightThumbRaise", - "type": "blendLinear", - "data": { - "alpha": 0.0, - "alphaVar": "rightHandGraspAlpha" - }, - "children": [ - { - "id": "rightThumbRaiseOpen", - "type": "clip", - "data": { - "url": "qrc:///avatar/animations/touch_thumb_open_right.fbx", - "startFrame": 15.0, - "endFrame": 15.0, - "timeScale": 1.0, - "loopFlag": true - }, - "children": [] - }, - { - "id": "rightThumbRaiseClosed", - "type": "clip", - "data": { - "url": "qrc:///avatar/animations/touch_thumb_closed_right.fbx", - "startFrame": 15.0, - "endFrame": 15.0, - "timeScale": 1.0, - "loopFlag": true - }, - "children": [] - } - ] - }, - { - "id": "rightIndexPointAndThumbRaise", - "type": "blendLinear", - "data": { - "alpha": 0.0, - "alphaVar": "rightHandGraspAlpha" - }, - "children": [ - { - "id": "rightIndexPointAndThumbRaiseOpen", - "type": "clip", - "data": { - "url": "qrc:///avatar/animations/touch_thumb_point_open_right.fbx", - "startFrame": 15.0, - "endFrame": 15.0, - "timeScale": 1.0, - "loopFlag": true - }, - "children": [] - }, - { - "id": "rightIndexPointAndThumbRaiseClosed", - "type": "clip", - "data": { - "url": "qrc:///avatar/animations/touch_thumb_point_closed_right.fbx", - "startFrame": 15.0, - "endFrame": 15.0, - "timeScale": 1.0, - "loopFlag": true - }, - "children": [] - } - ] - } - ] - }, - { - "id": "leftHandOverlay", - "type": "overlay", - "data": { - "alpha": 0.0, - "boneSet": "leftHand", - "alphaVar": "leftHandOverlayAlpha" - }, - "children": [ - { - "id": "leftHandStateMachine", + "id": "rightHandAnimNone", "type": "stateMachine", "data": { - "currentState": "leftHandGrasp", + "currentState": "rightHandGrasp", "states": [ { - "id": "leftHandGrasp", + "id": "rightHandGrasp", "interpTarget": 3, "interpDuration": 3, "transitions": [ - { "var": "isLeftIndexPoint", "state": "leftIndexPoint" }, - { "var": "isLeftThumbRaise", "state": "leftThumbRaise" }, - { "var": "isLeftIndexPointAndThumbRaise", "state": "leftIndexPointAndThumbRaise" } + { "var": "isRightIndexPoint", "state": "rightIndexPoint" }, + { "var": "isRightThumbRaise", "state": "rightThumbRaise" }, + { "var": "isRightIndexPointAndThumbRaise", "state": "rightIndexPointAndThumbRaise" } ] }, { - "id": "leftIndexPoint", + "id": "rightIndexPoint", "interpTarget": 15, "interpDuration": 3, 
"transitions": [ - { "var": "isLeftHandGrasp", "state": "leftHandGrasp" }, - { "var": "isLeftThumbRaise", "state": "leftThumbRaise" }, - { "var": "isLeftIndexPointAndThumbRaise", "state": "leftIndexPointAndThumbRaise" } + { "var": "isRightHandGrasp", "state": "rightHandGrasp" }, + { "var": "isRightThumbRaise", "state": "rightThumbRaise" }, + { "var": "isRightIndexPointAndThumbRaise", "state": "rightIndexPointAndThumbRaise" } ] }, { - "id": "leftThumbRaise", + "id": "rightThumbRaise", "interpTarget": 15, "interpDuration": 3, "transitions": [ - { "var": "isLeftHandGrasp", "state": "leftHandGrasp" }, - { "var": "isLeftIndexPoint", "state": "leftIndexPoint" }, - { "var": "isLeftIndexPointAndThumbRaise", "state": "leftIndexPointAndThumbRaise" } + { "var": "isRightHandGrasp", "state": "rightHandGrasp" }, + { "var": "isRightIndexPoint", "state": "rightIndexPoint" }, + { "var": "isRightIndexPointAndThumbRaise", "state": "rightIndexPointAndThumbRaise" } ] }, { - "id": "leftIndexPointAndThumbRaise", + "id": "rightIndexPointAndThumbRaise", "interpTarget": 15, "interpDuration": 3, "transitions": [ - { "var": "isLeftHandGrasp", "state": "leftHandGrasp" }, - { "var": "isLeftIndexPoint", "state": "leftIndexPoint" }, - { "var": "isLeftThumbRaise", "state": "leftThumbRaise" } + { "var": "isRightHandGrasp", "state": "rightHandGrasp" }, + { "var": "isRightIndexPoint", "state": "rightIndexPoint" }, + { "var": "isRightThumbRaise", "state": "rightThumbRaise" } ] } ] }, "children": [ { - "id": "leftHandGrasp", + "id": "rightHandGrasp", "type": "blendLinear", "data": { "alpha": 0.0, - "alphaVar": "leftHandGraspAlpha" + "alphaVar": "rightHandGraspAlpha" }, "children": [ { - "id": "leftHandGraspOpen", + "id": "rightHandGraspOpen", "type": "clip", "data": { - "url": "qrc:///avatar/animations/hydra_pose_open_left.fbx", + "url": "qrc:///avatar/animations/hydra_pose_open_right.fbx", "startFrame": 0.0, "endFrame": 0.0, "timeScale": 1.0, @@ -459,12 +299,12 @@ "children": [] }, { - "id": "leftHandGraspClosed", + "id": "rightHandGraspClosed", "type": "clip", "data": { - "url": "qrc:///avatar/animations/hydra_pose_closed_left.fbx", - "startFrame": 10.0, - "endFrame": 10.0, + "url": "qrc:///avatar/animations/hydra_pose_closed_right.fbx", + "startFrame": 0.0, + "endFrame": 0.0, "timeScale": 1.0, "loopFlag": true }, @@ -473,18 +313,18 @@ ] }, { - "id": "leftIndexPoint", + "id": "rightIndexPoint", "type": "blendLinear", - "data": { + "data": { "alpha": 0.0, - "alphaVar": "leftHandGraspAlpha" + "alphaVar": "rightHandGraspAlpha" }, "children": [ { - "id": "leftIndexPointOpen", + "id": "rightIndexPointOpen", "type": "clip", "data": { - "url": "qrc:///avatar/animations/touch_point_open_left.fbx", + "url": "qrc:///avatar/animations/touch_point_open_right.fbx", "startFrame": 15.0, "endFrame": 15.0, "timeScale": 1.0, @@ -493,10 +333,10 @@ "children": [] }, { - "id": "leftIndexPointClosed", + "id": "rightIndexPointClosed", "type": "clip", "data": { - "url": "qrc:///avatar/animations/touch_point_closed_left.fbx", + "url": "qrc:///avatar/animations/touch_point_closed_right.fbx", "startFrame": 15.0, "endFrame": 15.0, "timeScale": 1.0, @@ -507,18 +347,18 @@ ] }, { - "id": "leftThumbRaise", + "id": "rightThumbRaise", "type": "blendLinear", - "data": { + "data": { "alpha": 0.0, - "alphaVar": "leftHandGraspAlpha" + "alphaVar": "rightHandGraspAlpha" }, "children": [ { - "id": "leftThumbRaiseOpen", + "id": "rightThumbRaiseOpen", "type": "clip", "data": { - "url": "qrc:///avatar/animations/touch_thumb_open_left.fbx", + "url": 
"qrc:///avatar/animations/touch_thumb_open_right.fbx", "startFrame": 15.0, "endFrame": 15.0, "timeScale": 1.0, @@ -527,10 +367,10 @@ "children": [] }, { - "id": "leftThumbRaiseClosed", + "id": "rightThumbRaiseClosed", "type": "clip", "data": { - "url": "qrc:///avatar/animations/touch_thumb_closed_left.fbx", + "url": "qrc:///avatar/animations/touch_thumb_closed_right.fbx", "startFrame": 15.0, "endFrame": 15.0, "timeScale": 1.0, @@ -541,18 +381,18 @@ ] }, { - "id": "leftIndexPointAndThumbRaise", + "id": "rightIndexPointAndThumbRaise", "type": "blendLinear", - "data": { + "data": { "alpha": 0.0, - "alphaVar": "leftHandGraspAlpha" + "alphaVar": "rightHandGraspAlpha" }, "children": [ { - "id": "leftIndexPointAndThumbRaiseOpen", + "id": "rightIndexPointAndThumbRaiseOpen", "type": "clip", "data": { - "url": "qrc:///avatar/animations/touch_thumb_point_open_left.fbx", + "url": "qrc:///avatar/animations/touch_thumb_point_open_right.fbx", "startFrame": 15.0, "endFrame": 15.0, "timeScale": 1.0, @@ -561,10 +401,10 @@ "children": [] }, { - "id": "leftIndexPointAndThumbRaiseClosed", + "id": "rightIndexPointAndThumbRaiseClosed", "type": "clip", "data": { - "url": "qrc:///avatar/animations/touch_thumb_point_closed_left.fbx", + "url": "qrc:///avatar/animations/touch_thumb_point_closed_right.fbx", "startFrame": 15.0, "endFrame": 15.0, "timeScale": 1.0, @@ -577,6 +417,290 @@ ] }, { + "id": "rightHandAnimA", + "type": "clip", + "data": { + "url": "qrc:///avatar/animations/touch_thumb_point_open_right.fbx", + "startFrame": 15.0, + "endFrame": 15.0, + "timeScale": 1.0, + "loopFlag": true + }, + "children": [] + }, + { + "id": "rightHandAnimB", + "type": "clip", + "data": { + "url": "qrc:///avatar/animations/touch_thumb_point_open_right.fbx", + "startFrame": 15.0, + "endFrame": 15.0, + "timeScale": 1.0, + "loopFlag": true + }, + "children": [] + } + ] + }, + { + "id": "leftHandOverlay", + "type": "overlay", + "data": { + "alpha": 0.0, + "boneSet": "leftHand", + "alphaVar": "leftHandOverlayAlpha" + }, + "children": [ + { + "id": "leftHandStateMachine", + "type": "stateMachine", + "data": { + "currentState": "leftHandAnimNone", + "states": [ + { + "id": "leftHandAnimNone", + "interpTarget": 1, + "interpDuration": 3, + "transitions": [ + { "var": "leftHandAnimA", "state": "leftHandAnimA" }, + { "var": "leftHandAnimB", "state": "leftHandAnimB" } + ] + }, + { + "id": "leftHandAnimA", + "interpTarget": 1, + "interpDuration": 3, + "transitions": [ + { "var": "leftHandAnimNone", "state": "leftHandAnimNone" }, + { "var": "leftHandAnimB", "state": "leftHandAnimB" } + ] + }, + { + "id": "leftHandAnimB", + "interpTarget": 1, + "interpDuration": 3, + "transitions": [ + { "var": "leftHandAnimNone", "state": "leftHandAnimNone" }, + { "var": "leftHandAnimA", "state": "leftHandAnimA" } + ] + } + ] + }, + "children": [ + { + "id": "leftHandAnimNone", + "type": "stateMachine", + "data": { + "currentState": "leftHandGrasp", + "states": [ + { + "id": "leftHandGrasp", + "interpTarget": 3, + "interpDuration": 3, + "transitions": [ + { "var": "isLeftIndexPoint", "state": "leftIndexPoint" }, + { "var": "isLeftThumbRaise", "state": "leftThumbRaise" }, + { "var": "isLeftIndexPointAndThumbRaise", "state": "leftIndexPointAndThumbRaise" } + ] + }, + { + "id": "leftIndexPoint", + "interpTarget": 15, + "interpDuration": 3, + "transitions": [ + { "var": "isLeftHandGrasp", "state": "leftHandGrasp" }, + { "var": "isLeftThumbRaise", "state": "leftThumbRaise" }, + { "var": "isLeftIndexPointAndThumbRaise", "state": "leftIndexPointAndThumbRaise" } + ] 
+ }, + { + "id": "leftThumbRaise", + "interpTarget": 15, + "interpDuration": 3, + "transitions": [ + { "var": "isLeftHandGrasp", "state": "leftHandGrasp" }, + { "var": "isLeftIndexPoint", "state": "leftIndexPoint" }, + { "var": "isLeftIndexPointAndThumbRaise", "state": "leftIndexPointAndThumbRaise" } + ] + }, + { + "id": "leftIndexPointAndThumbRaise", + "interpTarget": 15, + "interpDuration": 3, + "transitions": [ + { "var": "isLeftHandGrasp", "state": "leftHandGrasp" }, + { "var": "isLeftIndexPoint", "state": "leftIndexPoint" }, + { "var": "isLeftThumbRaise", "state": "leftThumbRaise" } + ] + } + ] + }, + "children": [ + { + "id": "leftHandGrasp", + "type": "blendLinear", + "data": { + "alpha": 0.0, + "alphaVar": "leftHandGraspAlpha" + }, + "children": [ + { + "id": "leftHandGraspOpen", + "type": "clip", + "data": { + "url": "qrc:///avatar/animations/hydra_pose_open_left.fbx", + "startFrame": 0.0, + "endFrame": 0.0, + "timeScale": 1.0, + "loopFlag": true + }, + "children": [] + }, + { + "id": "leftHandGraspClosed", + "type": "clip", + "data": { + "url": "qrc:///avatar/animations/hydra_pose_closed_left.fbx", + "startFrame": 10.0, + "endFrame": 10.0, + "timeScale": 1.0, + "loopFlag": true + }, + "children": [] + } + ] + }, + { + "id": "leftIndexPoint", + "type": "blendLinear", + "data": { + "alpha": 0.0, + "alphaVar": "leftHandGraspAlpha" + }, + "children": [ + { + "id": "leftIndexPointOpen", + "type": "clip", + "data": { + "url": "qrc:///avatar/animations/touch_point_open_left.fbx", + "startFrame": 15.0, + "endFrame": 15.0, + "timeScale": 1.0, + "loopFlag": true + }, + "children": [] + }, + { + "id": "leftIndexPointClosed", + "type": "clip", + "data": { + "url": "qrc:///avatar/animations/touch_point_closed_left.fbx", + "startFrame": 15.0, + "endFrame": 15.0, + "timeScale": 1.0, + "loopFlag": true + }, + "children": [] + } + ] + }, + { + "id": "leftThumbRaise", + "type": "blendLinear", + "data": { + "alpha": 0.0, + "alphaVar": "leftHandGraspAlpha" + }, + "children": [ + { + "id": "leftThumbRaiseOpen", + "type": "clip", + "data": { + "url": "qrc:///avatar/animations/touch_thumb_open_left.fbx", + "startFrame": 15.0, + "endFrame": 15.0, + "timeScale": 1.0, + "loopFlag": true + }, + "children": [] + }, + { + "id": "leftThumbRaiseClosed", + "type": "clip", + "data": { + "url": "qrc:///avatar/animations/touch_thumb_closed_left.fbx", + "startFrame": 15.0, + "endFrame": 15.0, + "timeScale": 1.0, + "loopFlag": true + }, + "children": [] + } + ] + }, + { + "id": "leftIndexPointAndThumbRaise", + "type": "blendLinear", + "data": { + "alpha": 0.0, + "alphaVar": "leftHandGraspAlpha" + }, + "children": [ + { + "id": "leftIndexPointAndThumbRaiseOpen", + "type": "clip", + "data": { + "url": "qrc:///avatar/animations/touch_thumb_point_open_left.fbx", + "startFrame": 15.0, + "endFrame": 15.0, + "timeScale": 1.0, + "loopFlag": true + }, + "children": [] + }, + { + "id": "leftIndexPointAndThumbRaiseClosed", + "type": "clip", + "data": { + "url": "qrc:///avatar/animations/touch_thumb_point_closed_left.fbx", + "startFrame": 15.0, + "endFrame": 15.0, + "timeScale": 1.0, + "loopFlag": true + }, + "children": [] + } + ] + } + ] + }, + { + "id": "leftHandAnimA", + "type": "clip", + "data": { + "url": "qrc:///avatar/animations/touch_thumb_point_open_left.fbx", + "startFrame": 15.0, + "endFrame": 15.0, + "timeScale": 1.0, + "loopFlag": true + }, + "children": [] + }, + { + "id": "leftHandAnimB", + "type": "clip", + "data": { + "url": "qrc:///avatar/animations/touch_thumb_point_open_left.fbx", + "startFrame": 15.0, + 
"endFrame": 15.0, + "timeScale": 1.0, + "loopFlag": true + }, + "children": [] + } + ] + }, + { "id": "mainStateMachine", "type": "stateMachine", "data": { @@ -1594,4 +1718,4 @@ } ] } -} +} \ No newline at end of file diff --git a/interface/src/FancyCamera.h b/interface/src/FancyCamera.h index 4ca073fb4f..aead54d0fd 100644 --- a/interface/src/FancyCamera.h +++ b/interface/src/FancyCamera.h @@ -19,14 +19,22 @@ class FancyCamera : public Camera { Q_OBJECT /**jsdoc - * @namespace - * @augments Camera - */ - - // FIXME: JSDoc 3.5.5 doesn't augment @property definitions. The following definition is repeated in Camera.h. - /**jsdoc - * @property {Uuid} cameraEntity The ID of the entity that the camera position and orientation follow when the camera is in - * entity mode. + * The Camera API provides access to the "camera" that defines your view in desktop and HMD display modes. + * + * @namespace Camera + * + * @hifi-interface + * @hifi-client-entity + * @hifi-avatar + * + * @property {Vec3} position - The position of the camera. You can set this value only when the camera is in independent + * mode. + * @property {Quat} orientation - The orientation of the camera. You can set this value only when the camera is in + * independent mode. + * @property {Camera.Mode} mode - The camera mode. + * @property {ViewFrustum} frustum - The camera frustum. + * @property {Uuid} cameraEntity - The ID of the entity that is used for the camera position and orientation when the + * camera is in entity mode. */ Q_PROPERTY(QUuid cameraEntity READ getCameraEntity WRITE setCameraEntity) @@ -38,25 +46,25 @@ public: public slots: - /**jsdoc - * Get the ID of the entity that the camera is set to use the position and orientation from when it's in entity mode. You can - * also get the entity ID using the Camera.cameraEntity property. - * @function Camera.getCameraEntity - * @returns {Uuid} The ID of the entity that the camera is set to follow when in entity mode; null if no camera - * entity has been set. - */ + /**jsdoc + * Gets the ID of the entity that the camera is set to follow (i.e., use the position and orientation from) when it's in + * entity mode. You can also get the entity ID using the {@link Camera|Camera.cameraEntity} property. + * @function Camera.getCameraEntity + * @returns {Uuid} The ID of the entity that the camera is set to follow when in entity mode; null if no + * camera entity has been set. + */ QUuid getCameraEntity() const; /**jsdoc - * Set the entity that the camera should use the position and orientation from when it's in entity mode. You can also set the - * entity using the Camera.cameraEntity property. - * @function Camera.setCameraEntity - * @param {Uuid} entityID The entity that the camera should follow when it's in entity mode. - * @example Move your camera to the position and orientation of the closest entity. - * Camera.setModeString("entity"); - * var entity = Entities.findClosestEntity(MyAvatar.position, 100.0); - * Camera.setCameraEntity(entity); - */ + * Sets the entity that the camera should follow (i.e., use the position and orientation from) when it's in entity mode. + * You can also set the entity using the {@link Camera|Camera.cameraEntity} property. + * @function Camera.setCameraEntity + * @param {Uuid} entityID - The entity that the camera should follow when it's in entity mode. + * @example Move your camera to the position and orientation of the closest entity. 
+ * Camera.setModeString("entity"); + * var entity = Entities.findClosestEntity(MyAvatar.position, 100.0); + * Camera.setCameraEntity(entity); + */ void setCameraEntity(QUuid entityID); private: diff --git a/interface/src/avatar/AvatarManager.cpp b/interface/src/avatar/AvatarManager.cpp index 575d87dfb7..8274259922 100755 --- a/interface/src/avatar/AvatarManager.cpp +++ b/interface/src/avatar/AvatarManager.cpp @@ -498,8 +498,10 @@ void AvatarManager::handleRemovedAvatar(const AvatarSharedPointer& removedAvatar // on the creation of entities for that avatar instance and the deletion of entities for this instance avatar->removeAvatarEntitiesFromTree(); if (removalReason != KillAvatarReason::AvatarDisconnected) { - emit AvatarInputs::getInstance()->avatarEnteredIgnoreRadius(avatar->getSessionUUID()); - emit DependencyManager::get()->enteredIgnoreRadius(); + if (removalReason == KillAvatarReason::TheirAvatarEnteredYourBubble) { + emit AvatarInputs::getInstance()->avatarEnteredIgnoreRadius(avatar->getSessionUUID()); + emit DependencyManager::get()->enteredIgnoreRadius(); + } workload::Transaction workloadTransaction; workloadTransaction.remove(avatar->getSpaceIndex()); @@ -932,6 +934,18 @@ void AvatarManager::setAvatarSortCoefficient(const QString& name, const QScriptV } } +/**jsdoc + * PAL (People Access List) data for an avatar. + * @typedef {object} AvatarManager.PalData + * @property {Uuid} sessionUUID - The avatar's session ID. "" if the avatar is your own. + * @property {string} sessionDisplayName - The avatar's display name, sanitized and versioned, as defined by the avatar mixer. + * It is unique among all avatars present in the domain at the time. + * @property {number} audioLoudness - The instantaneous loudness of the audio input that the avatar is injecting into the + * domain. + * @property {boolean} isReplicated - Deprecated. + * @property {Vec3} position - The position of the avatar. + * @property {number} palOrbOffset - The vertical offset from the avatar's position that an overlay orb should be displayed at. + */ QVariantMap AvatarManager::getPalData(const QStringList& specificAvatarIdentifiers) { QJsonArray palData; diff --git a/interface/src/avatar/AvatarManager.h b/interface/src/avatar/AvatarManager.h index f9b82da0c1..1bddaedc42 100644 --- a/interface/src/avatar/AvatarManager.h +++ b/interface/src/avatar/AvatarManager.h @@ -37,10 +37,11 @@ using SortedAvatar = std::pair>; /**jsdoc - * The AvatarManager API has properties and methods which manage Avatars within the same domain. + * The AvatarManager API provides information about avatars within the current domain. The avatars available are + * those that Interface has displayed and therefore knows about. * - *

Note: This API is also provided to Interface and client entity scripts as the synonym, - * AvatarList. For assignment client scripts, see the separate {@link AvatarList} API. + *

Warning: This API is also provided to Interface, client entity, and avatar scripts as the synonym, + * "AvatarList". For assignment client scripts, see the separate {@link AvatarList} API.

* * @namespace AvatarManager * @@ -48,8 +49,9 @@ using SortedAvatar = std::pair>; * @hifi-client-entity * @hifi-avatar * - * @borrows AvatarList.getAvatarIdentifiers as getAvatarIdentifiers - * @borrows AvatarList.getAvatarsInRange as getAvatarsInRange + * @borrows AvatarList.getAvatar as getAvatar + * @comment AvatarList.getAvatarIdentifiers as getAvatarIdentifiers - Don't borrow because behavior is slightly different. + * @comment AvatarList.getAvatarsInRange as getAvatarsInRange - Don't borrow because behavior is slightly different. * @borrows AvatarList.avatarAddedEvent as avatarAddedEvent * @borrows AvatarList.avatarRemovedEvent as avatarRemovedEvent * @borrows AvatarList.avatarSessionChangedEvent as avatarSessionChangedEvent @@ -67,6 +69,31 @@ class AvatarManager : public AvatarHashMap { public: + /**jsdoc + * Gets the IDs of all avatars known about in the domain. + * Your own avatar is included in the list as a null value. + * @function AvatarManager.getAvatarIdentifiers + * @returns {Uuid[]} The IDs of all known avatars in the domain. + * @example Report the IDS of all avatars within the domain. + * var avatars = AvatarManager.getAvatarIdentifiers(); + * print("Avatars in the domain: " + JSON.stringify(avatars)); + * // A null item is included for your avatar. + */ + + /**jsdoc + * Gets the IDs of all avatars known about within a specified distance from a point. + * Your own avatar's ID is included in the list if it is in range. + * @function AvatarManager.getAvatarsInRange + * @param {Vec3} position - The point about which the search is performed. + * @param {number} range - The search radius. + * @returns {Uuid[]} The IDs of all known avatars within the search distance from the position. + * @example Report the IDs of all avatars within 10m of your avatar. + * var RANGE = 10; + * var avatars = AvatarManager.getAvatarsInRange(MyAvatar.position, RANGE); + * print("Nearby avatars: " + JSON.stringify(avatars)); + * print("Own avatar: " + MyAvatar.sessionUUID); + */ + /// Registers the script types associated with the avatar manager. static void registerMetaTypes(QScriptEngine* engine); @@ -79,9 +106,7 @@ public: glm::vec3 getMyAvatarPosition() const { return _myAvatar->getWorldPosition(); } /**jsdoc - * @function AvatarManager.getAvatar - * @param {Uuid} avatarID - * @returns {AvatarData} + * @comment Uses the base class's JSDoc. */ // Null/Default-constructed QUuids will return MyAvatar Q_INVOKABLE virtual ScriptAvatarData* getAvatar(QUuid avatarID) override { return new ScriptAvatar(getAvatarBySessionID(avatarID)); } @@ -112,36 +137,53 @@ public: void handleCollisionEvents(const CollisionEvents& collisionEvents); /**jsdoc + * Gets the amount of avatar mixer data being generated by an avatar other than your own. * @function AvatarManager.getAvatarDataRate - * @param {Uuid} sessionID - * @param {string} [rateName=""] - * @returns {number} + * @param {Uuid} sessionID - The ID of the avatar whose data rate you're retrieving. + * @param {AvatarDataRate} [rateName=""] - The type of avatar mixer data to get the data rate of. + * @returns {number} The data rate in kbps; 0 if the avatar is your own. */ Q_INVOKABLE float getAvatarDataRate(const QUuid& sessionID, const QString& rateName = QString("")) const; /**jsdoc + * Gets the update rate of avatar mixer data being generated by an avatar other than your own. 
* @function AvatarManager.getAvatarUpdateRate - * @param {Uuid} sessionID - * @param {string} [rateName=""] - * @returns {number} + * @param {Uuid} sessionID - The ID of the avatar whose update rate you're retrieving. + * @param {AvatarUpdateRate} [rateName=""] - The type of avatar mixer data to get the update rate of. + * @returns {number} The update rate in Hz; 0 if the avatar is your own. */ Q_INVOKABLE float getAvatarUpdateRate(const QUuid& sessionID, const QString& rateName = QString("")) const; /**jsdoc + * Gets the simulation rate of an avatar other than your own. * @function AvatarManager.getAvatarSimulationRate - * @param {Uuid} sessionID - * @param {string} [rateName=""] - * @returns {number} + * @param {Uuid} sessionID - The ID of the avatar whose simulation you're retrieving. + * @param {AvatarSimulationRate} [rateName=""] - The type of avatar data to get the simulation rate of. + * @returns {number} The simulation rate in Hz; 0 if the avatar is your own. */ Q_INVOKABLE float getAvatarSimulationRate(const QUuid& sessionID, const QString& rateName = QString("")) const; /**jsdoc + * Find the first avatar intersected by a {@link PickRay}. * @function AvatarManager.findRayIntersection - * @param {PickRay} ray - * @param {Uuid[]} [avatarsToInclude=[]] - * @param {Uuid[]} [avatarsToDiscard=[]] - * @param {boolean} pickAgainstMesh - * @returns {RayToAvatarIntersectionResult} + * @param {PickRay} ray - The ray to use for finding avatars. + * @param {Uuid[]} [avatarsToInclude=[]] - If not empty then search is restricted to these avatars. + * @param {Uuid[]} [avatarsToDiscard=[]] - Avatars to ignore in the search. + * @param {boolean} [pickAgainstMesh=true] - If true then the exact intersection with the avatar mesh is + * calculated, if false then the intersection is approximate. + * @returns {RayToAvatarIntersectionResult} The result of the search for the first intersected avatar. + * @example Find the first avatar directly in front of you. + * var pickRay = { + * origin: MyAvatar.position, + * direction: Quat.getFront(MyAvatar.orientation) + * }; + * + * var intersection = AvatarManager.findRayIntersection(pickRay); + * if (intersection.intersects) { + * print("Avatar found: " + JSON.stringify(intersection)); + * } else { + * print("No avatar found."); + * } */ Q_INVOKABLE RayToAvatarIntersectionResult findRayIntersection(const PickRay& ray, const QScriptValue& avatarIdsToInclude = QScriptValue(), @@ -149,11 +191,12 @@ public: bool pickAgainstMesh = true); /**jsdoc * @function AvatarManager.findRayIntersectionVector - * @param {PickRay} ray - * @param {Uuid[]} avatarsToInclude - * @param {Uuid[]} avatarsToDiscard - * @param {boolean} pickAgainstMesh - * @returns {RayToAvatarIntersectionResult} + * @param {PickRay} ray - Ray. + * @param {Uuid[]} avatarsToInclude - Avatars to include. + * @param {Uuid[]} avatarsToDiscard - Avatars to discard. + * @param {boolean} pickAgainstMesh - Pick against mesh. + * @returns {RayToAvatarIntersectionResult} Intersection result. + * @deprecated This function is deprecated and will be removed. */ Q_INVOKABLE RayToAvatarIntersectionResult findRayIntersectionVector(const PickRay& ray, const QVector& avatarsToInclude, @@ -162,10 +205,11 @@ public: /**jsdoc * @function AvatarManager.findParabolaIntersectionVector - * @param {PickParabola} pick - * @param {Uuid[]} avatarsToInclude - * @param {Uuid[]} avatarsToDiscard - * @returns {ParabolaToAvatarIntersectionResult} + * @param {PickParabola} pick - Pick. 
+ * @param {Uuid[]} avatarsToInclude - Avatars to include. + * @param {Uuid[]} avatarsToDiscard - Avatars to discard. + * @returns {ParabolaToAvatarIntersectionResult} Intersection result. + * @deprecated This function is deprecated and will be removed. */ Q_INVOKABLE ParabolaToAvatarIntersectionResult findParabolaIntersectionVector(const PickParabola& pick, const QVector& avatarsToInclude, @@ -173,27 +217,31 @@ public: /**jsdoc * @function AvatarManager.getAvatarSortCoefficient - * @param {string} name - * @returns {number} + * @param {string} name - Name. + * @returns {number} Value. + * @deprecated This function is deprecated and will be removed. */ // TODO: remove this HACK once we settle on optimal default sort coefficients Q_INVOKABLE float getAvatarSortCoefficient(const QString& name); /**jsdoc * @function AvatarManager.setAvatarSortCoefficient - * @param {string} name - * @param {number} value + * @param {string} name - Name + * @param {number} value - Value. + * @deprecated This function is deprecated and will be removed. */ Q_INVOKABLE void setAvatarSortCoefficient(const QString& name, const QScriptValue& value); /**jsdoc - * Used in the PAL for getting PAL-related data about avatars nearby. Using this method is faster - * than iterating over each avatar and obtaining data about them in JavaScript, as that method - * locks and unlocks each avatar's data structure potentially hundreds of times per update tick. + * Gets PAL (People Access List) data for one or more avatars. Using this method is faster than iterating over each avatar + * and obtaining data about each individually. * @function AvatarManager.getPalData - * @param {string[]} [specificAvatarIdentifiers=[]] - The list of IDs of the avatars you want the PAL data for. - * If an empty list, the PAL data for all nearby avatars is returned. - * @returns {object[]} An array of objects, each object being the PAL data for an avatar. + * @param {string[]} [avatarIDs=[]] - The IDs of the avatars to get the PAL data for. If empty, then PAL data is obtained + * for all avatars. + * @returns {object<"data", AvatarManager.PalData[]>} An array of objects, each object being the PAL data for an avatar. + * @example Report the PAL data for an avatar nearby. + * var palData = AvatarManager.getPalData(); + * print("PAL data for one avatar: " + JSON.stringify(palData.data[0])); */ Q_INVOKABLE QVariantMap getPalData(const QStringList& specificAvatarIdentifiers = QStringList()); @@ -209,7 +257,8 @@ public: public slots: /**jsdoc * @function AvatarManager.updateAvatarRenderStatus - * @param {boolean} shouldRenderAvatars + * @param {boolean} shouldRenderAvatars - Should render avatars. + * @deprecated This function is deprecated and will be removed. 
*/ void updateAvatarRenderStatus(bool shouldRenderAvatars); diff --git a/interface/src/avatar/MyAvatar.cpp b/interface/src/avatar/MyAvatar.cpp index c0cf63d7e4..25c7a788b3 100644 --- a/interface/src/avatar/MyAvatar.cpp +++ b/interface/src/avatar/MyAvatar.cpp @@ -1199,6 +1199,15 @@ void MyAvatar::overrideAnimation(const QString& url, float fps, bool loop, float _skeletonModel->getRig().overrideAnimation(url, fps, loop, firstFrame, lastFrame); } +void MyAvatar::overrideHandAnimation(bool isLeft, const QString& url, float fps, bool loop, float firstFrame, float lastFrame) { + if (QThread::currentThread() != thread()) { + QMetaObject::invokeMethod(this, "overrideHandAnimation", Q_ARG(bool, isLeft), Q_ARG(const QString&, url), Q_ARG(float, fps), + Q_ARG(bool, loop), Q_ARG(float, firstFrame), Q_ARG(float, lastFrame)); + return; + } + _skeletonModel->getRig().overrideHandAnimation(isLeft, url, fps, loop, firstFrame, lastFrame); +} + void MyAvatar::restoreAnimation() { if (QThread::currentThread() != thread()) { QMetaObject::invokeMethod(this, "restoreAnimation"); @@ -1207,6 +1216,14 @@ void MyAvatar::restoreAnimation() { _skeletonModel->getRig().restoreAnimation(); } +void MyAvatar::restoreHandAnimation(bool isLeft) { + if (QThread::currentThread() != thread()) { + QMetaObject::invokeMethod(this, "restoreHandAnimation", Q_ARG(bool, isLeft)); + return; + } + _skeletonModel->getRig().restoreHandAnimation(isLeft); +} + QStringList MyAvatar::getAnimationRoles() { if (QThread::currentThread() != thread()) { QStringList result; @@ -3172,17 +3189,40 @@ int MyAvatar::sendAvatarDataPacket(bool sendAll) { return bytesSent; } -const float RENDER_HEAD_CUTOFF_DISTANCE = 0.47f; - bool MyAvatar::cameraInsideHead(const glm::vec3& cameraPosition) const { + if (!_skeletonModel) { + return false; + } + + // transform cameraPosition into rig coordinates + AnimPose rigToWorld = AnimPose(getWorldOrientation() * Quaternions::Y_180, getWorldPosition()); + AnimPose worldToRig = rigToWorld.inverse(); + glm::vec3 rigCameraPosition = worldToRig * cameraPosition; + + // use head k-dop shape to determine if camera is inside head. + const Rig& rig = _skeletonModel->getRig(); + int headJointIndex = rig.indexOfJoint("Head"); + if (headJointIndex >= 0) { + const HFMModel& hfmModel = _skeletonModel->getHFMModel(); + AnimPose headPose; + if (rig.getAbsoluteJointPoseInRigFrame(headJointIndex, headPose)) { + glm::vec3 displacement; + const HFMJointShapeInfo& headShapeInfo = hfmModel.joints[headJointIndex].shapeInfo; + return findPointKDopDisplacement(rigCameraPosition, headPose, headShapeInfo, displacement); + } + } + + // fall back to simple distance check. + const float RENDER_HEAD_CUTOFF_DISTANCE = 0.47f; return glm::length(cameraPosition - getHeadPosition()) < (RENDER_HEAD_CUTOFF_DISTANCE * getModelScale()); } bool MyAvatar::shouldRenderHead(const RenderArgs* renderArgs) const { bool defaultMode = renderArgs->_renderMode == RenderArgs::DEFAULT_RENDER_MODE; bool firstPerson = qApp->getCamera().getMode() == CAMERA_MODE_FIRST_PERSON; + bool overrideAnim = _skeletonModel ? 
_skeletonModel->getRig().isPlayingOverrideAnimation() : false; bool insideHead = cameraInsideHead(renderArgs->getViewFrustum().getPosition()); - return !defaultMode || !firstPerson || !insideHead; + return !defaultMode || (!firstPerson && !insideHead) || (overrideAnim && !insideHead); } void MyAvatar::setHasScriptedBlendshapes(bool hasScriptedBlendshapes) { @@ -4798,7 +4838,12 @@ bool MyAvatar::isReadyForPhysics() const { } void MyAvatar::setSprintMode(bool sprint) { - _walkSpeedScalar = sprint ? AVATAR_SPRINT_SPEED_SCALAR : AVATAR_WALK_SPEED_SCALAR; + if (qApp->isHMDMode()) { + _walkSpeedScalar = sprint ? AVATAR_HMD_SPRINT_SPEED_SCALAR : AVATAR_WALK_SPEED_SCALAR; + } else { + _walkSpeedScalar = sprint ? AVATAR_DESKTOP_SPRINT_SPEED_SCALAR : AVATAR_WALK_SPEED_SCALAR; + } } void MyAvatar::setIsInWalkingState(bool isWalking) { diff --git a/interface/src/avatar/MyAvatar.h b/interface/src/avatar/MyAvatar.h index 804e2687e7..4cf13e54dc 100755 --- a/interface/src/avatar/MyAvatar.h +++ b/interface/src/avatar/MyAvatar.h @@ -597,6 +597,26 @@ public: */ Q_INVOKABLE void overrideAnimation(const QString& url, float fps, bool loop, float firstFrame, float lastFrame); + /**jsdoc + * Overrides the default hand poses that are triggered with controller buttons. + * Use {@link MyAvatar.restoreHandAnimation} to restore the default poses.
+ * @function MyAvatar.overrideHandAnimation + * @param isLeft {boolean} Set to true if using the left hand. + * @param url {string} The URL to the animation file. Animation files need to be FBX format, but only need to contain the + * avatar skeleton and animation data. + * @param fps {number} The frames per second (FPS) rate for the animation playback. 30 FPS is normal speed. + * @param loop {boolean} Set to true if the animation should loop. + * @param firstFrame {number} The frame the animation should start at. + * @param lastFrame {number} The frame the animation should end at. + * @example Override left hand animation for three seconds. + * // Override the left hand pose then restore the default pose. + * MyAvatar.overrideHandAnimation(isLeft, ANIM_URL, 30, true, 0, 53); + * Script.setTimeout(function () { + * MyAvatar.restoreHandAnimation(isLeft); + * }, 3000); + */ + Q_INVOKABLE void overrideHandAnimation(bool isLeft, const QString& url, float fps, bool loop, float firstFrame, float lastFrame); + /**jsdoc + * Restores the default animations. *

The avatar animation system includes a set of default animations along with rules for how those animations are blended @@ -615,6 +635,24 @@ public: */ Q_INVOKABLE void restoreAnimation(); + /**jsdoc + * Restores the default hand animation state machine that is driven by the state machine in the avatar-animation json. + *

The avatar animation system includes a set of default animations along with rules for how those animations are blended + * together with procedural data (such as look at vectors, hand sensors etc.). Playing your own custom animations will + * override the default animations. restoreHandAnimation() is used to restore the default hand poses. + * If you aren't currently playing an override hand + * animation, this function has no effect.

+ * @function MyAvatar.restoreHandAnimation + * @param isLeft {boolean} Set to true if using the left hand. + * @example Override left hand animation for three seconds. + * // Override the left hand pose then restore the default pose. + * MyAvatar.overrideHandAnimation(isLeft, ANIM_URL, 30, true, 0, 53); + * Script.setTimeout(function () { + * MyAvatar.restoreHandAnimation(isLeft); + * }, 3000); + */ + Q_INVOKABLE void restoreHandAnimation(bool isLeft); + /**jsdoc + * Gets the current animation roles. *

Each avatar has an avatar-animation.json file that defines which animations are used and how they are blended together diff --git a/interface/src/avatar/MySkeletonModel.cpp b/interface/src/avatar/MySkeletonModel.cpp index 55c29b66c1..df46b428e7 100755 --- a/interface/src/avatar/MySkeletonModel.cpp +++ b/interface/src/avatar/MySkeletonModel.cpp @@ -334,7 +334,9 @@ void MySkeletonModel::updateRig(float deltaTime, glm::mat4 parentTransform) { eyeParams.leftEyeJointIndex = _rig.indexOfJoint("LeftEye"); eyeParams.rightEyeJointIndex = _rig.indexOfJoint("RightEye"); - _rig.updateFromEyeParameters(eyeParams); + if (_owningAvatar->getHasProceduralEyeFaceMovement()) { + _rig.updateFromEyeParameters(eyeParams); + } updateFingers(); } diff --git a/interface/src/commerce/Wallet.cpp b/interface/src/commerce/Wallet.cpp index 37f28960e5..5644f9ea4c 100644 --- a/interface/src/commerce/Wallet.cpp +++ b/interface/src/commerce/Wallet.cpp @@ -96,28 +96,32 @@ int passwordCallback(char* password, int maxPasswordSize, int rwFlag, void* u) { } } -EC_KEY* readKeys(const char* filename) { - FILE* fp; - EC_KEY *key = NULL; - if ((fp = fopen(filename, "rt"))) { +EC_KEY* readKeys(QString filename) { + QFile file(filename); + EC_KEY* key = NULL; + if (file.open(QFile::ReadOnly)) { // file opened successfully qCDebug(commerce) << "opened key file" << filename; - if ((key = PEM_read_EC_PUBKEY(fp, NULL, NULL, NULL))) { + QByteArray pemKeyBytes = file.readAll(); + BIO* bufio = BIO_new_mem_buf((void*)pemKeyBytes.constData(), pemKeyBytes.length()); + if ((key = PEM_read_bio_EC_PUBKEY(bufio, NULL, NULL, NULL))) { // now read private key qCDebug(commerce) << "read public key"; - if ((key = PEM_read_ECPrivateKey(fp, &key, passwordCallback, NULL))) { + if ((key = PEM_read_bio_ECPrivateKey(bufio, &key, passwordCallback, NULL))) { qCDebug(commerce) << "read private key"; - fclose(fp); - return key; + BIO_free(bufio); + file.close(); + } else { + qCDebug(commerce) << "failed to read private key"; } - qCDebug(commerce) << "failed to read private key"; } else { qCDebug(commerce) << "failed to read public key"; } - fclose(fp); + BIO_free(bufio); + file.close(); } else { qCDebug(commerce) << "failed to open key file" << filename; } @@ -131,8 +135,7 @@ bool Wallet::writeBackupInstructions() { QFile outputFile(outputFilename); bool retval = false; - if (getKeyFilePath().isEmpty()) - { + if (getKeyFilePath().isEmpty()) { return false; } @@ -152,7 +155,7 @@ bool Wallet::writeBackupInstructions() { outputFile.write(text.toUtf8()); // Close the output file - outputFile.close(); + outputFile.close(); retval = true; qCDebug(commerce) << "wrote html file successfully"; @@ -165,28 +168,35 @@ bool Wallet::writeBackupInstructions() { return retval; } -bool writeKeys(const char* filename, EC_KEY* keys) { - FILE* fp; +bool writeKeys(QString filename, EC_KEY* keys) { + BIO* bio = BIO_new(BIO_s_mem()); bool retval = false; - if ((fp = fopen(filename, "wt"))) { - if (!PEM_write_EC_PUBKEY(fp, keys)) { - fclose(fp); - qCCritical(commerce) << "failed to write public key"; - return retval; - } + if (!PEM_write_bio_EC_PUBKEY(bio, keys)) { + BIO_free(bio); + qCCritical(commerce) << "failed to write public key"; + return retval; + } - if (!PEM_write_ECPrivateKey(fp, keys, EVP_des_ede3_cbc(), NULL, 0, passwordCallback, NULL)) { - fclose(fp); - qCCritical(commerce) << "failed to write private key"; - return retval; - } + if (!PEM_write_bio_ECPrivateKey(bio, keys, EVP_des_ede3_cbc(), NULL, 0, passwordCallback, NULL)) { + BIO_free(bio); + qCCritical(commerce) << 
"failed to write private key"; + return retval; + } + QFile file(filename); + if (!file.open(QIODevice::WriteOnly)) { + const char* bio_data; + long bio_size = BIO_get_mem_data(bio, &bio_data); + + QByteArray keyBytes(bio_data, bio_size); + file.write(keyBytes); retval = true; qCDebug(commerce) << "wrote keys successfully"; - fclose(fp); + file.close(); } else { qCDebug(commerce) << "failed to open key file" << filename; } + BIO_free(bio); return retval; } @@ -215,7 +225,6 @@ QByteArray Wallet::getWallet() { } QPair generateECKeypair() { - EC_KEY* keyPair = EC_KEY_new_by_curve_name(NID_secp256k1); QPair retval{}; @@ -235,7 +244,6 @@ QPair generateECKeypair() { if (publicKeyLength <= 0 || privateKeyLength <= 0) { qCDebug(commerce) << "Error getting DER public or private key from EC struct -" << ERR_get_error(); - // cleanup the EC struct EC_KEY_free(keyPair); @@ -251,8 +259,7 @@ QPair generateECKeypair() { return retval; } - - if (!writeKeys(keyFilePath().toStdString().c_str(), keyPair)) { + if (!writeKeys(keyFilePath(), keyPair)) { qCDebug(commerce) << "couldn't save keys!"; return retval; } @@ -273,13 +280,18 @@ QPair generateECKeypair() { // END copied code (which will soon change) // the public key can just go into a byte array -QByteArray readPublicKey(const char* filename) { - FILE* fp; - EC_KEY* key = NULL; - if ((fp = fopen(filename, "r"))) { +QByteArray readPublicKey(QString filename) { + QByteArray retval; + QFile file(filename); + if (file.open(QIODevice::ReadOnly)) { // file opened successfully qCDebug(commerce) << "opened key file" << filename; - if ((key = PEM_read_EC_PUBKEY(fp, NULL, NULL, NULL))) { + + QByteArray pemKeyBytes = file.readAll(); + BIO* bufio = BIO_new_mem_buf((void*)pemKeyBytes.constData(), pemKeyBytes.length()); + + EC_KEY* key = PEM_read_bio_EC_PUBKEY(bufio, NULL, NULL, NULL); + if (key) { // file read successfully unsigned char* publicKeyDER = NULL; int publicKeyLength = i2d_EC_PUBKEY(key, &publicKeyDER); @@ -287,17 +299,19 @@ QByteArray readPublicKey(const char* filename) { // cleanup EC_KEY_free(key); - fclose(fp); qCDebug(commerce) << "parsed public key file successfully"; QByteArray retval((char*)publicKeyDER, publicKeyLength); OPENSSL_free(publicKeyDER); + BIO_free(bufio); + file.close(); return retval; } else { qCDebug(commerce) << "couldn't parse" << filename; } - fclose(fp); + BIO_free(bufio); + file.close(); } else { qCDebug(commerce) << "couldn't open" << filename; } @@ -306,13 +320,17 @@ QByteArray readPublicKey(const char* filename) { // the private key should be read/copied into heap memory. For now, we need the EC_KEY struct // so I'll return that. 
-EC_KEY* readPrivateKey(const char* filename) { - FILE* fp; +EC_KEY* readPrivateKey(QString filename) { + QFile file(filename); EC_KEY* key = NULL; - if ((fp = fopen(filename, "r"))) { + if (file.open(QIODevice::ReadOnly)) { // file opened successfully qCDebug(commerce) << "opened key file" << filename; - if ((key = PEM_read_ECPrivateKey(fp, &key, passwordCallback, NULL))) { + + QByteArray pemKeyBytes = file.readAll(); + BIO* bufio = BIO_new_mem_buf((void*)pemKeyBytes.constData(), pemKeyBytes.length()); + + if ((key = PEM_read_bio_ECPrivateKey(bufio, &key, passwordCallback, NULL))) { qCDebug(commerce) << "parsed private key file successfully"; } else { @@ -320,7 +338,8 @@ EC_KEY* readPrivateKey(const char* filename) { // if the passphrase is wrong, then let's not cache it DependencyManager::get()->setPassphrase(""); } - fclose(fp); + BIO_free(bufio); + file.close(); } else { qCDebug(commerce) << "couldn't open" << filename; } @@ -361,7 +380,7 @@ Wallet::Wallet() { if (wallet->getKeyFilePath().isEmpty() || !wallet->getSecurityImage()) { if (keyStatus == "preexisting") { status = (uint) WalletStatus::WALLET_STATUS_PREEXISTING; - } else{ + } else { status = (uint) WalletStatus::WALLET_STATUS_NOT_SET_UP; } } else if (!wallet->walletIsAuthenticatedWithPassphrase()) { @@ -371,7 +390,6 @@ Wallet::Wallet() { } else { status = (uint) WalletStatus::WALLET_STATUS_READY; } - walletScriptingInterface->setWalletStatus(status); }); @@ -569,10 +587,10 @@ bool Wallet::walletIsAuthenticatedWithPassphrase() { } // otherwise, we have a passphrase but no keys, so we have to check - auto publicKey = readPublicKey(keyFilePath().toStdString().c_str()); + auto publicKey = readPublicKey(keyFilePath()); if (publicKey.size() > 0) { - if (auto key = readPrivateKey(keyFilePath().toStdString().c_str())) { + if (auto key = readPrivateKey(keyFilePath())) { EC_KEY_free(key); // be sure to add the public key so we don't do this over and over @@ -631,8 +649,7 @@ QStringList Wallet::listPublicKeys() { QString Wallet::signWithKey(const QByteArray& text, const QString& key) { EC_KEY* ecPrivateKey = NULL; - auto keyFilePathString = keyFilePath().toStdString(); - if ((ecPrivateKey = readPrivateKey(keyFilePath().toStdString().c_str()))) { + if ((ecPrivateKey = readPrivateKey(keyFilePath()))) { unsigned char* sig = new unsigned char[ECDSA_size(ecPrivateKey)]; unsigned int signatureBytes = 0; @@ -641,12 +658,8 @@ QString Wallet::signWithKey(const QByteArray& text, const QString& key) { QByteArray hashedPlaintext = QCryptographicHash::hash(text, QCryptographicHash::Sha256); - - int retrn = ECDSA_sign(0, - reinterpret_cast(hashedPlaintext.constData()), - hashedPlaintext.size(), - sig, - &signatureBytes, ecPrivateKey); + int retrn = ECDSA_sign(0, reinterpret_cast(hashedPlaintext.constData()), hashedPlaintext.size(), + sig, &signatureBytes, ecPrivateKey); EC_KEY_free(ecPrivateKey); QByteArray signature(reinterpret_cast(sig), signatureBytes); @@ -682,7 +695,6 @@ void Wallet::updateImageProvider() { } void Wallet::chooseSecurityImage(const QString& filename) { - if (_securityImage) { delete _securityImage; } @@ -754,7 +766,7 @@ QString Wallet::getKeyFilePath() { } bool Wallet::writeWallet(const QString& newPassphrase) { - EC_KEY* keys = readKeys(keyFilePath().toStdString().c_str()); + EC_KEY* keys = readKeys(keyFilePath()); auto ledger = DependencyManager::get(); // Remove any existing locker, because it will be out of date. 
if (!_publicKeys.isEmpty() && !ledger->receiveAt(_publicKeys.first(), _publicKeys.first(), QByteArray())) { @@ -768,7 +780,7 @@ bool Wallet::writeWallet(const QString& newPassphrase) { setPassphrase(newPassphrase); } - if (writeKeys(tempFileName.toStdString().c_str(), keys)) { + if (writeKeys(tempFileName, keys)) { if (writeSecurityImage(_securityImage, tempFileName)) { // ok, now move the temp file to the correct spot QFile(QString(keyFilePath())).remove(); @@ -834,10 +846,10 @@ void Wallet::handleChallengeOwnershipPacket(QSharedPointer pack challengingNodeUUID = packet->read(challengingNodeUUIDByteArraySize); } - EC_KEY* ec = readKeys(keyFilePath().toStdString().c_str()); + EC_KEY* ec = readKeys(keyFilePath()); QString sig; - if (ec) { + if (ec) { ERR_clear_error(); sig = signWithKey(text, ""); // base64 signature, QByteArray cast (on return) to QString FIXME should pass ec as string so we can tell which key to sign with status = 1; diff --git a/interface/src/raypick/LaserPointer.cpp b/interface/src/raypick/LaserPointer.cpp index bd746c9090..12daae0351 100644 --- a/interface/src/raypick/LaserPointer.cpp +++ b/interface/src/raypick/LaserPointer.cpp @@ -233,16 +233,19 @@ PointerEvent LaserPointer::buildPointerEvent(const PickedObject& target, const P // If we just started triggering and we haven't moved too much, don't update intersection and pos2D TriggerState& state = hover ? _latestState : _states[button]; - float sensorToWorldScale = DependencyManager::get()->getMyAvatar()->getSensorToWorldScale(); - float deadspotSquared = TOUCH_PRESS_TO_MOVE_DEADSPOT_SQUARED * sensorToWorldScale * sensorToWorldScale; - bool withinDeadspot = usecTimestampNow() - state.triggerStartTime < POINTER_MOVE_DELAY && glm::distance2(pos2D, state.triggerPos2D) < deadspotSquared; - if ((state.triggering || state.wasTriggering) && !state.deadspotExpired && withinDeadspot) { - pos2D = state.triggerPos2D; - intersection = state.intersection; - surfaceNormal = state.surfaceNormal; - } - if (!withinDeadspot) { - state.deadspotExpired = true; + auto avatar = DependencyManager::get()->getMyAvatar(); + if (avatar) { + float sensorToWorldScale = avatar->getSensorToWorldScale(); + float deadspotSquared = TOUCH_PRESS_TO_MOVE_DEADSPOT_SQUARED * sensorToWorldScale * sensorToWorldScale; + bool withinDeadspot = usecTimestampNow() - state.triggerStartTime < POINTER_MOVE_DELAY && glm::distance2(pos2D, state.triggerPos2D) < deadspotSquared; + if ((state.triggering || state.wasTriggering) && !state.deadspotExpired && withinDeadspot) { + pos2D = state.triggerPos2D; + intersection = state.intersection; + surfaceNormal = state.surfaceNormal; + } + if (!withinDeadspot) { + state.deadspotExpired = true; + } } return PointerEvent(pos2D, intersection, surfaceNormal, direction); diff --git a/libraries/animation/src/AnimTwoBoneIK.cpp b/libraries/animation/src/AnimTwoBoneIK.cpp index c91518d5db..b3686b4b57 100644 --- a/libraries/animation/src/AnimTwoBoneIK.cpp +++ b/libraries/animation/src/AnimTwoBoneIK.cpp @@ -128,7 +128,7 @@ const AnimPoseVec& AnimTwoBoneIK::evaluate(const AnimVariantMap& animVars, const if (triggersOut.hasKey(endEffectorPositionVar)) { targetPose.trans() = triggersOut.lookupRigToGeometry(endEffectorPositionVar, tipPose.trans()); - } else if (animVars.hasKey(endEffectorRotationVar)) { + } else if (animVars.hasKey(endEffectorPositionVar)) { targetPose.trans() = animVars.lookupRigToGeometry(endEffectorPositionVar, tipPose.trans()); } @@ -147,9 +147,11 @@ const AnimPoseVec& AnimTwoBoneIK::evaluate(const AnimVariantMap& 
animVars, const // http://mathworld.wolfram.com/Circle-CircleIntersection.html float midAngle = 0.0f; - if (d < r0 + r1) { + if ((d < r0 + r1) && (d > 0.0f) && (r0 > 0.0f) && (r1 > 0.0f)) { float y = sqrtf((-d + r1 - r0) * (-d - r1 + r0) * (-d + r1 + r0) * (d + r1 + r0)) / (2.0f * d); - midAngle = PI - (acosf(y / r0) + acosf(y / r1)); + float yR0Quotient = glm::clamp(y / r0, -1.0f, 1.0f); + float yR1Quotient = glm::clamp(y / r1, -1.0f, 1.0f); + midAngle = PI - (acosf(yR0Quotient) + acosf(yR1Quotient)); } // compute midJoint rotation diff --git a/libraries/animation/src/AnimUtil.cpp b/libraries/animation/src/AnimUtil.cpp index c23e228556..5fca2b4f88 100644 --- a/libraries/animation/src/AnimUtil.cpp +++ b/libraries/animation/src/AnimUtil.cpp @@ -142,3 +142,72 @@ glm::quat computeBodyFacingFromHead(const glm::quat& headRot, const glm::vec3& u return glmExtractRotation(bodyMat); } + + +const float INV_SQRT_3 = 1.0f / sqrtf(3.0f); +const int DOP14_COUNT = 14; +const glm::vec3 DOP14_NORMALS[DOP14_COUNT] = { + Vectors::UNIT_X, + -Vectors::UNIT_X, + Vectors::UNIT_Y, + -Vectors::UNIT_Y, + Vectors::UNIT_Z, + -Vectors::UNIT_Z, + glm::vec3(INV_SQRT_3, INV_SQRT_3, INV_SQRT_3), + -glm::vec3(INV_SQRT_3, INV_SQRT_3, INV_SQRT_3), + glm::vec3(INV_SQRT_3, -INV_SQRT_3, INV_SQRT_3), + -glm::vec3(INV_SQRT_3, -INV_SQRT_3, INV_SQRT_3), + glm::vec3(INV_SQRT_3, INV_SQRT_3, -INV_SQRT_3), + -glm::vec3(INV_SQRT_3, INV_SQRT_3, -INV_SQRT_3), + glm::vec3(INV_SQRT_3, -INV_SQRT_3, -INV_SQRT_3), + -glm::vec3(INV_SQRT_3, -INV_SQRT_3, -INV_SQRT_3) +}; + +// returns true if the given point lies inside of the k-dop, specified by shapeInfo & shapePose. +// if the given point does lie within the k-dop, it also returns the amount of displacement necessary to push that point outward +// such that it lies on the surface of the kdop. +bool findPointKDopDisplacement(const glm::vec3& point, const AnimPose& shapePose, const HFMJointShapeInfo& shapeInfo, glm::vec3& displacementOut) { + + // transform point into local space of jointShape. + glm::vec3 localPoint = shapePose.inverse().xformPoint(point); + + // Only works for 14-dop shape infos. + if (shapeInfo.dots.size() != DOP14_COUNT) { + return false; + } + + glm::vec3 minDisplacement(FLT_MAX); + float minDisplacementLen = FLT_MAX; + glm::vec3 p = localPoint - shapeInfo.avgPoint; + float pLen = glm::length(p); + if (pLen > 0.0f) { + int slabCount = 0; + for (int i = 0; i < DOP14_COUNT; i++) { + float dot = glm::dot(p, DOP14_NORMALS[i]); + if (dot > 0.0f && dot < shapeInfo.dots[i]) { + slabCount++; + float distToPlane = pLen * (shapeInfo.dots[i] / dot); + float displacementLen = distToPlane - pLen; + + // keep track of the smallest displacement + if (displacementLen < minDisplacementLen) { + minDisplacementLen = displacementLen; + minDisplacement = (p / pLen) * displacementLen; + } + } + } + if (slabCount == (DOP14_COUNT / 2) && minDisplacementLen != FLT_MAX) { + // we are within the k-dop so push the point along the minimum displacement found + displacementOut = shapePose.xformVectorFast(minDisplacement); + return true; + } else { + // point is outside of kdop + return false; + } + } else { + // point is directly on top of shapeInfo.avgPoint. + // push the point out along the x axis. 
+ displacementOut = shapePose.xformVectorFast(shapeInfo.points[0]); + return true; + } +} diff --git a/libraries/animation/src/AnimUtil.h b/libraries/animation/src/AnimUtil.h index cf190e8dbf..c2925e31e8 100644 --- a/libraries/animation/src/AnimUtil.h +++ b/libraries/animation/src/AnimUtil.h @@ -128,4 +128,10 @@ protected: bool _snapshotValid { false }; }; + +// returns true if the given point lies inside of the k-dop, specified by shapeInfo & shapePose. +// if the given point does lie within the k-dop, it also returns the amount of displacement necessary to push that point outward +// such that it lies on the surface of the kdop. +bool findPointKDopDisplacement(const glm::vec3& point, const AnimPose& shapePose, const HFMJointShapeInfo& shapeInfo, glm::vec3& displacementOut); + #endif diff --git a/libraries/animation/src/Rig.cpp b/libraries/animation/src/Rig.cpp index 43e94d23e8..a4c57025be 100644 --- a/libraries/animation/src/Rig.cpp +++ b/libraries/animation/src/Rig.cpp @@ -370,6 +370,88 @@ void Rig::restoreAnimation() { } } +void Rig::overrideHandAnimation(bool isLeft, const QString& url, float fps, bool loop, float firstFrame, float lastFrame) { + HandAnimState::ClipNodeEnum clipNodeEnum; + if (isLeft) { + if (_leftHandAnimState.clipNodeEnum == HandAnimState::None || _leftHandAnimState.clipNodeEnum == HandAnimState::B) { + clipNodeEnum = HandAnimState::A; + } else { + clipNodeEnum = HandAnimState::B; + } + } else { + if (_rightHandAnimState.clipNodeEnum == HandAnimState::None || _rightHandAnimState.clipNodeEnum == HandAnimState::B) { + clipNodeEnum = HandAnimState::A; + } else { + clipNodeEnum = HandAnimState::B; + } + } + + if (_animNode) { + std::shared_ptr clip; + if (isLeft) { + if (clipNodeEnum == HandAnimState::A) { + clip = std::dynamic_pointer_cast(_animNode->findByName("leftHandAnimA")); + } else { + clip = std::dynamic_pointer_cast(_animNode->findByName("leftHandAnimB")); + } + } else { + if (clipNodeEnum == HandAnimState::A) { + clip = std::dynamic_pointer_cast(_animNode->findByName("rightHandAnimA")); + } else { + clip = std::dynamic_pointer_cast(_animNode->findByName("rightHandAnimB")); + } + } + + if (clip) { + // set parameters + clip->setLoopFlag(loop); + clip->setStartFrame(firstFrame); + clip->setEndFrame(lastFrame); + const float REFERENCE_FRAMES_PER_SECOND = 30.0f; + float timeScale = fps / REFERENCE_FRAMES_PER_SECOND; + clip->setTimeScale(timeScale); + clip->loadURL(url); + } + } + + // notify the handAnimStateMachine the desired state. + if (isLeft) { + // store current hand anim state. + _leftHandAnimState = { clipNodeEnum, url, fps, loop, firstFrame, lastFrame }; + _animVars.set("leftHandAnimNone", false); + _animVars.set("leftHandAnimA", clipNodeEnum == HandAnimState::A); + _animVars.set("leftHandAnimB", clipNodeEnum == HandAnimState::B); + } else { + // store current hand anim state. + _rightHandAnimState = { clipNodeEnum, url, fps, loop, firstFrame, lastFrame }; + _animVars.set("rightHandAnimNone", false); + _animVars.set("rightHandAnimA", clipNodeEnum == HandAnimState::A); + _animVars.set("rightHandAnimB", clipNodeEnum == HandAnimState::B); + } +} + +void Rig::restoreHandAnimation(bool isLeft) { + if (isLeft) { + if (_leftHandAnimState.clipNodeEnum != HandAnimState::None) { + _leftHandAnimState.clipNodeEnum = HandAnimState::None; + + // notify the handAnimStateMachine the desired state. 
+ _animVars.set("leftHandAnimNone", true); + _animVars.set("leftHandAnimA", false); + _animVars.set("leftHandAnimB", false); + } + } else { + if (_rightHandAnimState.clipNodeEnum != HandAnimState::None) { + _rightHandAnimState.clipNodeEnum = HandAnimState::None; + + // notify the handAnimStateMachine the desired state. + _animVars.set("rightHandAnimNone", true); + _animVars.set("rightHandAnimA", false); + _animVars.set("rightHandAnimB", false); + } + } +} + void Rig::overrideNetworkAnimation(const QString& url, float fps, bool loop, float firstFrame, float lastFrame) { NetworkAnimState::ClipNodeEnum clipNodeEnum = NetworkAnimState::None; @@ -1521,74 +1603,6 @@ void Rig::updateHead(bool headEnabled, bool hipsEnabled, const AnimPose& headPos } } -const float INV_SQRT_3 = 1.0f / sqrtf(3.0f); -const int DOP14_COUNT = 14; -const glm::vec3 DOP14_NORMALS[DOP14_COUNT] = { - Vectors::UNIT_X, - -Vectors::UNIT_X, - Vectors::UNIT_Y, - -Vectors::UNIT_Y, - Vectors::UNIT_Z, - -Vectors::UNIT_Z, - glm::vec3(INV_SQRT_3, INV_SQRT_3, INV_SQRT_3), - -glm::vec3(INV_SQRT_3, INV_SQRT_3, INV_SQRT_3), - glm::vec3(INV_SQRT_3, -INV_SQRT_3, INV_SQRT_3), - -glm::vec3(INV_SQRT_3, -INV_SQRT_3, INV_SQRT_3), - glm::vec3(INV_SQRT_3, INV_SQRT_3, -INV_SQRT_3), - -glm::vec3(INV_SQRT_3, INV_SQRT_3, -INV_SQRT_3), - glm::vec3(INV_SQRT_3, -INV_SQRT_3, -INV_SQRT_3), - -glm::vec3(INV_SQRT_3, -INV_SQRT_3, -INV_SQRT_3) -}; - -// returns true if the given point lies inside of the k-dop, specified by shapeInfo & shapePose. -// if the given point does lie within the k-dop, it also returns the amount of displacement necessary to push that point outward -// such that it lies on the surface of the kdop. -static bool findPointKDopDisplacement(const glm::vec3& point, const AnimPose& shapePose, const HFMJointShapeInfo& shapeInfo, glm::vec3& displacementOut) { - - // transform point into local space of jointShape. - glm::vec3 localPoint = shapePose.inverse().xformPoint(point); - - // Only works for 14-dop shape infos. - if (shapeInfo.dots.size() != DOP14_COUNT) { - return false; - } - - glm::vec3 minDisplacement(FLT_MAX); - float minDisplacementLen = FLT_MAX; - glm::vec3 p = localPoint - shapeInfo.avgPoint; - float pLen = glm::length(p); - if (pLen > 0.0f) { - int slabCount = 0; - for (int i = 0; i < DOP14_COUNT; i++) { - float dot = glm::dot(p, DOP14_NORMALS[i]); - if (dot > 0.0f && dot < shapeInfo.dots[i]) { - slabCount++; - float distToPlane = pLen * (shapeInfo.dots[i] / dot); - float displacementLen = distToPlane - pLen; - - // keep track of the smallest displacement - if (displacementLen < minDisplacementLen) { - minDisplacementLen = displacementLen; - minDisplacement = (p / pLen) * displacementLen; - } - } - } - if (slabCount == (DOP14_COUNT / 2) && minDisplacementLen != FLT_MAX) { - // we are within the k-dop so push the point along the minimum displacement found - displacementOut = shapePose.xformVectorFast(minDisplacement); - return true; - } else { - // point is outside of kdop - return false; - } - } else { - // point is directly on top of shapeInfo.avgPoint. - // push the point out along the x axis. 
- displacementOut = shapePose.xformVectorFast(shapeInfo.points[0]); - return true; - } -} - glm::vec3 Rig::deflectHandFromTorso(const glm::vec3& handPosition, const HFMJointShapeInfo& hipsShapeInfo, const HFMJointShapeInfo& spineShapeInfo, const HFMJointShapeInfo& spine1ShapeInfo, const HFMJointShapeInfo& spine2ShapeInfo) const { glm::vec3 position = handPosition; @@ -2136,6 +2150,20 @@ void Rig::initAnimGraph(const QUrl& url) { overrideAnimation(origState.url, origState.fps, origState.loop, origState.firstFrame, origState.lastFrame); } + if (_rightHandAnimState.clipNodeEnum != HandAnimState::None) { + // restore the right hand animation we had before reset. + HandAnimState origState = _rightHandAnimState; + _rightHandAnimState = { HandAnimState::None, "", 30.0f, false, 0.0f, 0.0f }; + overrideHandAnimation(false, origState.url, origState.fps, origState.loop, origState.firstFrame, origState.lastFrame); + } + + if (_leftHandAnimState.clipNodeEnum != HandAnimState::None) { + // restore the left hand animation we had before reset. + HandAnimState origState = _leftHandAnimState; + _leftHandAnimState = { HandAnimState::None, "", 30.0f, false, 0.0f, 0.0f }; + overrideHandAnimation(true, origState.url, origState.fps, origState.loop, origState.firstFrame, origState.lastFrame); + } + // restore the role animations we had before reset. for (auto& roleAnimState : _roleAnimStates) { auto roleState = roleAnimState.second; diff --git a/libraries/animation/src/Rig.h b/libraries/animation/src/Rig.h index df13ff5c2b..b9a7f73117 100644 --- a/libraries/animation/src/Rig.h +++ b/libraries/animation/src/Rig.h @@ -116,8 +116,12 @@ public: void destroyAnimGraph(); void overrideAnimation(const QString& url, float fps, bool loop, float firstFrame, float lastFrame); + bool isPlayingOverrideAnimation() const { return _userAnimState.clipNodeEnum != UserAnimState::None; }; void restoreAnimation(); + void overrideHandAnimation(bool isLeft, const QString& url, float fps, bool loop, float firstFrame, float lastFrame); + void restoreHandAnimation(bool isLeft); + void overrideNetworkAnimation(const QString& url, float fps, bool loop, float firstFrame, float lastFrame); void triggerNetworkRole(const QString& role); void restoreNetworkAnimation(); @@ -333,7 +337,7 @@ protected: RigRole _state { RigRole::Idle }; RigRole _desiredState { RigRole::Idle }; float _desiredStateAge { 0.0f }; - + struct NetworkAnimState { enum ClipNodeEnum { None = 0, @@ -356,6 +360,27 @@ protected: float blendTime; }; + struct HandAnimState { + enum ClipNodeEnum { + None = 0, + A, + B + }; + + HandAnimState() : clipNodeEnum(HandAnimState::None) {} + HandAnimState(ClipNodeEnum clipNodeEnumIn, const QString& urlIn, float fpsIn, bool loopIn, float firstFrameIn, float lastFrameIn) : + clipNodeEnum(clipNodeEnumIn), url(urlIn), fps(fpsIn), loop(loopIn), firstFrame(firstFrameIn), lastFrame(lastFrameIn) { + } + + + ClipNodeEnum clipNodeEnum; + QString url; + float fps; + bool loop; + float firstFrame; + float lastFrame; + }; + struct UserAnimState { enum ClipNodeEnum { None = 0, @@ -390,6 +415,8 @@ protected: UserAnimState _userAnimState; NetworkAnimState _networkAnimState; + HandAnimState _rightHandAnimState; + HandAnimState _leftHandAnimState; std::map _roleAnimStates; float _leftHandOverlayAlpha { 0.0f }; diff --git a/libraries/avatars-renderer/src/avatars-renderer/Avatar.cpp b/libraries/avatars-renderer/src/avatars-renderer/Avatar.cpp index b6c5c6d235..b86c56bb0c 100644 --- a/libraries/avatars-renderer/src/avatars-renderer/Avatar.cpp +++ 
b/libraries/avatars-renderer/src/avatars-renderer/Avatar.cpp @@ -509,6 +509,26 @@ void Avatar::relayJointDataToChildren() { _reconstructSoftEntitiesJointMap = false; } +/**jsdoc + * An avatar has different types of data simulated at different rates, in Hz. + * + * + * + * + * + * + * + * + * + * + * + * + *
+ * <tr><th>Rate Name</th><th>Description</th></tr>
+ * <tr><td>"avatar" or ""</td><td>The rate at which the avatar is updated even if not in view.</td></tr>
+ * <tr><td>"avatarInView"</td><td>The rate at which the avatar is updated if in view.</td></tr>
+ * <tr><td>"skeletonModel"</td><td>The rate at which the skeleton model is being updated, even if there are no joint data available.</td></tr>
+ * <tr><td>"jointData"</td><td>The rate at which joint data are being updated.</td></tr>
+ * <tr><td>""</td><td>When no rate name is specified, the "avatar" update rate is provided.</td></tr>
+ * + * @typedef {string} AvatarSimulationRate + */ float Avatar::getSimulationRate(const QString& rateName) const { if (rateName == "") { return _simulationRate.rate(); diff --git a/libraries/avatars-renderer/src/avatars-renderer/Avatar.h b/libraries/avatars-renderer/src/avatars-renderer/Avatar.h index aef5ac09e9..3d14418157 100644 --- a/libraries/avatars-renderer/src/avatars-renderer/Avatar.h +++ b/libraries/avatars-renderer/src/avatars-renderer/Avatar.h @@ -501,8 +501,8 @@ public: /**jsdoc * @function MyAvatar.getSimulationRate - * @param {string} [rateName=""] - Rate name. - * @returns {number} Simulation rate. + * @param {AvatarSimulationRate} [rateName=""] - Rate name. + * @returns {number} Simulation rate in Hz. * @deprecated This function is deprecated and will be removed. */ Q_INVOKABLE float getSimulationRate(const QString& rateName = QString("")) const; diff --git a/libraries/avatars-renderer/src/avatars-renderer/SkeletonModel.cpp b/libraries/avatars-renderer/src/avatars-renderer/SkeletonModel.cpp index fbcf36a8c9..295a0e9f52 100644 --- a/libraries/avatars-renderer/src/avatars-renderer/SkeletonModel.cpp +++ b/libraries/avatars-renderer/src/avatars-renderer/SkeletonModel.cpp @@ -270,28 +270,19 @@ bool SkeletonModel::getEyeModelPositions(glm::vec3& firstEyePosition, glm::vec3& getJointPosition(_rig.indexOfJoint("RightEye"), secondEyePosition)) { return true; } - // no eye joints; try to estimate based on head/neck joints - glm::vec3 neckPosition, headPosition; - if (getJointPosition(_rig.indexOfJoint("Neck"), neckPosition) && - getJointPosition(_rig.indexOfJoint("Head"), headPosition)) { - const float EYE_PROPORTION = 0.6f; - glm::vec3 baseEyePosition = glm::mix(neckPosition, headPosition, EYE_PROPORTION); + + int headJointIndex = _rig.indexOfJoint("Head"); + glm::vec3 headPosition; + if (getJointPosition(headJointIndex, headPosition)) { + + // get head joint rotation. 
glm::quat headRotation; - getJointRotation(_rig.indexOfJoint("Head"), headRotation); - const float EYES_FORWARD = 0.25f; - const float EYE_SEPARATION = 0.1f; - float headHeight = glm::distance(neckPosition, headPosition); - firstEyePosition = baseEyePosition + headRotation * glm::vec3(EYE_SEPARATION, 0.0f, EYES_FORWARD) * headHeight; - secondEyePosition = baseEyePosition + headRotation * glm::vec3(-EYE_SEPARATION, 0.0f, EYES_FORWARD) * headHeight; - return true; - } else if (getJointPosition(_rig.indexOfJoint("Head"), headPosition)) { - glm::vec3 baseEyePosition = headPosition; - glm::quat headRotation; - getJointRotation(_rig.indexOfJoint("Head"), headRotation); - const float EYES_FORWARD_HEAD_ONLY = 0.30f; - const float EYE_SEPARATION = 0.1f; - firstEyePosition = baseEyePosition + headRotation * glm::vec3(EYE_SEPARATION, 0.0f, EYES_FORWARD_HEAD_ONLY); - secondEyePosition = baseEyePosition + headRotation * glm::vec3(-EYE_SEPARATION, 0.0f, EYES_FORWARD_HEAD_ONLY); + getJointRotation(headJointIndex, headRotation); + + float heightRatio = _rig.getUnscaledEyeHeight() / DEFAULT_AVATAR_EYE_HEIGHT; + glm::vec3 ipdOffset = glm::vec3(DEFAULT_AVATAR_IPD / 2.0f, 0.0f, 0.0f); + firstEyePosition = headPosition + headRotation * heightRatio * (DEFAULT_AVATAR_HEAD_TO_MIDDLE_EYE_OFFSET + ipdOffset); + secondEyePosition = headPosition + headRotation * heightRatio * (DEFAULT_AVATAR_HEAD_TO_MIDDLE_EYE_OFFSET - ipdOffset); return true; } return false; diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index a2b0b808ba..f460881a45 100755 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -1545,7 +1545,6 @@ float AvatarData::getDataRate(const QString& rateName) const { * Rate NameDescription * * - * "globalPosition"Global position. * "localPosition"Local position. * "avatarBoundingBox"Avatar bounding box. @@ -1559,7 +1558,6 @@ float AvatarData::getDataRate(const QString& rateName) const { * "faceTracker"Face tracker data. * "jointData"Joint data. * "farGrabJointData"Far grab joint data. - * ""When no rate name is specified, the overall update rate is provided. * * @@ -1721,7 +1719,6 @@ glm::vec3 AvatarData::getJointTranslation(const QString& name) const { // on another thread in between the call to getJointIndex and getJointTranslation // return getJointTranslation(getJointIndex(name)); return readLockWithNamedJointIndex(name, [this](int index) { - return _jointData.at(index).translation; return getJointTranslation(index); }); } @@ -1809,8 +1806,8 @@ glm::quat AvatarData::getJointRotation(const QString& name) const { // Can't do this, not thread safe // return getJointRotation(getJointIndex(name)); - return readLockWithNamedJointIndex(name, [&](int index) { - return _jointData.at(index).rotation; + return readLockWithNamedJointIndex(name, [this](int index) { + return getJointRotation(index); }); } @@ -2905,6 +2902,20 @@ glm::mat4 AvatarData::getControllerRightHandMatrix() const { return _controllerRightHandMatrixCache.get(); } +/**jsdoc + * Information about a ray-to-avatar intersection. + * @typedef {object} RayToAvatarIntersectionResult + * @property {boolean} intersects - true if an avatar is intersected, false if it isn't. + * @property {string} avatarID - The ID of the avatar that is intersected. + * @property {number} distance - The distance from the ray origin to the intersection. + * @property {string} face - The name of the box face that is intersected; "UNKNOWN_FACE" if mesh was picked + * against. 
+ * @property {Vec3} intersection - The ray intersection point in world coordinates. + * @property {Vec3} surfaceNormal - The surface normal at the intersection point. + * @property {number} jointIndex - The index of the joint intersected. + * @property {SubmeshIntersection} extraInfo - Extra information on the mesh intersected if mesh was picked against, + * {} if it wasn't. + */ QScriptValue RayToAvatarIntersectionResultToScriptValue(QScriptEngine* engine, const RayToAvatarIntersectionResult& value) { QScriptValue obj = engine->newObject(); obj.setProperty("intersects", value.intersects); diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 1c4b0cfc53..79c82d4f29 100755 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -479,7 +479,8 @@ class AvatarData : public QObject, public SpatiallyNestable { * avatar. Read-only. * @property {number} sensorToWorldScale - The scale that transforms dimensions in the user's real world to the avatar's * size in the virtual world. Read-only. - * @property {boolean} hasPriority - is the avatar in a Hero zone? Read-only. + * @property {boolean} hasPriority - true if the avatar is in a "hero" zone, false if it isn't. + * Read-only. */ Q_PROPERTY(glm::vec3 position READ getWorldPosition WRITE setPositionViaScript) Q_PROPERTY(float scale READ getDomainLimitedScale WRITE setTargetScale) @@ -1751,14 +1752,11 @@ protected: template T readLockWithNamedJointIndex(const QString& name, const T& defaultValue, F f) const { - int index = getFauxJointIndex(name); QReadLocker readLock(&_jointDataLock); - - // The first conditional is superfluous, but illustrative - if (index == -1 || index < _jointData.size()) { + int index = getJointIndex(name); + if (index == -1) { return defaultValue; } - return f(index); } @@ -1769,8 +1767,8 @@ protected: template void writeLockWithNamedJointIndex(const QString& name, F f) { - int index = getFauxJointIndex(name); QWriteLocker writeLock(&_jointDataLock); + int index = getJointIndex(name); if (index == -1) { return; } diff --git a/libraries/avatars/src/AvatarHashMap.h b/libraries/avatars/src/AvatarHashMap.h index 8395651d6b..17a3d28eb0 100644 --- a/libraries/avatars/src/AvatarHashMap.h +++ b/libraries/avatars/src/AvatarHashMap.h @@ -36,8 +36,10 @@ const int CLIENT_TO_AVATAR_MIXER_BROADCAST_FRAMES_PER_SECOND = 50; const quint64 MIN_TIME_BETWEEN_MY_AVATAR_DATA_SENDS = USECS_PER_SECOND / CLIENT_TO_AVATAR_MIXER_BROADCAST_FRAMES_PER_SECOND; /**jsdoc - * Note: An AvatarList API is also provided for Interface and client entity scripts: it is a - * synonym for the {@link AvatarManager} API. + * The AvatarList API provides information about avatars within the current domain. + * + *

Warning: An API named "AvatarList" is also provided for Interface, client entity, and avatar + * scripts, however, it is a synonym for the {@link AvatarManager} API.

* * @namespace AvatarList * @@ -78,23 +80,37 @@ public: // Currently, your own avatar will be included as the null avatar id. /**jsdoc + * Gets the IDs of all avatars in the domain. + *

Warning: If the AC script is acting as an avatar (i.e., Agent.isAvatar == true) the + * avatar's ID is NOT included in results.

* @function AvatarList.getAvatarIdentifiers - * @returns {Uuid[]} + * @returns {Uuid[]} The IDs of all avatars in the domain (excluding AC script's avatar). + * @example Report the IDS of all avatars within the domain. + * var avatars = AvatarList.getAvatarIdentifiers(); + * print("Avatars in the domain: " + JSON.stringify(avatars)); */ Q_INVOKABLE QVector getAvatarIdentifiers(); /**jsdoc + * Gets the IDs of all avatars within a specified distance from a point. + *

Warning: If the AC script is acting as an avatar (i.e., Agent.isAvatar == true) the + * avatar's ID is NOT included in results.

* @function AvatarList.getAvatarsInRange - * @param {Vec3} position - * @param {number} range - * @returns {Uuid[]} + * @param {Vec3} position - The point about which the search is performed. + * @param {number} range - The search radius. + * @returns {Uuid[]} The IDs of all avatars within the search distance from the position (excluding AC script's avatar). + * @example Report the IDs of all avatars within 10m of the origin. + * var RANGE = 10; + * var avatars = AvatarList.getAvatarsInRange(Vec3.ZERO, RANGE); + * print("Avatars near the origin: " + JSON.stringify(avatars)); */ Q_INVOKABLE QVector getAvatarsInRange(const glm::vec3& position, float rangeMeters) const; /**jsdoc + * Gets information about an avatar. * @function AvatarList.getAvatar - * @param {Uuid} avatarID - * @returns {AvatarData} + * @param {Uuid} avatarID - The ID of the avatar. + * @returns {AvatarData} Information about the avatar. */ // Null/Default-constructed QUuids will return MyAvatar Q_INVOKABLE virtual ScriptAvatarData* getAvatar(QUuid avatarID) { return new ScriptAvatarData(getAvatarBySessionID(avatarID)); } @@ -110,34 +126,57 @@ public: signals: /**jsdoc + * Triggered when an avatar arrives in the domain. * @function AvatarList.avatarAddedEvent - * @param {Uuid} sessionUUID + * @param {Uuid} sessionUUID - The ID of the avatar that arrived in the domain. * @returns {Signal} + * @example Report when an avatar arrives in the domain. + * AvatarManager.avatarAddedEvent.connect(function (sessionID) { + * print("Avatar arrived: " + sessionID); + * }); + * + * // Note: If using from the AvatarList API, replace "AvatarManager" with "AvatarList". */ void avatarAddedEvent(const QUuid& sessionUUID); /**jsdoc + * Triggered when an avatar leaves the domain. * @function AvatarList.avatarRemovedEvent - * @param {Uuid} sessionUUID + * @param {Uuid} sessionUUID - The ID of the avatar that left the domain. * @returns {Signal} + * @example Report when an avatar leaves the domain. + * AvatarManager.avatarRemovedEvent.connect(function (sessionID) { + * print("Avatar left: " + sessionID); + * }); + * + * // Note: If using from the AvatarList API, replace "AvatarManager" with "AvatarList". */ void avatarRemovedEvent(const QUuid& sessionUUID); /**jsdoc + * Triggered when an avatar's session ID changes. * @function AvatarList.avatarSessionChangedEvent - * @param {Uuid} sessionUUID - * @param {Uuid} oldSessionUUID + * @param {Uuid} newSessionUUID - The new session ID. + * @param {Uuid} oldSessionUUID - The old session ID. * @returns {Signal} + * @example Report when an avatar's session ID changes. + * AvatarManager.avatarSessionChangedEvent.connect(function (newSessionID, oldSessionID) { + * print("Avatar session ID changed from " + oldSessionID + " to " + newSessionID); + * }); + * + * // Note: If using from the AvatarList API, replace "AvatarManager" with "AvatarList". */ void avatarSessionChangedEvent(const QUuid& sessionUUID,const QUuid& oldUUID); public slots: /**jsdoc + * Checks whether there is an avatar within a specified distance from a point. * @function AvatarList.isAvatarInRange - * @param {string} position - * @param {string} range - * @returns {boolean} + * @param {string} position - The test position. + * @param {string} range - The test distance. + * @returns {boolean} true if there's an avatar within the specified distance of the point, false + * if not. 
*/ bool isAvatarInRange(const glm::vec3 & position, const float range); @@ -145,36 +184,41 @@ protected slots: /**jsdoc * @function AvatarList.sessionUUIDChanged - * @param {Uuid} sessionUUID - * @param {Uuid} oldSessionUUID + * @param {Uuid} sessionUUID - New session ID. + * @param {Uuid} oldSessionUUID - Old session ID. + * @deprecated This function is deprecated and will be removed. */ void sessionUUIDChanged(const QUuid& sessionUUID, const QUuid& oldUUID); /**jsdoc * @function AvatarList.processAvatarDataPacket - * @param {} message - * @param {} sendingNode + * @param {object} message - Message. + * @param {object} sendingNode - Sending node. + * @deprecated This function is deprecated and will be removed. */ void processAvatarDataPacket(QSharedPointer message, SharedNodePointer sendingNode); /**jsdoc * @function AvatarList.processAvatarIdentityPacket - * @param {} message - * @param {} sendingNode + * @param {object} message - Message. + * @param {object} sendingNode - Sending node. + * @deprecated This function is deprecated and will be removed. */ void processAvatarIdentityPacket(QSharedPointer message, SharedNodePointer sendingNode); /**jsdoc * @function AvatarList.processBulkAvatarTraits - * @param {} message - * @param {} sendingNode + * @param {object} message - Message. + * @param {object} sendingNode - Sending node. + * @deprecated This function is deprecated and will be removed. */ void processBulkAvatarTraits(QSharedPointer message, SharedNodePointer sendingNode); /**jsdoc * @function AvatarList.processKillAvatar - * @param {} message - * @param {} sendingNode + * @param {object} message - Message. + * @param {object} sendingNode - Sending node. + * @deprecated This function is deprecated and will be removed. */ void processKillAvatar(QSharedPointer message, SharedNodePointer sendingNode); diff --git a/libraries/avatars/src/ScriptAvatarData.h b/libraries/avatars/src/ScriptAvatarData.h index 01f7ff360a..7e33618ba9 100644 --- a/libraries/avatars/src/ScriptAvatarData.h +++ b/libraries/avatars/src/ScriptAvatarData.h @@ -16,6 +16,52 @@ #include "AvatarData.h" +/**jsdoc + * Information about an avatar. + * @typedef {object} AvatarData + * @property {Vec3} position - The avatar's position. + * @property {number} scale - The target scale of the avatar without any restrictions on permissible values imposed by the + * domain. + * @property {Vec3} handPosition - A user-defined hand position, in world coordinates. The position moves with the avatar but + * is otherwise not used or changed by Interface. + * @property {number} bodyPitch - The pitch of the avatar's body, in degrees. + * @property {number} bodyYaw - The yaw of the avatar's body, in degrees. + * @property {number} bodyRoll - The roll of the avatar's body, in degrees. + * @property {Quat} orientation - The orientation of the avatar's body. + * @property {Quat} headOrientation - The orientation of the avatar's head. + * @property {number} headPitch - The pitch of the avatar's head relative to the body, in degrees. + * @property {number} headYaw - The yaw of the avatar's head relative to the body, in degrees. + * @property {number} headRoll - The roll of the avatar's head relative to the body, in degrees. + * + * @property {Vec3} velocity - The linear velocity of the avatar. + * @property {Vec3} angularVelocity - The angular velocity of the avatar. + * + * @property {Uuid} sessionUUID - The avatar's session ID. + * @property {string} displayName - The avatar's display name. 
+ * @property {string} sessionDisplayName - The avatar's display name, sanitized and versioned, as defined by the avatar mixer. + * It is unique among all avatars present in the domain at the time. + * @property {boolean} isReplicated - Deprecated. + * @property {boolean} lookAtSnappingEnabled - true if the avatar's eyes snap to look at another avatar's eyes + * when the other avatar is in the line of sight and also has lookAtSnappingEnabled == true. + * + * @property {string} skeletonModelURL - The avatar's FST file. + * @property {AttachmentData[]} attachmentData - Information on the avatar's attachments.
+ * Deprecated: Use avatar entities instead. + * @property {string[]} jointNames - The list of joints in the current avatar model. + * + * @property {number} audioLoudness - The instantaneous loudness of the audio input that the avatar is injecting into the + * domain. + * @property {number} audioAverageLoudness - The rolling average loudness of the audio input that the avatar is injecting into + * the domain. + * + * @property {Mat4} sensorToWorldMatrix - The scale, rotation, and translation transform from the user's real world to the + * avatar's size, orientation, and position in the virtual world. + * @property {Mat4} controllerLeftHandMatrix - The rotation and translation of the left hand controller relative to the avatar. + * @property {Mat4} controllerRightHandMatrix - The rotation and translation of the right hand controller relative to the + * avatar. + * + * @property {boolean} hasPriority - true if the avatar is in a "hero" zone, false if it isn't. + */ class ScriptAvatarData : public QObject { Q_OBJECT diff --git a/libraries/baking/src/MaterialBaker.cpp b/libraries/baking/src/MaterialBaker.cpp index 9fcd7d0354..7fc2573d7b 100644 --- a/libraries/baking/src/MaterialBaker.cpp +++ b/libraries/baking/src/MaterialBaker.cpp @@ -144,7 +144,12 @@ void MaterialBaker::processMaterial() { connect(textureBaker.data(), &TextureBaker::finished, this, &MaterialBaker::handleFinishedTextureBaker); _textureBakers.insert(textureKey, textureBaker); textureBaker->moveToThread(_getNextOvenWorkerThreadOperator ? _getNextOvenWorkerThreadOperator() : thread()); - QMetaObject::invokeMethod(textureBaker.data(), "bake"); + // By default, Qt will invoke this bake immediately if the TextureBaker is on the same worker thread as this MaterialBaker. + // We don't want that, because threads may be waiting for work while this thread is stuck processing a TextureBaker. + // On top of that, _textureBakers isn't fully populated. + // So, use Qt::QueuedConnection. 
+ // TODO: Better thread utilization at the top level, not just the MaterialBaker level + QMetaObject::invokeMethod(textureBaker.data(), "bake", Qt::QueuedConnection); } _materialsNeedingRewrite.insert(textureKey, networkMaterial.second); } else { diff --git a/libraries/baking/src/TextureBaker.cpp b/libraries/baking/src/TextureBaker.cpp index 3756ae86de..54d304b7d8 100644 --- a/libraries/baking/src/TextureBaker.cpp +++ b/libraries/baking/src/TextureBaker.cpp @@ -131,7 +131,10 @@ void TextureBaker::handleTextureNetworkReply() { void TextureBaker::processTexture() { // the baked textures need to have the source hash added for cache checks in Interface // so we add that to the processed texture before handling it off to be serialized - auto hashData = QCryptographicHash::hash(_originalTexture, QCryptographicHash::Md5); + QCryptographicHash hasher(QCryptographicHash::Md5); + hasher.addData(_originalTexture); + hasher.addData((const char*)&_textureType, sizeof(_textureType)); + auto hashData = hasher.result(); std::string hash = hashData.toHex().toStdString(); TextureMeta meta; @@ -206,7 +209,7 @@ void TextureBaker::processTexture() { } // Uncompressed KTX - if (_textureType == image::TextureUsage::Type::CUBE_TEXTURE) { + if (_textureType == image::TextureUsage::Type::SKY_TEXTURE || _textureType == image::TextureUsage::Type::AMBIENT_TEXTURE) { buffer->reset(); auto processedTexture = image::processImage(std::move(buffer), _textureURL.toString().toStdString(), image::ColorChannel::NONE, ABSOLUTE_MAX_TEXTURE_NUM_PIXELS, _textureType, false, gpu::BackendTarget::GL45, _abortProcessing); diff --git a/libraries/entities-renderer/src/RenderableZoneEntityItem.cpp b/libraries/entities-renderer/src/RenderableZoneEntityItem.cpp index 8a7fa3f8e7..64cca404cb 100644 --- a/libraries/entities-renderer/src/RenderableZoneEntityItem.cpp +++ b/libraries/entities-renderer/src/RenderableZoneEntityItem.cpp @@ -465,7 +465,7 @@ void ZoneEntityRenderer::setAmbientURL(const QString& ambientUrl) { } else { _pendingAmbientTexture = true; auto textureCache = DependencyManager::get(); - _ambientTexture = textureCache->getTexture(_ambientTextureURL, image::TextureUsage::CUBE_TEXTURE); + _ambientTexture = textureCache->getTexture(_ambientTextureURL, image::TextureUsage::AMBIENT_TEXTURE); // keep whatever is assigned on the ambient map/sphere until texture is loaded } @@ -506,7 +506,7 @@ void ZoneEntityRenderer::setSkyboxURL(const QString& skyboxUrl) { } else { _pendingSkyboxTexture = true; auto textureCache = DependencyManager::get(); - _skyboxTexture = textureCache->getTexture(_skyboxTextureURL, image::TextureUsage::CUBE_TEXTURE); + _skyboxTexture = textureCache->getTexture(_skyboxTextureURL, image::TextureUsage::SKY_TEXTURE); } } diff --git a/libraries/image/CMakeLists.txt b/libraries/image/CMakeLists.txt index 0c733ae789..62f48f66e2 100644 --- a/libraries/image/CMakeLists.txt +++ b/libraries/image/CMakeLists.txt @@ -2,6 +2,7 @@ set(TARGET_NAME image) setup_hifi_library() link_hifi_libraries(shared gpu) target_nvtt() +target_tbb() target_etc2comp() target_openexr() diff --git a/libraries/image/src/image/CubeMap.cpp b/libraries/image/src/image/CubeMap.cpp new file mode 100644 index 0000000000..9196377daa --- /dev/null +++ b/libraries/image/src/image/CubeMap.cpp @@ -0,0 +1,660 @@ +// +// CubeMap.h +// image/src/image +// +// Created by Olivier Prat on 03/27/2019. +// Copyright 2019 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// +#include "CubeMap.h" + +#include +#include + +#include "RandomAndNoise.h" +#include "BRDF.h" +#include "ImageLogging.h" + +#ifndef M_PI +#define M_PI 3.14159265359 +#endif + +#include + +using namespace image; + +static const glm::vec3 FACE_NORMALS[24] = { + // POSITIVE X + glm::vec3(1.0f, 1.0f, 1.0f), + glm::vec3(1.0f, 1.0f, -1.0f), + glm::vec3(1.0f, -1.0f, 1.0f), + glm::vec3(1.0f, -1.0f, -1.0f), + // NEGATIVE X + glm::vec3(-1.0f, 1.0f, -1.0f), + glm::vec3(-1.0f, 1.0f, 1.0f), + glm::vec3(-1.0f, -1.0f, -1.0f), + glm::vec3(-1.0f, -1.0f, 1.0f), + // POSITIVE Y + glm::vec3(-1.0f, 1.0f, -1.0f), + glm::vec3(1.0f, 1.0f, -1.0f), + glm::vec3(-1.0f, 1.0f, 1.0f), + glm::vec3(1.0f, 1.0f, 1.0f), + // NEGATIVE Y + glm::vec3(-1.0f, -1.0f, 1.0f), + glm::vec3(1.0f, -1.0f, 1.0f), + glm::vec3(-1.0f, -1.0f, -1.0f), + glm::vec3(1.0f, -1.0f, -1.0f), + // POSITIVE Z + glm::vec3(-1.0f, 1.0f, 1.0f), + glm::vec3(1.0f, 1.0f, 1.0f), + glm::vec3(-1.0f, -1.0f, 1.0f), + glm::vec3(1.0f, -1.0f, 1.0f), + // NEGATIVE Z + glm::vec3(1.0f, 1.0f, -1.0f), + glm::vec3(-1.0f, 1.0f, -1.0f), + glm::vec3(1.0f, -1.0f, -1.0f), + glm::vec3(-1.0f, -1.0f, -1.0f) +}; + +struct CubeFaceMip { + + CubeFaceMip(gpu::uint16 level, const CubeMap* cubemap) { + _dims = cubemap->getMipDimensions(level); + _lineStride = cubemap->getMipLineStride(level); + } + + CubeFaceMip(const CubeFaceMip& other) : _dims(other._dims), _lineStride(other._lineStride) { + + } + + gpu::Vec2i _dims; + size_t _lineStride; +}; + +class CubeMap::ConstMip : public CubeFaceMip { +public: + + ConstMip(gpu::uint16 level, const CubeMap* cubemap) : + CubeFaceMip(level, cubemap), _faces(cubemap->_mips[level]) { + } + + glm::vec4 fetch(int face, glm::vec2 uv) const { + glm::vec2 coordFrac = uv * glm::vec2(_dims) - 0.5f; + glm::vec2 coords = glm::floor(coordFrac); + + coordFrac -= coords; + + coords += (float)EDGE_WIDTH; + + const auto& pixels = _faces[face]; + gpu::Vec2i loCoords(coords); + gpu::Vec2i hiCoords; + + hiCoords = glm::clamp(loCoords + 1, gpu::Vec2i(0, 0), _dims - 1 + (int)EDGE_WIDTH); + loCoords = glm::clamp(loCoords, gpu::Vec2i(0, 0), _dims - 1 + (int)EDGE_WIDTH); + + const size_t offsetLL = loCoords.x + loCoords.y * _lineStride; + const size_t offsetHL = hiCoords.x + loCoords.y * _lineStride; + const size_t offsetLH = loCoords.x + hiCoords.y * _lineStride; + const size_t offsetHH = hiCoords.x + hiCoords.y * _lineStride; + assert(offsetLL >= 0 && offsetLL < _lineStride * (_dims.y + 2 * EDGE_WIDTH)); + assert(offsetHL >= 0 && offsetHL < _lineStride * (_dims.y + 2 * EDGE_WIDTH)); + assert(offsetLH >= 0 && offsetLH < _lineStride * (_dims.y + 2 * EDGE_WIDTH)); + assert(offsetHH >= 0 && offsetHH < _lineStride * (_dims.y + 2 * EDGE_WIDTH)); + glm::vec4 colorLL = pixels[offsetLL]; + glm::vec4 colorHL = pixels[offsetHL]; + glm::vec4 colorLH = pixels[offsetLH]; + glm::vec4 colorHH = pixels[offsetHH]; + + colorLL += (colorHL - colorLL) * coordFrac.x; + colorLH += (colorHH - colorLH) * coordFrac.x; + return colorLL + (colorLH - colorLL) * coordFrac.y; + } + +private: + + const Faces& _faces; + +}; + +class CubeMap::Mip : public CubeFaceMip { +public: + + explicit Mip(gpu::uint16 level, CubeMap* cubemap) : + CubeFaceMip(level, cubemap), _faces(cubemap->_mips[level]) { + } + + Mip(const Mip& other) : CubeFaceMip(other), _faces(other._faces) { + } + + void applySeams() { + if (EDGE_WIDTH == 0) { + return; + } + + // Copy edge rows and columns from neighbouring faces to fix seam filtering 
issues + seamColumnAndRow(gpu::Texture::CUBE_FACE_TOP_POS_Y, _dims.x, gpu::Texture::CUBE_FACE_RIGHT_POS_X, -1, -1); + seamColumnAndRow(gpu::Texture::CUBE_FACE_BOTTOM_NEG_Y, _dims.x, gpu::Texture::CUBE_FACE_RIGHT_POS_X, _dims.y, 1); + seamColumnAndColumn(gpu::Texture::CUBE_FACE_FRONT_NEG_Z, -1, gpu::Texture::CUBE_FACE_RIGHT_POS_X, _dims.x, 1); + seamColumnAndColumn(gpu::Texture::CUBE_FACE_BACK_POS_Z, _dims.x, gpu::Texture::CUBE_FACE_RIGHT_POS_X, -1, 1); + + seamRowAndRow(gpu::Texture::CUBE_FACE_BACK_POS_Z, -1, gpu::Texture::CUBE_FACE_TOP_POS_Y, _dims.y, 1); + seamRowAndRow(gpu::Texture::CUBE_FACE_BACK_POS_Z, _dims.y, gpu::Texture::CUBE_FACE_BOTTOM_NEG_Y, -1, 1); + seamColumnAndColumn(gpu::Texture::CUBE_FACE_BACK_POS_Z, -1, gpu::Texture::CUBE_FACE_LEFT_NEG_X, _dims.x, 1); + + seamRowAndRow(gpu::Texture::CUBE_FACE_TOP_POS_Y, -1, gpu::Texture::CUBE_FACE_FRONT_NEG_Z, -1, -1); + seamColumnAndRow(gpu::Texture::CUBE_FACE_TOP_POS_Y, -1, gpu::Texture::CUBE_FACE_LEFT_NEG_X, -1, 1); + + seamColumnAndColumn(gpu::Texture::CUBE_FACE_LEFT_NEG_X, -1, gpu::Texture::CUBE_FACE_FRONT_NEG_Z, _dims.x, 1); + seamColumnAndRow(gpu::Texture::CUBE_FACE_BOTTOM_NEG_Y, -1, gpu::Texture::CUBE_FACE_LEFT_NEG_X, _dims.y, -1); + + seamRowAndRow(gpu::Texture::CUBE_FACE_FRONT_NEG_Z, _dims.y, gpu::Texture::CUBE_FACE_BOTTOM_NEG_Y, _dims.y, -1); + + // Duplicate corner pixels + for (int face = 0; face < 6; face++) { + auto& pixels = _faces[face]; + + pixels[0] = pixels[1]; + pixels[_dims.x + 1] = pixels[_dims.x]; + pixels[(_dims.y + 1)*(_dims.x + 2)] = pixels[(_dims.y + 1)*(_dims.x + 2) + 1]; + pixels[(_dims.y + 2)*(_dims.x + 2) - 1] = pixels[(_dims.y + 2)*(_dims.x + 2) - 2]; + } + } + +private: + + Faces& _faces; + + inline static void copy(CubeMap::Face::const_iterator srcFirst, CubeMap::Face::const_iterator srcLast, size_t srcStride, CubeMap::Face::iterator dstBegin, size_t dstStride) { + while (srcFirst <= srcLast) { + *dstBegin = *srcFirst; + srcFirst += srcStride; + dstBegin += dstStride; + } + } + + static std::pair getSrcAndDst(int dim, int value) { + int src; + int dst; + + if (value < 0) { + src = 1; + dst = 0; + } else if (value >= dim) { + src = dim; + dst = dim + 1; + } + return std::make_pair(src, dst); + } + + void seamColumnAndColumn(int face0, int col0, int face1, int col1, int inc) { + auto coords0 = getSrcAndDst(_dims.x, col0); + auto coords1 = getSrcAndDst(_dims.x, col1); + + copyColumnToColumn(face0, coords0.first, face1, coords1.second, inc); + copyColumnToColumn(face1, coords1.first, face0, coords0.second, inc); + } + + void seamColumnAndRow(int face0, int col0, int face1, int row1, int inc) { + auto coords0 = getSrcAndDst(_dims.x, col0); + auto coords1 = getSrcAndDst(_dims.y, row1); + + copyColumnToRow(face0, coords0.first, face1, coords1.second, inc); + copyRowToColumn(face1, coords1.first, face0, coords0.second, inc); + } + + void seamRowAndRow(int face0, int row0, int face1, int row1, int inc) { + auto coords0 = getSrcAndDst(_dims.y, row0); + auto coords1 = getSrcAndDst(_dims.y, row1); + + copyRowToRow(face0, coords0.first, face1, coords1.second, inc); + copyRowToRow(face1, coords1.first, face0, coords0.second, inc); + } + + void copyColumnToColumn(int srcFace, int srcCol, int dstFace, int dstCol, const int dstInc) { + const auto lastOffset = _lineStride * (_dims.y - 1); + auto srcFirst = _faces[srcFace].begin() + srcCol + _lineStride; + auto srcLast = srcFirst + lastOffset; + + auto dstFirst = _faces[dstFace].begin() + dstCol + _lineStride; + auto dstLast = dstFirst + lastOffset; + const auto dstStride 
= _lineStride * dstInc; + + assert(srcFirst < _faces[srcFace].end()); + assert(srcLast < _faces[srcFace].end()); + assert(dstFirst < _faces[dstFace].end()); + assert(dstLast < _faces[dstFace].end()); + + if (dstInc < 0) { + std::swap(dstFirst, dstLast); + } + + copy(srcFirst, srcLast, _lineStride, dstFirst, dstStride); + } + + void copyRowToRow(int srcFace, int srcRow, int dstFace, int dstRow, const int dstInc) { + const auto lastOffset =(_dims.x - 1); + auto srcFirst = _faces[srcFace].begin() + srcRow * _lineStride + 1; + auto srcLast = srcFirst + lastOffset; + + auto dstFirst = _faces[dstFace].begin() + dstRow * _lineStride + 1; + auto dstLast = dstFirst + lastOffset; + + assert(srcFirst < _faces[srcFace].end()); + assert(srcLast < _faces[srcFace].end()); + assert(dstFirst < _faces[dstFace].end()); + assert(dstLast < _faces[dstFace].end()); + + if (dstInc < 0) { + std::swap(dstFirst, dstLast); + } + + copy(srcFirst, srcLast, 1, dstFirst, dstInc); + } + + void copyColumnToRow(int srcFace, int srcCol, int dstFace, int dstRow, int dstInc) { + const auto srcLastOffset = _lineStride * (_dims.y - 1); + auto srcFirst = _faces[srcFace].begin() + srcCol + _lineStride; + auto srcLast = srcFirst + srcLastOffset; + + const auto dstLastOffset = (_dims.x - 1); + auto dstFirst = _faces[dstFace].begin() + dstRow * _lineStride + 1; + auto dstLast = dstFirst + dstLastOffset; + + assert(srcFirst < _faces[srcFace].end()); + assert(srcLast < _faces[srcFace].end()); + assert(dstFirst < _faces[dstFace].end()); + assert(dstLast < _faces[dstFace].end()); + + if (dstInc < 0) { + std::swap(dstFirst, dstLast); + } + + copy(srcFirst, srcLast, _lineStride, dstFirst, dstInc); + } + + void copyRowToColumn(int srcFace, int srcRow, int dstFace, int dstCol, int dstInc) { + const auto srcLastOffset = (_dims.x - 1); + auto srcFirst = _faces[srcFace].begin() + srcRow * _lineStride + 1; + auto srcLast = srcFirst + srcLastOffset; + + const auto dstLastOffset = _lineStride * (_dims.y - 1); + auto dstFirst = _faces[dstFace].begin() + dstCol + _lineStride; + auto dstLast = dstFirst + dstLastOffset; + const auto dstStride = _lineStride * dstInc; + + assert(srcFirst < _faces[srcFace].end()); + assert(srcLast < _faces[srcFace].end()); + assert(dstFirst < _faces[dstFace].end()); + assert(dstLast < _faces[dstFace].end()); + + if (dstInc < 0) { + std::swap(dstFirst, dstLast); + } + + copy(srcFirst, srcLast, 1, dstFirst, dstStride); + } +}; + +static void copySurface(const nvtt::Surface& source, glm::vec4* dest, size_t dstLineStride) { + const float* srcRedIt = source.channel(0); + const float* srcGreenIt = source.channel(1); + const float* srcBlueIt = source.channel(2); + const float* srcAlphaIt = source.channel(3); + + for (int y = 0; y < source.height(); y++) { + glm::vec4* dstColIt = dest; + for (int x = 0; x < source.width(); x++) { + *dstColIt = glm::vec4(*srcRedIt, *srcGreenIt, *srcBlueIt, *srcAlphaIt); + dstColIt++; + srcRedIt++; + srcGreenIt++; + srcBlueIt++; + srcAlphaIt++; + } + dest += dstLineStride; + } +} + +CubeMap::CubeMap(int width, int height, int mipCount) { + reset(width, height, mipCount); +} + +CubeMap::CubeMap(const std::vector& faces, int mipCount, const std::atomic& abortProcessing) { + reset(faces.front().getWidth(), faces.front().getHeight(), mipCount); + + int face; + + nvtt::Surface surface; + surface.setAlphaMode(nvtt::AlphaMode_None); + surface.setWrapMode(nvtt::WrapMode_Mirror); + + // Compute mips + for (face = 0; face < 6; face++) { + Image faceImage = 
faces[face].getConvertedToFormat(Image::Format_RGBAF); + + surface.setImage(nvtt::InputFormat_RGBA_32F, _width, _height, 1, faceImage.editBits()); + + auto mipLevel = 0; + copySurface(surface, editFace(0, face), getMipLineStride(0)); + + while (surface.canMakeNextMipmap() && !abortProcessing.load()) { + surface.buildNextMipmap(nvtt::MipmapFilter_Box); + mipLevel++; + + copySurface(surface, editFace(mipLevel, face), getMipLineStride(mipLevel)); + } + } + + if (abortProcessing.load()) { + return; + } + + for (gpu::uint16 mipLevel = 0; mipLevel < mipCount; ++mipLevel) { + Mip mip(mipLevel, this); + mip.applySeams(); + } +} + +void CubeMap::applyGamma(float value) { + for (auto& mip : _mips) { + for (auto& face : mip) { + for (auto& pixel : face) { + pixel.r = std::pow(pixel.r, value); + pixel.g = std::pow(pixel.g, value); + pixel.b = std::pow(pixel.b, value); + } + } + } +} + +void CubeMap::copyFace(int width, int height, const glm::vec4* source, size_t srcLineStride, glm::vec4* dest, size_t dstLineStride) { + for (int y = 0; y < height; y++) { + std::copy(source, source + width, dest); + source += srcLineStride; + dest += dstLineStride; + } +} + +Image CubeMap::getFaceImage(gpu::uint16 mipLevel, int face) const { + auto mipDims = getMipDimensions(mipLevel); + Image faceImage(mipDims.x, mipDims.y, Image::Format_RGBAF); + copyFace(mipDims.x, mipDims.y, getFace(mipLevel, face), getMipLineStride(mipLevel), (glm::vec4*)faceImage.editBits(), faceImage.getBytesPerLineCount() / sizeof(glm::vec4)); + return faceImage; +} + +void CubeMap::reset(int width, int height, int mipCount) { + assert(mipCount >0 && width > 0 && height > 0); + _width = width; + _height = height; + _mips.resize(mipCount); + for (auto mipLevel = 0; mipLevel < mipCount; mipLevel++) { + auto mipDimensions = getMipDimensions(mipLevel); + // Add extra pixels on edges to perform edge seam fixup (we will duplicate pixels from + // neighbouring faces) + auto mipPixelCount = (mipDimensions.x + 2 * EDGE_WIDTH) * (mipDimensions.y + 2 * EDGE_WIDTH); + + for (auto& face : _mips[mipLevel]) { + face.resize(mipPixelCount); + } + } +} + +void CubeMap::copyTo(CubeMap& other) const { + other._width = _width; + other._height = _height; + other._mips = _mips; +} + +void CubeMap::getFaceUV(const glm::vec3& dir, int* index, glm::vec2* uv) { + // Taken from https://en.wikipedia.org/wiki/Cube_mapping + float absX = std::abs(dir.x); + float absY = std::abs(dir.y); + float absZ = std::abs(dir.z); + + auto isXPositive = dir.x > 0; + auto isYPositive = dir.y > 0; + auto isZPositive = dir.z > 0; + + float maxAxis = 1.0f; + float uc = 0.0f; + float vc = 0.0f; + + // POSITIVE X + if (isXPositive && absX >= absY && absX >= absZ) { + // u (0 to 1) goes from +z to -z + // v (0 to 1) goes from -y to +y + maxAxis = absX; + uc = -dir.z; + vc = -dir.y; + *index = 0; + } + // NEGATIVE X + else if (!isXPositive && absX >= absY && absX >= absZ) { + // u (0 to 1) goes from -z to +z + // v (0 to 1) goes from -y to +y + maxAxis = absX; + uc = dir.z; + vc = -dir.y; + *index = 1; + } + // POSITIVE Y + else if (isYPositive && absY >= absX && absY >= absZ) { + // u (0 to 1) goes from -x to +x + // v (0 to 1) goes from +z to -z + maxAxis = absY; + uc = dir.x; + vc = dir.z; + *index = 2; + } + // NEGATIVE Y + else if (!isYPositive && absY >= absX && absY >= absZ) { + // u (0 to 1) goes from -x to +x + // v (0 to 1) goes from -z to +z + maxAxis = absY; + uc = dir.x; + vc = -dir.z; + *index = 3; + } + // POSITIVE Z + else if (isZPositive && absZ >= absX && absZ >= absY) { + // u 
(0 to 1) goes from -x to +x + // v (0 to 1) goes from -y to +y + maxAxis = absZ; + uc = dir.x; + vc = -dir.y; + *index = 4; + } + // NEGATIVE Z + else if (!isZPositive && absZ >= absX && absZ >= absY) { + // u (0 to 1) goes from +x to -x + // v (0 to 1) goes from -y to +y + maxAxis = absZ; + uc = -dir.x; + vc = -dir.y; + *index = 5; + } + + // Convert range from -1 to 1 to 0 to 1 + uv->x = 0.5f * (uc / maxAxis + 1.0f); + uv->y = 0.5f * (vc / maxAxis + 1.0f); +} + +glm::vec4 CubeMap::fetchLod(const glm::vec3& dir, float lod) const { + lod = glm::clamp(lod, 0.0f, _mips.size() - 1); + + gpu::uint16 loLevel = (gpu::uint16)std::floor(lod); + gpu::uint16 hiLevel = (gpu::uint16)std::ceil(lod); + float lodFrac = lod - (float)loLevel; + ConstMip loMip(loLevel, this); + ConstMip hiMip(hiLevel, this); + int face; + glm::vec2 uv; + glm::vec4 loColor; + glm::vec4 hiColor; + + getFaceUV(dir, &face, &uv); + + loColor = loMip.fetch(face, uv); + hiColor = hiMip.fetch(face, uv); + + return loColor + (hiColor - loColor) * lodFrac; +} + +struct CubeMap::GGXSamples { + float invTotalWeight; + std::vector points; +}; + +// All the GGX convolution code is inspired from: +// https://placeholderart.wordpress.com/2015/07/28/implementation-notes-runtime-environment-map-filtering-for-image-based-lighting/ +// Computation is done in tangent space so normal is always (0,0,1) which simplifies a lot of things + +void CubeMap::generateGGXSamples(GGXSamples& data, float roughness, const int resolution) { + glm::vec2 xi; + glm::vec3 L; + glm::vec3 H; + const float saTexel = (float)(4.0 * M_PI / (6.0 * resolution * resolution)); + const float mipBias = 3.0f; + const auto sampleCount = data.points.size(); + const auto hammersleySequenceLength = data.points.size(); + size_t sampleIndex = 0; + size_t hammersleySampleIndex = 0; + float NdotL; + + data.invTotalWeight = 0.0f; + + // Do some computation in tangent space + while (sampleIndex < sampleCount) { + if (hammersleySampleIndex < hammersleySequenceLength) { + xi = hammersley::evaluate((int)hammersleySampleIndex, (int)hammersleySequenceLength); + H = ggx::sample(xi, roughness); + L = H * (2.0f * H.z) - glm::vec3(0.0f, 0.0f, 1.0f); + NdotL = L.z; + hammersleySampleIndex++; + } else { + NdotL = -1.0f; + } + + while (NdotL <= 0.0f) { + // Create a purely random sample + xi.x = rand() / float(RAND_MAX); + xi.y = rand() / float(RAND_MAX); + H = ggx::sample(xi, roughness); + L = H * (2.0f * H.z) - glm::vec3(0.0f, 0.0f, 1.0f); + NdotL = L.z; + } + + float NdotH = std::max(0.0f, H.z); + float HdotV = NdotH; + float D = ggx::evaluate(NdotH, roughness); + float pdf = (D * NdotH / (4.0f * HdotV)) + 0.0001f; + float saSample = 1.0f / (float(sampleCount) * pdf + 0.0001f); + float mipLevel = std::max(0.5f * std::log2(saSample / saTexel) + mipBias, 0.0f); + + auto& sample = data.points[sampleIndex]; + sample.x = L.x; + sample.y = L.y; + sample.z = L.z; + sample.w = mipLevel; + + data.invTotalWeight += NdotL; + + sampleIndex++; + } + data.invTotalWeight = 1.0f / data.invTotalWeight; +} + +void CubeMap::convolveForGGX(CubeMap& output, const std::atomic& abortProcessing) const { + // This should match the value in the getMipLevelFromRoughness function (LightAmbient.slh) + static const float ROUGHNESS_1_MIP_RESOLUTION = 1.5f; + static const size_t MAX_SAMPLE_COUNT = 4000; + + const auto mipCount = getMipCount(); + GGXSamples params; + + params.points.reserve(MAX_SAMPLE_COUNT); + + for (gpu::uint16 mipLevel = 0; mipLevel < mipCount; ++mipLevel) { + // This is the inverse code found in 
LightAmbient.slh in getMipLevelFromRoughness + float levelAlpha = float(mipLevel) / (mipCount - ROUGHNESS_1_MIP_RESOLUTION); + float mipRoughness = levelAlpha * (1.0f + 2.0f * levelAlpha) / 3.0f; + + mipRoughness = std::max(1e-3f, mipRoughness); + mipRoughness = std::min(1.0f, mipRoughness); + + size_t mipTotalPixelCount = getMipWidth(mipLevel) * getMipHeight(mipLevel) * 6; + size_t sampleCount = 1U + size_t(4000 * mipRoughness * mipRoughness); + + sampleCount = std::min(sampleCount, 2 * mipTotalPixelCount); + sampleCount = std::min(MAX_SAMPLE_COUNT, sampleCount); + + params.points.resize(sampleCount); + generateGGXSamples(params, mipRoughness, _width); + + for (int face = 0; face < 6; face++) { + convolveMipFaceForGGX(params, output, mipLevel, face, abortProcessing); + if (abortProcessing.load()) { + return; + } + } + } +} + +void CubeMap::convolveMipFaceForGGX(const GGXSamples& samples, CubeMap& output, gpu::uint16 mipLevel, int face, const std::atomic& abortProcessing) const { + const glm::vec3* faceNormals = FACE_NORMALS + face * 4; + const glm::vec3 deltaYNormalLo = faceNormals[2] - faceNormals[0]; + const glm::vec3 deltaYNormalHi = faceNormals[3] - faceNormals[1]; + const auto mipDimensions = output.getMipDimensions(mipLevel); + const auto outputLineStride = output.getMipLineStride(mipLevel); + auto outputFacePixels = output.editFace(mipLevel, face); + + tbb::parallel_for(tbb::blocked_range2d(0, mipDimensions.y, 32, 0, mipDimensions.x, 32), [&](const tbb::blocked_range2d& range) { + auto rowRange = range.rows(); + auto colRange = range.cols(); + + for (auto y = rowRange.begin(); y < rowRange.end(); y++) { + if (abortProcessing.load()) { + break; + } + + const float yAlpha = (y + 0.5f) / mipDimensions.y; + const glm::vec3 normalXLo = faceNormals[0] + deltaYNormalLo * yAlpha; + const glm::vec3 normalXHi = faceNormals[1] + deltaYNormalHi * yAlpha; + const glm::vec3 deltaXNormal = normalXHi - normalXLo; + + for (auto x = colRange.begin(); x < colRange.end(); x++) { + const float xAlpha = (x + 0.5f) / mipDimensions.x; + // Interpolate normal for this pixel + const glm::vec3 normal = glm::normalize(normalXLo + deltaXNormal * xAlpha); + + outputFacePixels[x + y * outputLineStride] = computeConvolution(normal, samples); + } + } + }); +} + +glm::vec4 CubeMap::computeConvolution(const glm::vec3& N, const GGXSamples& samples) const { + // from tangent-space vector to world-space + glm::vec3 bitangent = std::abs(N.z) < 0.999f ? glm::vec3(0.0f, 0.0f, 1.0f) : glm::vec3(1.0f, 0.0f, 0.0f); + glm::vec3 tangent = glm::normalize(glm::cross(bitangent, N)); + bitangent = glm::cross(N, tangent); + + const size_t sampleCount = samples.points.size(); + glm::vec4 prefilteredColor = glm::vec4(0.0f); + + for (size_t i = 0; i < sampleCount; ++i) { + const auto& sample = samples.points[i]; + glm::vec3 L(sample.x, sample.y, sample.z); + float NdotL = L.z; + float mipLevel = sample.w; + // Now back to world space + L = tangent * L.x + bitangent * L.y + N * L.z; + prefilteredColor += fetchLod(L, mipLevel) * NdotL; + } + prefilteredColor = prefilteredColor * samples.invTotalWeight; + prefilteredColor.a = 1.0f; + return prefilteredColor; +} \ No newline at end of file diff --git a/libraries/image/src/image/CubeMap.h b/libraries/image/src/image/CubeMap.h new file mode 100644 index 0000000000..0745267cb6 --- /dev/null +++ b/libraries/image/src/image/CubeMap.h @@ -0,0 +1,92 @@ +// +// CubeMap.h +// image/src/image +// +// Created by Olivier Prat on 03/27/2019. +// Copyright 2019 High Fidelity, Inc. 
+// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +#ifndef hifi_image_CubeMap_h +#define hifi_image_CubeMap_h + +#include +#include +#include +#include +#include + +#include "Image.h" + +namespace image { + + class CubeMap { + + enum { + EDGE_WIDTH = 1 + }; + + public: + + CubeMap(int width, int height, int mipCount); + CubeMap(const std::vector& faces, int mipCount, const std::atomic& abortProcessing = false); + + void reset(int width, int height, int mipCount); + void copyTo(CubeMap& other) const; + + void applyGamma(float value); + + gpu::uint16 getMipCount() const { return (gpu::uint16)_mips.size(); } + int getMipWidth(gpu::uint16 mipLevel) const { + return std::max(1, _width >> mipLevel); + } + int getMipHeight(gpu::uint16 mipLevel) const { + return std::max(1, _height >> mipLevel); + } + gpu::Vec2i getMipDimensions(gpu::uint16 mipLevel) const { + return gpu::Vec2i(getMipWidth(mipLevel), getMipHeight(mipLevel)); + } + + size_t getMipLineStride(gpu::uint16 mipLevel) const { + return getMipWidth(mipLevel) + 2 * EDGE_WIDTH; + } + + glm::vec4* editFace(gpu::uint16 mipLevel, int face) { + return _mips[mipLevel][face].data() + (getMipLineStride(mipLevel) + 1)*EDGE_WIDTH; + } + + const glm::vec4* getFace(gpu::uint16 mipLevel, int face) const { + return _mips[mipLevel][face].data() + (getMipLineStride(mipLevel) + 1)*EDGE_WIDTH; + } + + Image getFaceImage(gpu::uint16 mipLevel, int face) const; + + void convolveForGGX(CubeMap& output, const std::atomic& abortProcessing) const; + glm::vec4 fetchLod(const glm::vec3& dir, float lod) const; + + private: + + struct GGXSamples; + class Mip; + class ConstMip; + + using Face = std::vector; + using Faces = std::array; + + int _width; + int _height; + std::vector _mips; + + static void getFaceUV(const glm::vec3& dir, int* index, glm::vec2* uv); + static void generateGGXSamples(GGXSamples& data, float roughness, const int resolution); + static void copyFace(int width, int height, const glm::vec4* source, size_t srcLineStride, glm::vec4* dest, size_t dstLineStride); + void convolveMipFaceForGGX(const GGXSamples& samples, CubeMap& output, gpu::uint16 mipLevel, int face, const std::atomic& abortProcessing) const; + glm::vec4 computeConvolution(const glm::vec3& normal, const GGXSamples& samples) const; + + }; + +} + +#endif // hifi_image_CubeMap_h diff --git a/libraries/image/src/image/Image.cpp b/libraries/image/src/image/Image.cpp index df5ed15867..2ef83e42d8 100644 --- a/libraries/image/src/image/Image.cpp +++ b/libraries/image/src/image/Image.cpp @@ -6,28 +6,91 @@ using namespace image; +Image::Image(int width, int height, Format format) : + _dims(width, height), + _format(format) { + if (_format == Format_RGBAF) { + _floatData.resize(width*height); + } else { + _packedData = QImage(width, height, (QImage::Format)format); + } +} + +size_t Image::getByteCount() const { + if (_format == Format_RGBAF) { + return sizeof(FloatPixels::value_type) * _floatData.size(); + } else { + return _packedData.byteCount(); + } +} + +size_t Image::getBytesPerLineCount() const { + if (_format == Format_RGBAF) { + return sizeof(FloatPixels::value_type) * _dims.x; + } else { + return _packedData.bytesPerLine(); + } +} + +glm::uint8* Image::editScanLine(int y) { + if (_format == Format_RGBAF) { + return reinterpret_cast(_floatData.data() + y * _dims.x); + } else { + return _packedData.scanLine(y); + } +} + +const glm::uint8* Image::getScanLine(int y) const { + if (_format == 
Format_RGBAF) { + return reinterpret_cast(_floatData.data() + y * _dims.x); + } else { + return _packedData.scanLine(y); + } +} + +glm::uint8* Image::editBits() { + if (_format == Format_RGBAF) { + return reinterpret_cast(_floatData.data()); + } else { + return _packedData.bits(); + } +} + +const glm::uint8* Image::getBits() const { + if (_format == Format_RGBAF) { + return reinterpret_cast(_floatData.data()); + } else { + return _packedData.bits(); + } +} + Image Image::getScaled(glm::uvec2 dstSize, AspectRatioMode ratioMode, TransformationMode transformMode) const { - if ((Image::Format)_data.format() == Image::Format_PACKED_FLOAT) { - // Start by converting to full float - glm::vec4* floatPixels = new glm::vec4[getWidth()*getHeight()]; - auto unpackFunc = getHDRUnpackingFunction(); - auto floatDataIt = floatPixels; - for (glm::uint32 lineNb = 0; lineNb < getHeight(); lineNb++) { - const glm::uint32* srcPixelIt = reinterpret_cast(getScanLine((int)lineNb)); - const glm::uint32* srcPixelEnd = srcPixelIt + getWidth(); - - while (srcPixelIt < srcPixelEnd) { - *floatDataIt = glm::vec4(unpackFunc(*srcPixelIt), 1.0f); - ++srcPixelIt; - ++floatDataIt; - } - } - - // Perform filtered resize with NVTT - static_assert(sizeof(glm::vec4) == 4 * sizeof(float), "Assuming glm::vec4 holds 4 floats"); + if (_format == Format_PACKED_FLOAT || _format == Format_RGBAF) { nvtt::Surface surface; - surface.setImage(nvtt::InputFormat_RGBA_32F, getWidth(), getHeight(), 1, floatPixels); - delete[] floatPixels; + + if (_format == Format_RGBAF) { + surface.setImage(nvtt::InputFormat_RGBA_32F, getWidth(), getHeight(), 1, _floatData.data()); + } else { + // Start by converting to full float + glm::vec4* floatPixels = new glm::vec4[getWidth()*getHeight()]; + auto unpackFunc = getHDRUnpackingFunction(); + auto floatDataIt = floatPixels; + for (glm::uint32 lineNb = 0; lineNb < getHeight(); lineNb++) { + const glm::uint32* srcPixelIt = reinterpret_cast(getScanLine((int)lineNb)); + const glm::uint32* srcPixelEnd = srcPixelIt + getWidth(); + + while (srcPixelIt < srcPixelEnd) { + *floatDataIt = glm::vec4(unpackFunc(*srcPixelIt), 1.0f); + ++srcPixelIt; + ++floatDataIt; + } + } + + // Perform filtered resize with NVTT + static_assert(sizeof(glm::vec4) == 4 * sizeof(float), "Assuming glm::vec4 holds 4 floats"); + surface.setImage(nvtt::InputFormat_RGBA_32F, getWidth(), getHeight(), 1, floatPixels); + delete[] floatPixels; + } nvtt::ResizeFilter filter = nvtt::ResizeFilter_Kaiser; if (transformMode == Qt::TransformationMode::FastTransformation) { @@ -35,44 +98,148 @@ Image Image::getScaled(glm::uvec2 dstSize, AspectRatioMode ratioMode, Transforma } surface.resize(dstSize.x, dstSize.y, 1, filter); - // And convert back to original format - QImage resizedImage((int)dstSize.x, (int)dstSize.y, (QImage::Format)Image::Format_PACKED_FLOAT); - - auto packFunc = getHDRPackingFunction(); auto srcRedIt = reinterpret_cast(surface.channel(0)); auto srcGreenIt = reinterpret_cast(surface.channel(1)); auto srcBlueIt = reinterpret_cast(surface.channel(2)); - for (glm::uint32 lineNb = 0; lineNb < dstSize.y; lineNb++) { - glm::uint32* dstPixelIt = reinterpret_cast(resizedImage.scanLine((int)lineNb)); - glm::uint32* dstPixelEnd = dstPixelIt + dstSize.x; + auto srcAlphaIt = reinterpret_cast(surface.channel(3)); + + if (_format == Format_RGBAF) { + Image output(_dims.x, _dims.y, _format); + auto dstPixelIt = output._floatData.begin(); + auto dstPixelEnd = output._floatData.end(); while (dstPixelIt < dstPixelEnd) { - *dstPixelIt = 
packFunc(glm::vec3(*srcRedIt, *srcGreenIt, *srcBlueIt)); + *dstPixelIt = glm::vec4(*srcRedIt, *srcGreenIt, *srcBlueIt, *srcAlphaIt); ++srcRedIt; ++srcGreenIt; ++srcBlueIt; + ++srcAlphaIt; + ++dstPixelIt; } + + return output; + } else { + // And convert back to original format + QImage resizedImage((int)dstSize.x, (int)dstSize.y, (QImage::Format)Image::Format_PACKED_FLOAT); + + auto packFunc = getHDRPackingFunction(); + for (glm::uint32 lineNb = 0; lineNb < dstSize.y; lineNb++) { + glm::uint32* dstPixelIt = reinterpret_cast(resizedImage.scanLine((int)lineNb)); + glm::uint32* dstPixelEnd = dstPixelIt + dstSize.x; + + while (dstPixelIt < dstPixelEnd) { + *dstPixelIt = packFunc(glm::vec3(*srcRedIt, *srcGreenIt, *srcBlueIt)); + ++srcRedIt; + ++srcGreenIt; + ++srcBlueIt; + ++dstPixelIt; + } + } + return resizedImage; } - return resizedImage; } else { - return _data.scaled(fromGlm(dstSize), ratioMode, transformMode); + return _packedData.scaled(fromGlm(dstSize), ratioMode, transformMode); } } Image Image::getConvertedToFormat(Format newFormat) const { - assert(getFormat() != Format_PACKED_FLOAT); - return _data.convertToFormat((QImage::Format)newFormat); + const float MAX_COLOR_VALUE = 255.0f; + + if (newFormat == _format) { + return *this; + } else if ((_format != Format_R11G11B10F && _format != Format_RGBAF) && (newFormat != Format_R11G11B10F && newFormat != Format_RGBAF)) { + return _packedData.convertToFormat((QImage::Format)newFormat); + } else if (_format == Format_PACKED_FLOAT) { + Image newImage(_dims.x, _dims.y, newFormat); + + switch (newFormat) { + case Format_RGBAF: + convertToFloatFromPacked(getBits(), _dims.x, _dims.y, getBytesPerLineCount(), gpu::Element::COLOR_R11G11B10, newImage._floatData.data(), _dims.x); + break; + + default: + { + auto unpackFunc = getHDRUnpackingFunction(); + const glm::uint32* srcIt = reinterpret_cast(getBits()); + + for (int y = 0; y < _dims.y; y++) { + for (int x = 0; x < _dims.x; x++) { + auto color = glm::clamp(unpackFunc(*srcIt) * MAX_COLOR_VALUE, 0.0f, 255.0f); + newImage.setPackedPixel(x, y, qRgb(color.r, color.g, color.b)); + srcIt++; + } + } + break; + } + } + return newImage; + } else if (_format == Format_RGBAF) { + Image newImage(_dims.x, _dims.y, newFormat); + + switch (newFormat) { + case Format_R11G11B10F: + convertToPackedFromFloat(newImage.editBits(), _dims.x, _dims.y, getBytesPerLineCount(), gpu::Element::COLOR_R11G11B10, _floatData.data(), _dims.x); + break; + + default: + { + FloatPixels::const_iterator srcIt = _floatData.begin(); + + for (int y = 0; y < _dims.y; y++) { + for (int x = 0; x < _dims.x; x++) { + auto color = glm::clamp((*srcIt) * MAX_COLOR_VALUE, 0.0f, 255.0f); + newImage.setPackedPixel(x, y, qRgba(color.r, color.g, color.b, color.a)); + srcIt++; + } + } + break; + } + } + return newImage; + } else { + Image newImage(_dims.x, _dims.y, newFormat); + assert(newImage.hasFloatFormat()); + + if (newFormat == Format_RGBAF) { + FloatPixels::iterator dstIt = newImage._floatData.begin(); + + for (int y = 0; y < _dims.y; y++) { + auto line = (const QRgb*)getScanLine(y); + for (int x = 0; x < _dims.x; x++) { + QRgb pixel = line[x]; + *dstIt = glm::vec4(qRed(pixel), qGreen(pixel), qBlue(pixel), qAlpha(pixel)) / MAX_COLOR_VALUE; + dstIt++; + } + } + } else { + auto packFunc = getHDRPackingFunction(); + glm::uint32* dstIt = reinterpret_cast( newImage.editBits() ); + + for (int y = 0; y < _dims.y; y++) { + auto line = (const QRgb*)getScanLine(y); + for (int x = 0; x < _dims.x; x++) { + QRgb pixel = line[x]; + *dstIt = 
packFunc(glm::vec3(qRed(pixel), qGreen(pixel), qBlue(pixel)) / MAX_COLOR_VALUE); + dstIt++; + } + } + } + return newImage; + } } void Image::invertPixels() { - _data.invertPixels(QImage::InvertRgba); + assert(_format != Format_PACKED_FLOAT && _format != Format_RGBAF); + _packedData.invertPixels(QImage::InvertRgba); } Image Image::getSubImage(QRect rect) const { - return _data.copy(rect); + assert(_format != Format_RGBAF); + return _packedData.copy(rect); } Image Image::getMirrored(bool horizontal, bool vertical) const { - return _data.mirrored(horizontal, vertical); + assert(_format != Format_RGBAF); + return _packedData.mirrored(horizontal, vertical); } diff --git a/libraries/image/src/image/Image.h b/libraries/image/src/image/Image.h index bfecf4f2a1..129061900f 100644 --- a/libraries/image/src/image/Image.h +++ b/libraries/image/src/image/Image.h @@ -48,37 +48,69 @@ namespace image { Format_RGBA8888_Premultiplied = QImage::Format_RGBA8888_Premultiplied, Format_Grayscale8 = QImage::Format_Grayscale8, Format_R11G11B10F = QImage::Format_RGB30, - Format_PACKED_FLOAT = Format_R11G11B10F + Format_PACKED_FLOAT = Format_R11G11B10F, + // RGBA 32 bit single precision float per component + Format_RGBAF = 100 }; using AspectRatioMode = Qt::AspectRatioMode; using TransformationMode = Qt::TransformationMode; - Image() {} - Image(int width, int height, Format format) : _data(width, height, (QImage::Format)format) {} - Image(const QImage& data) : _data(data) {} - void operator=(const QImage& image) { - _data = image; + Image() : _dims(0,0) {} + Image(int width, int height, Format format); + Image(const QImage& data) : _packedData(data), _dims(data.width(), data.height()), _format((Format)data.format()) {} + + void operator=(const QImage& other) { + _packedData = other; + _floatData.clear(); + _dims.x = other.width(); + _dims.y = other.height(); + _format = (Format)other.format(); } - bool isNull() const { return _data.isNull(); } - - Format getFormat() const { return (Format)_data.format(); } - bool hasAlphaChannel() const { return _data.hasAlphaChannel(); } - - glm::uint32 getWidth() const { return (glm::uint32)_data.width(); } - glm::uint32 getHeight() const { return (glm::uint32)_data.height(); } - glm::uvec2 getSize() const { return toGlm(_data.size()); } - size_t getByteCount() const { return _data.byteCount(); } - - QRgb getPixel(int x, int y) const { return _data.pixel(x, y); } - void setPixel(int x, int y, QRgb value) { - _data.setPixel(x, y, value); + void operator=(const Image& other) { + if (&other != this) { + _packedData = other._packedData; + _floatData = other._floatData; + _dims = other._dims; + _format = other._format; + } } - glm::uint8* editScanLine(int y) { return _data.scanLine(y); } - const glm::uint8* getScanLine(int y) const { return _data.scanLine(y); } - const glm::uint8* getBits() const { return _data.constBits(); } + bool isNull() const { return _packedData.isNull() && _floatData.empty(); } + + Format getFormat() const { return _format; } + bool hasAlphaChannel() const { return _packedData.hasAlphaChannel() || _format == Format_RGBAF; } + bool hasFloatFormat() const { return _format == Format_R11G11B10F || _format == Format_RGBAF; } + + glm::uint32 getWidth() const { return (glm::uint32)_dims.x; } + glm::uint32 getHeight() const { return (glm::uint32)_dims.y; } + glm::uvec2 getSize() const { return glm::uvec2(_dims); } + size_t getByteCount() const; + size_t getBytesPerLineCount() const; + + QRgb getPackedPixel(int x, int y) const { + assert(_format != Format_RGBAF); + 
return _packedData.pixel(x, y); + } + void setPackedPixel(int x, int y, QRgb value) { + assert(_format != Format_RGBAF); + _packedData.setPixel(x, y, value); + } + + glm::vec4 getFloatPixel(int x, int y) const { + assert(_format == Format_RGBAF); + return _floatData[x + y*_dims.x]; + } + void setFloatPixel(int x, int y, const glm::vec4& value) { + assert(_format == Format_RGBAF); + _floatData[x + y * _dims.x] = value; + } + + glm::uint8* editScanLine(int y); + const glm::uint8* getScanLine(int y) const; + glm::uint8* editBits(); + const glm::uint8* getBits() const; Image getScaled(glm::uvec2 newSize, AspectRatioMode ratioMode, TransformationMode transformationMode = Qt::SmoothTransformation) const; Image getConvertedToFormat(Format newFormat) const; @@ -90,7 +122,13 @@ namespace image { private: - QImage _data; + using FloatPixels = std::vector; + + // For QImage supported formats + QImage _packedData; + FloatPixels _floatData; + glm::ivec2 _dims; + Format _format; }; } // namespace image diff --git a/libraries/image/src/image/TextureProcessing.cpp b/libraries/image/src/image/TextureProcessing.cpp index 037229ace5..5b3d546f8e 100644 --- a/libraries/image/src/image/TextureProcessing.cpp +++ b/libraries/image/src/image/TextureProcessing.cpp @@ -29,10 +29,10 @@ #include "OpenEXRReader.h" #endif #include "ImageLogging.h" +#include "CubeMap.h" using namespace gpu; -#define CPU_MIPMAPS 1 #include #undef _CRT_SECURE_NO_WARNINGS @@ -111,11 +111,13 @@ TextureUsage::TextureLoader TextureUsage::getTextureLoaderForType(Type type, con return image::TextureUsage::createEmissiveTextureFromImage; case LIGHTMAP_TEXTURE: return image::TextureUsage::createLightmapTextureFromImage; - case CUBE_TEXTURE: + case SKY_TEXTURE: + return image::TextureUsage::createCubeTextureFromImage; + case AMBIENT_TEXTURE: if (options.value("generateIrradiance", true).toBool()) { - return image::TextureUsage::createCubeTextureFromImage; + return image::TextureUsage::createAmbientCubeTextureAndIrradianceFromImage; } else { - return image::TextureUsage::createCubeTextureFromImageWithoutIrradiance; + return image::TextureUsage::createAmbientCubeTextureFromImage; } case BUMP_TEXTURE: return image::TextureUsage::createNormalTextureFromBumpImage; @@ -186,14 +188,24 @@ gpu::TexturePointer TextureUsage::createMetallicTextureFromImage(Image&& srcImag return process2DTextureGrayscaleFromImage(std::move(srcImage), srcImageName, compress, target, false, abortProcessing); } -gpu::TexturePointer TextureUsage::createCubeTextureFromImage(Image&& srcImage, const std::string& srcImageName, +gpu::TexturePointer TextureUsage::createCubeTextureAndIrradianceFromImage(Image&& srcImage, const std::string& srcImageName, bool compress, BackendTarget target, const std::atomic& abortProcessing) { - return processCubeTextureColorFromImage(std::move(srcImage), srcImageName, compress, target, true, abortProcessing); + return processCubeTextureColorFromImage(std::move(srcImage), srcImageName, compress, target, CUBE_GENERATE_IRRADIANCE, abortProcessing); } -gpu::TexturePointer TextureUsage::createCubeTextureFromImageWithoutIrradiance(Image&& srcImage, const std::string& srcImageName, - bool compress, BackendTarget target, const std::atomic& abortProcessing) { - return processCubeTextureColorFromImage(std::move(srcImage), srcImageName, compress, target, false, abortProcessing); +gpu::TexturePointer TextureUsage::createCubeTextureFromImage(Image&& srcImage, const std::string& srcImageName, + bool compress, BackendTarget target, const std::atomic& abortProcessing) 
{ + return processCubeTextureColorFromImage(std::move(srcImage), srcImageName, compress, target, CUBE_DEFAULT, abortProcessing); +} + +gpu::TexturePointer TextureUsage::createAmbientCubeTextureFromImage(Image&& image, const std::string& srcImageName, + bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing) { + return processCubeTextureColorFromImage(std::move(image), srcImageName, compress, target, CUBE_GGX_CONVOLVE, abortProcessing); +} + +gpu::TexturePointer TextureUsage::createAmbientCubeTextureAndIrradianceFromImage(Image&& image, const std::string& srcImageName, + bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing) { + return processCubeTextureColorFromImage(std::move(image), srcImageName, compress, target, CUBE_GENERATE_IRRADIANCE | CUBE_GGX_CONVOLVE, abortProcessing); } static float denormalize(float value, const float minValue) { @@ -215,11 +227,17 @@ static uint32 packR11G11B10F(const glm::vec3& color) { return glm::packF2x11_1x10(ucolor); } +static uint32 packUnorm4x8(const glm::vec3& color) { + return glm::packUnorm4x8(glm::vec4(color, 1.0f)); +} + static std::function getHDRPackingFunction(const gpu::Element& format) { if (format == gpu::Element::COLOR_RGB9E5) { return glm::packF3x9_E1x5; } else if (format == gpu::Element::COLOR_R11G11B10) { return packR11G11B10F; + } else if (format == gpu::Element::COLOR_RGBA_32 || format == gpu::Element::COLOR_SRGBA_32 || format == gpu::Element::COLOR_BGRA_32 || format == gpu::Element::COLOR_SBGRA_32) { + return packUnorm4x8; } else { qCWarning(imagelogging) << "Unknown handler format"; Q_UNREACHABLE(); @@ -231,18 +249,24 @@ std::function getHDRPackingFunction() { return getHDRPackingFunction(GPU_CUBEMAP_HDR_FORMAT); } -std::function getHDRUnpackingFunction() { - if (GPU_CUBEMAP_HDR_FORMAT == gpu::Element::COLOR_RGB9E5) { +std::function getHDRUnpackingFunction(const gpu::Element& format) { + if (format == gpu::Element::COLOR_RGB9E5) { return glm::unpackF3x9_E1x5; - } else if (GPU_CUBEMAP_HDR_FORMAT == gpu::Element::COLOR_R11G11B10) { + } else if (format == gpu::Element::COLOR_R11G11B10) { return glm::unpackF2x11_1x10; + } else if (format == gpu::Element::COLOR_RGBA_32 || format == gpu::Element::COLOR_SRGBA_32 || format == gpu::Element::COLOR_BGRA_32 || format == gpu::Element::COLOR_SBGRA_32) { + return glm::unpackUnorm4x8; } else { - qCWarning(imagelogging) << "Unknown HDR encoding format in Image"; + qCWarning(imagelogging) << "Unknown handler format"; Q_UNREACHABLE(); return nullptr; } } +std::function getHDRUnpackingFunction() { + return getHDRUnpackingFunction(GPU_CUBEMAP_HDR_FORMAT); +} + Image processRawImageData(QIODevice& content, const std::string& filename) { // Help the Image loader by extracting the image file format from the url filename ext. // Some tga are not created properly without it. 
@@ -490,13 +514,15 @@ struct MyErrorHandler : public nvtt::ErrorHandler { } }; +#if defined(NVTT_API) class SequentialTaskDispatcher : public nvtt::TaskDispatcher { public: - SequentialTaskDispatcher(const std::atomic& abortProcessing) : _abortProcessing(abortProcessing) {}; + SequentialTaskDispatcher(const std::atomic& abortProcessing = false) : _abortProcessing(abortProcessing) { + } const std::atomic& _abortProcessing; - virtual void dispatch(nvtt::Task* task, void* context, int count) override { + void dispatch(nvtt::Task* task, void* context, int count) override { for (int i = 0; i < count; i++) { if (!_abortProcessing.load()) { task(context, i); @@ -506,108 +532,137 @@ public: } } }; +#endif -void generateHDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic& abortProcessing, int face) { - // Take a local copy to force move construction - // https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#f18-for-consume-parameters-pass-by-x-and-stdmove-the-parameter - Image localCopy = std::move(image); +void convertToFloatFromPacked(const unsigned char* source, int width, int height, size_t srcLineByteStride, gpu::Element sourceFormat, + glm::vec4* output, size_t outputLinePixelStride) { + glm::vec4* outputIt; + auto unpackFunc = getHDRUnpackingFunction(sourceFormat); - assert(localCopy.getFormat() == Image::Format_PACKED_FLOAT); - - const int width = localCopy.getWidth(), height = localCopy.getHeight(); - std::vector data; - std::vector::iterator dataIt; - auto mipFormat = texture->getStoredMipFormat(); - std::function unpackFunc = getHDRUnpackingFunction(); - - nvtt::InputFormat inputFormat = nvtt::InputFormat_RGBA_32F; - nvtt::WrapMode wrapMode = nvtt::WrapMode_Mirror; - nvtt::AlphaMode alphaMode = nvtt::AlphaMode_None; - - nvtt::CompressionOptions compressionOptions; - compressionOptions.setQuality(nvtt::Quality_Production); - - // TODO: gles: generate ETC mips instead? - if (mipFormat == gpu::Element::COLOR_COMPRESSED_BCX_HDR_RGB) { - compressionOptions.setFormat(nvtt::Format_BC6); - } else if (mipFormat == gpu::Element::COLOR_RGB9E5) { - compressionOptions.setFormat(nvtt::Format_RGB); - compressionOptions.setPixelType(nvtt::PixelType_Float); - compressionOptions.setPixelFormat(32, 32, 32, 0); - } else if (mipFormat == gpu::Element::COLOR_R11G11B10) { - compressionOptions.setFormat(nvtt::Format_RGB); - compressionOptions.setPixelType(nvtt::PixelType_Float); - compressionOptions.setPixelFormat(32, 32, 32, 0); - } else { - qCWarning(imagelogging) << "Unknown mip format"; - Q_UNREACHABLE(); - return; - } - - data.resize(width * height); - dataIt = data.begin(); + outputLinePixelStride -= width; + outputIt = output; for (auto lineNb = 0; lineNb < height; lineNb++) { - const uint32* srcPixelIt = reinterpret_cast(localCopy.getScanLine(lineNb)); + const uint32* srcPixelIt = reinterpret_cast(source + lineNb * srcLineByteStride); const uint32* srcPixelEnd = srcPixelIt + width; while (srcPixelIt < srcPixelEnd) { - *dataIt = glm::vec4(unpackFunc(*srcPixelIt), 1.0f); + *outputIt = glm::vec4(unpackFunc(*srcPixelIt), 1.0f); ++srcPixelIt; - ++dataIt; + ++outputIt; } + outputIt += outputLinePixelStride; } - assert(dataIt == data.end()); +} - // We're done with the localCopy, free up the memory to avoid bloating the heap - localCopy = Image(); // Image doesn't have a clear function, so override it with an empty one. 
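+// Usage sketch, assuming a tightly packed R11G11B10F source buffer 'src' of width * height 32-bit texels:
+//     std::vector<glm::vec4> pixels(width * height);
+//     convertToFloatFromPacked(src, width, height, width * sizeof(glm::uint32),
+//                              gpu::Element::COLOR_R11G11B10, pixels.data(), width);
+// convertToPackedFromFloat below performs the inverse conversion back into a packed buffer.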
+void convertToPackedFromFloat(unsigned char* output, int width, int height, size_t outputLineByteStride, gpu::Element outputFormat, + const glm::vec4* source, size_t srcLinePixelStride) { + const glm::vec4* sourceIt; + auto packFunc = getHDRPackingFunction(outputFormat); + + srcLinePixelStride -= width; + sourceIt = source; + for (auto lineNb = 0; lineNb < height; lineNb++) { + uint32* outPixelIt = reinterpret_cast(output + lineNb * outputLineByteStride); + uint32* outPixelEnd = outPixelIt + width; + + while (outPixelIt < outPixelEnd) { + *outPixelIt = packFunc(*sourceIt); + ++outPixelIt; + ++sourceIt; + } + sourceIt += srcLinePixelStride; + } +} + +nvtt::OutputHandler* getNVTTCompressionOutputHandler(gpu::Texture* outputTexture, int face, nvtt::CompressionOptions& compressionOptions) { + auto outputFormat = outputTexture->getStoredMipFormat(); + bool useNVTT = false; + + compressionOptions.setQuality(nvtt::Quality_Production); + + if (outputFormat == gpu::Element::COLOR_COMPRESSED_BCX_HDR_RGB) { + useNVTT = true; + compressionOptions.setFormat(nvtt::Format_BC6); + } else if (outputFormat == gpu::Element::COLOR_RGB9E5) { + compressionOptions.setFormat(nvtt::Format_RGB); + compressionOptions.setPixelType(nvtt::PixelType_Float); + compressionOptions.setPixelFormat(32, 32, 32, 0); + } else if (outputFormat == gpu::Element::COLOR_R11G11B10) { + compressionOptions.setFormat(nvtt::Format_RGB); + compressionOptions.setPixelType(nvtt::PixelType_Float); + compressionOptions.setPixelFormat(32, 32, 32, 0); + } else if (outputFormat == gpu::Element::COLOR_SRGBA_32) { + useNVTT = true; + compressionOptions.setFormat(nvtt::Format_RGB); + compressionOptions.setPixelType(nvtt::PixelType_UnsignedNorm); + compressionOptions.setPixelFormat(8, 8, 8, 0); + } else { + qCWarning(imagelogging) << "Unknown mip format"; + Q_UNREACHABLE(); + return nullptr; + } + + if (!useNVTT) { + // Don't use NVTT (at least version 2.1) as it outputs wrong RGB9E5 and R11G11B10F values from floats + return new PackedFloatOutputHandler(outputTexture, face, outputFormat); + } else { + return new OutputHandler(outputTexture, face); + } +} + +void convertImageToHDRTexture(gpu::Texture* texture, Image&& image, BackendTarget target, int baseMipLevel, bool buildMips, const std::atomic& abortProcessing, int face) { + assert(image.hasFloatFormat()); + + Image localCopy = image.getConvertedToFormat(Image::Format_RGBAF); + + const int width = localCopy.getWidth(); + const int height = localCopy.getHeight(); nvtt::OutputOptions outputOptions; outputOptions.setOutputHeader(false); - std::unique_ptr outputHandler; + + nvtt::CompressionOptions compressionOptions; + std::unique_ptr outputHandler{ getNVTTCompressionOutputHandler(texture, face, compressionOptions) }; + MyErrorHandler errorHandler; outputOptions.setErrorHandler(&errorHandler); nvtt::Context context; - int mipLevel = 0; - - if (mipFormat == gpu::Element::COLOR_RGB9E5 || mipFormat == gpu::Element::COLOR_R11G11B10) { - // Don't use NVTT (at least version 2.1) as it outputs wrong RGB9E5 and R11G11B10F values from floats - outputHandler.reset(new PackedFloatOutputHandler(texture, face, mipFormat)); - } else { - outputHandler.reset(new OutputHandler(texture, face)); - } + int mipLevel = baseMipLevel; outputOptions.setOutputHandler(outputHandler.get()); nvtt::Surface surface; - surface.setImage(inputFormat, width, height, 1, &(*data.begin())); - surface.setAlphaMode(alphaMode); - surface.setWrapMode(wrapMode); + surface.setImage(nvtt::InputFormat_RGBA_32F, width, height, 1, 
localCopy.getBits()); + surface.setAlphaMode(nvtt::AlphaMode_None); + surface.setWrapMode(nvtt::WrapMode_Mirror); SequentialTaskDispatcher dispatcher(abortProcessing); nvtt::Compressor compressor; context.setTaskDispatcher(&dispatcher); context.compress(surface, face, mipLevel++, compressionOptions, outputOptions); - while (surface.canMakeNextMipmap() && !abortProcessing.load()) { - surface.buildNextMipmap(nvtt::MipmapFilter_Box); - context.compress(surface, face, mipLevel++, compressionOptions, outputOptions); + if (buildMips) { + while (surface.canMakeNextMipmap() && !abortProcessing.load()) { + surface.buildNextMipmap(nvtt::MipmapFilter_Box); + context.compress(surface, face, mipLevel++, compressionOptions, outputOptions); + } } } -void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic& abortProcessing, int face) { +void convertImageToLDRTexture(gpu::Texture* texture, Image&& image, BackendTarget target, int baseMipLevel, bool buildMips, const std::atomic& abortProcessing, int face) { // Take a local copy to force move construction // https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#f18-for-consume-parameters-pass-by-x-and-stdmove-the-parameter Image localCopy = std::move(image); - assert(localCopy.getFormat() != Image::Format_PACKED_FLOAT); - if (localCopy.getFormat() != Image::Format_ARGB32) { - localCopy = localCopy.getConvertedToFormat(Image::Format_ARGB32); - } - const int width = localCopy.getWidth(), height = localCopy.getHeight(); auto mipFormat = texture->getStoredMipFormat(); + int mipLevel = baseMipLevel; if (target != BackendTarget::GLES32) { + if (localCopy.getFormat() != Image::Format_ARGB32) { + localCopy = localCopy.getConvertedToFormat(Image::Format_ARGB32); + } + const void* data = static_cast(localCopy.getBits()); nvtt::TextureType textureType = nvtt::TextureType_2D; nvtt::InputFormat inputFormat = nvtt::InputFormat_BGRA_8UB; @@ -618,23 +673,22 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, float inputGamma = 2.2f; float outputGamma = 2.2f; - nvtt::InputOptions inputOptions; - inputOptions.setTextureLayout(textureType, width, height); + nvtt::Surface surface; + surface.setImage(inputFormat, width, height, 1, data); + surface.setAlphaMode(alphaMode); + surface.setWrapMode(wrapMode); - inputOptions.setMipmapData(data, width, height); - // setMipmapData copies the memory, so free up the memory afterward to avoid bloating the heap + // Surface copies the memory, so free up the memory afterward to avoid bloating the heap data = nullptr; localCopy = Image(); // Image doesn't have a clear function, so override it with an empty one. 
+ nvtt::InputOptions inputOptions; + inputOptions.setTextureLayout(textureType, width, height); + inputOptions.setFormat(inputFormat); inputOptions.setGamma(inputGamma, outputGamma); - inputOptions.setAlphaMode(alphaMode); - inputOptions.setWrapMode(wrapMode); inputOptions.setRoundMode(roundMode); - inputOptions.setMipmapGeneration(true); - inputOptions.setMipmapFilter(nvtt::MipmapFilter_Box); - nvtt::CompressionOptions compressionOptions; compressionOptions.setQuality(nvtt::Quality_Production); @@ -718,11 +772,22 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, outputOptions.setErrorHandler(&errorHandler); SequentialTaskDispatcher dispatcher(abortProcessing); - nvtt::Compressor compressor; - compressor.setTaskDispatcher(&dispatcher); - compressor.process(inputOptions, compressionOptions, outputOptions); + nvtt::Compressor context; + + context.compress(surface, face, mipLevel++, compressionOptions, outputOptions); + if (buildMips) { + while (surface.canMakeNextMipmap() && !abortProcessing.load()) { + surface.buildNextMipmap(nvtt::MipmapFilter_Box); + context.compress(surface, face, mipLevel++, compressionOptions, outputOptions); + } + } } else { - int numMips = 1 + (int)log2(std::max(width, height)); + int numMips = 1; + + if (buildMips) { + numMips += (int)log2(std::max(width, height)) - baseMipLevel; + } + assert(numMips > 0); Etc::RawImage *mipMaps = new Etc::RawImage[numMips]; Etc::Image::Format etcFormat = Etc::Image::Format::DEFAULT; @@ -756,23 +821,13 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, const float effort = 1.0f; const int numEncodeThreads = 4; int encodingTime; - const float MAX_COLOR = 255.0f; - std::vector floatData; - floatData.resize(width * height); - for (int y = 0; y < height; y++) { - QRgb *line = (QRgb *)localCopy.editScanLine(y); - for (int x = 0; x < width; x++) { - QRgb &pixel = line[x]; - floatData[x + y * width] = vec4(qRed(pixel), qGreen(pixel), qBlue(pixel), qAlpha(pixel)) / MAX_COLOR; - } + if (localCopy.getFormat() != Image::Format_RGBAF) { + localCopy = localCopy.getConvertedToFormat(Image::Format_RGBAF); } - // free up the memory afterward to avoid bloating the heap - localCopy = Image(); // Image doesn't have a clear function, so override it with an empty one. 
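+ // Format_RGBAF stores the image as one contiguous glm::vec4 per texel, so its buffer can be handed
+ // to Etc::EncodeMipmaps directly instead of staging a separate floatData vector as before.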
- Etc::EncodeMipmaps( - (float *)floatData.data(), width, height, + (float *)localCopy.editBits(), width, height, etcFormat, errorMetric, effort, numEncodeThreads, numEncodeThreads, numMips, Etc::FILTER_WRAP_NONE, @@ -782,9 +837,9 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, for (int i = 0; i < numMips; i++) { if (mipMaps[i].paucEncodingBits.get()) { if (face >= 0) { - texture->assignStoredMipFace(i, face, mipMaps[i].uiEncodingBitsBytes, static_cast(mipMaps[i].paucEncodingBits.get())); + texture->assignStoredMipFace(i+baseMipLevel, face, mipMaps[i].uiEncodingBitsBytes, static_cast(mipMaps[i].paucEncodingBits.get())); } else { - texture->assignStoredMip(i, mipMaps[i].uiEncodingBitsBytes, static_cast(mipMaps[i].paucEncodingBits.get())); + texture->assignStoredMip(i + baseMipLevel, mipMaps[i].uiEncodingBitsBytes, static_cast(mipMaps[i].paucEncodingBits.get())); } } } @@ -795,22 +850,27 @@ void generateLDRMips(gpu::Texture* texture, Image&& image, BackendTarget target, #endif -void generateMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic& abortProcessing = false, int face = -1) { -#if CPU_MIPMAPS - PROFILE_RANGE(resource_parse, "generateMips"); +void convertImageToTexture(gpu::Texture* texture, Image& image, BackendTarget target, int face, int baseMipLevel, bool buildMips, const std::atomic& abortProcessing) { + PROFILE_RANGE(resource_parse, "convertToTextureWithMips"); if (target == BackendTarget::GLES32) { - generateLDRMips(texture, std::move(image), target, abortProcessing, face); + convertImageToLDRTexture(texture, std::move(image), target, baseMipLevel, buildMips, abortProcessing, face); } else { - if (image.getFormat() == Image::Format_PACKED_FLOAT) { - generateHDRMips(texture, std::move(image), target, abortProcessing, face); + if (image.hasFloatFormat()) { + convertImageToHDRTexture(texture, std::move(image), target, baseMipLevel, buildMips, abortProcessing, face); } else { - generateLDRMips(texture, std::move(image), target, abortProcessing, face); + convertImageToLDRTexture(texture, std::move(image), target, baseMipLevel, buildMips, abortProcessing, face); } } -#else - texture->setAutoGenerateMips(true); -#endif +} + +void convertToTextureWithMips(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic& abortProcessing, int face) { + convertImageToTexture(texture, image, target, face, 0, true, abortProcessing); +} + +void convertToTexture(gpu::Texture* texture, Image&& image, BackendTarget target, const std::atomic& abortProcessing, int face, int mipLevel) { + PROFILE_RANGE(resource_parse, "convertToTexture"); + convertImageToTexture(texture, image, target, face, mipLevel, false, abortProcessing); } void processTextureAlpha(const Image& srcImage, bool& validAlpha, bool& alphaAsMask) { @@ -900,7 +960,7 @@ gpu::TexturePointer TextureUsage::process2DTextureColorFromImage(Image&& srcImag theTexture->setUsage(usage.build()); theTexture->setStoredMipFormat(formatMip); theTexture->assignStoredMip(0, image.getByteCount(), image.getBits()); - generateMips(theTexture.get(), std::move(image), target, abortProcessing); + convertToTextureWithMips(theTexture.get(), std::move(image), target, abortProcessing); } return theTexture; @@ -944,14 +1004,14 @@ Image processBumpMap(Image&& image) { const int jPrevClamped = clampPixelCoordinate(j - 1, height - 1); // surrounding pixels - const QRgb topLeft = localCopy.getPixel(iPrevClamped, jPrevClamped); - const QRgb top = localCopy.getPixel(iPrevClamped, j); - 
const QRgb topRight = localCopy.getPixel(iPrevClamped, jNextClamped); - const QRgb right = localCopy.getPixel(i, jNextClamped); - const QRgb bottomRight = localCopy.getPixel(iNextClamped, jNextClamped); - const QRgb bottom = localCopy.getPixel(iNextClamped, j); - const QRgb bottomLeft = localCopy.getPixel(iNextClamped, jPrevClamped); - const QRgb left = localCopy.getPixel(i, jPrevClamped); + const QRgb topLeft = localCopy.getPackedPixel(iPrevClamped, jPrevClamped); + const QRgb top = localCopy.getPackedPixel(iPrevClamped, j); + const QRgb topRight = localCopy.getPackedPixel(iPrevClamped, jNextClamped); + const QRgb right = localCopy.getPackedPixel(i, jNextClamped); + const QRgb bottomRight = localCopy.getPackedPixel(iNextClamped, jNextClamped); + const QRgb bottom = localCopy.getPackedPixel(iNextClamped, j); + const QRgb bottomLeft = localCopy.getPackedPixel(iNextClamped, jPrevClamped); + const QRgb left = localCopy.getPackedPixel(i, jPrevClamped); // take their gray intensities // since it's a grayscale image, the value of each component RGB is the same @@ -974,12 +1034,13 @@ Image processBumpMap(Image&& image) { // convert to rgb from the value obtained computing the filter QRgb qRgbValue = qRgba(mapComponent(v.z), mapComponent(v.y), mapComponent(v.x), 1.0); - result.setPixel(i, j, qRgbValue); + result.setPackedPixel(i, j, qRgbValue); } } return result; } + gpu::TexturePointer TextureUsage::process2DTextureNormalMapFromImage(Image&& srcImage, const std::string& srcImageName, bool compress, BackendTarget target, bool isBumpMap, const std::atomic& abortProcessing) { @@ -1014,7 +1075,7 @@ gpu::TexturePointer TextureUsage::process2DTextureNormalMapFromImage(Image&& src theTexture->setSource(srcImageName); theTexture->setStoredMipFormat(formatMip); theTexture->assignStoredMip(0, image.getByteCount(), image.getBits()); - generateMips(theTexture.get(), std::move(image), target, abortProcessing); + convertToTextureWithMips(theTexture.get(), std::move(image), target, abortProcessing); } return theTexture; @@ -1054,7 +1115,7 @@ gpu::TexturePointer TextureUsage::process2DTextureGrayscaleFromImage(Image&& src theTexture->setSource(srcImageName); theTexture->setStoredMipFormat(formatMip); theTexture->assignStoredMip(0, image.getByteCount(), image.getBits()); - generateMips(theTexture.get(), std::move(image), target, abortProcessing); + convertToTextureWithMips(theTexture.get(), std::move(image), target, abortProcessing); } return theTexture; @@ -1416,8 +1477,41 @@ Image convertToHDRFormat(Image&& srcImage, gpu::Element format) { return hdrImage; } +static bool isLinearTextureFormat(gpu::Element format) { + return !((format == gpu::Element::COLOR_SRGBA_32) + || (format == gpu::Element::COLOR_SBGRA_32) + || (format == gpu::Element::COLOR_SR_8) + || (format == gpu::Element::COLOR_COMPRESSED_BCX_SRGB) + || (format == gpu::Element::COLOR_COMPRESSED_BCX_SRGBA_MASK) + || (format == gpu::Element::COLOR_COMPRESSED_BCX_SRGBA) + || (format == gpu::Element::COLOR_COMPRESSED_BCX_SRGBA_HIGH) + || (format == gpu::Element::COLOR_COMPRESSED_ETC2_SRGB) + || (format == gpu::Element::COLOR_COMPRESSED_ETC2_SRGBA) + || (format == gpu::Element::COLOR_COMPRESSED_ETC2_SRGB_PUNCHTHROUGH_ALPHA)); +} + +void convolveForGGX(const std::vector& faces, gpu::Texture* texture, BackendTarget target, const std::atomic& abortProcessing = false) { + PROFILE_RANGE(resource_parse, "convolveForGGX"); + CubeMap source(faces, texture->getNumMips(), abortProcessing); + CubeMap output(texture->getWidth(), texture->getHeight(), 
texture->getNumMips()); + + if (!faces.front().hasFloatFormat()) { + source.applyGamma(2.2f); + } + source.convolveForGGX(output, abortProcessing); + if (!isLinearTextureFormat(texture->getTexelFormat())) { + output.applyGamma(1.0f/2.2f); + } + + for (int face = 0; face < 6; face++) { + for (gpu::uint16 mipLevel = 0; mipLevel < output.getMipCount(); mipLevel++) { + convertToTexture(texture, output.getFaceImage(mipLevel, face), target, abortProcessing, face, mipLevel); + } + } +} + gpu::TexturePointer TextureUsage::processCubeTextureColorFromImage(Image&& srcImage, const std::string& srcImageName, - bool compress, BackendTarget target, bool generateIrradiance, + bool compress, BackendTarget target, int options, const std::atomic& abortProcessing) { PROFILE_RANGE(resource_parse, "processCubeTextureColorFromImage"); @@ -1491,7 +1585,7 @@ gpu::TexturePointer TextureUsage::processCubeTextureColorFromImage(Image&& srcIm theTexture->setStoredMipFormat(formatMip); // Generate irradiance while we are at it - if (generateIrradiance) { + if (options & CUBE_GENERATE_IRRADIANCE) { PROFILE_RANGE(resource_parse, "generateIrradiance"); gpu::Element irradianceFormat; // TODO: we could locally compress the irradiance texture on Android, but we don't need to @@ -1513,9 +1607,16 @@ gpu::TexturePointer TextureUsage::processCubeTextureColorFromImage(Image&& srcIm auto irradiance = irradianceTexture->getIrradiance(); theTexture->overrideIrradiance(irradiance); } - - for (uint8 face = 0; face < faces.size(); ++face) { - generateMips(theTexture.get(), std::move(faces[face]), target, abortProcessing, face); + + if (options & CUBE_GGX_CONVOLVE) { + // Performs and convolution AND mip map generation + convolveForGGX(faces, theTexture.get(), target, abortProcessing); + } else { + // Create mip maps and compress to final format in one go + for (uint8 face = 0; face < faces.size(); ++face) { + // Force building the mip maps right now on CPU if we are convolving for GGX later on + convertToTextureWithMips(theTexture.get(), std::move(faces[face]), target, abortProcessing, face); + } } } diff --git a/libraries/image/src/image/TextureProcessing.h b/libraries/image/src/image/TextureProcessing.h index 72e2400721..6f93af1b29 100644 --- a/libraries/image/src/image/TextureProcessing.h +++ b/libraries/image/src/image/TextureProcessing.h @@ -17,11 +17,16 @@ #include #include "Image.h" +#include namespace image { std::function getHDRPackingFunction(); std::function getHDRUnpackingFunction(); + void convertToFloatFromPacked(const unsigned char* source, int width, int height, size_t srcLineByteStride, gpu::Element sourceFormat, + glm::vec4* output, size_t outputLinePixelStride); + void convertToPackedFromFloat(unsigned char* output, int width, int height, size_t outputLineByteStride, gpu::Element outputFormat, + const glm::vec4* source, size_t srcLinePixelStride); namespace TextureUsage { @@ -62,7 +67,8 @@ enum Type { ROUGHNESS_TEXTURE, GLOSS_TEXTURE, EMISSIVE_TEXTURE, - CUBE_TEXTURE, + SKY_TEXTURE, + AMBIENT_TEXTURE, OCCLUSION_TEXTURE, SCATTERING_TEXTURE = OCCLUSION_TEXTURE, LIGHTMAP_TEXTURE, @@ -92,8 +98,12 @@ gpu::TexturePointer createMetallicTextureFromImage(Image&& image, const std::str bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing); gpu::TexturePointer createCubeTextureFromImage(Image&& image, const std::string& srcImageName, bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing); -gpu::TexturePointer createCubeTextureFromImageWithoutIrradiance(Image&& image, const 
std::string& srcImageName, - bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing); +gpu::TexturePointer createCubeTextureAndIrradianceFromImage(Image&& image, const std::string& srcImageName, + bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing); +gpu::TexturePointer createAmbientCubeTextureFromImage(Image&& image, const std::string& srcImageName, + bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing); +gpu::TexturePointer createAmbientCubeTextureAndIrradianceFromImage(Image&& image, const std::string& srcImageName, + bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing); gpu::TexturePointer createLightmapTextureFromImage(Image&& image, const std::string& srcImageName, bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing); gpu::TexturePointer process2DTextureColorFromImage(Image&& srcImage, const std::string& srcImageName, bool compress, @@ -102,9 +112,14 @@ gpu::TexturePointer process2DTextureNormalMapFromImage(Image&& srcImage, const s gpu::BackendTarget target, bool isBumpMap, const std::atomic& abortProcessing); gpu::TexturePointer process2DTextureGrayscaleFromImage(Image&& srcImage, const std::string& srcImageName, bool compress, gpu::BackendTarget target, bool isInvertedPixels, const std::atomic& abortProcessing); -gpu::TexturePointer processCubeTextureColorFromImage(Image&& srcImage, const std::string& srcImageName, bool compress, - gpu::BackendTarget target, bool generateIrradiance, const std::atomic& abortProcessing); +enum CubeTextureOptions { + CUBE_DEFAULT = 0x0, + CUBE_GENERATE_IRRADIANCE = 0x1, + CUBE_GGX_CONVOLVE = 0x2 +}; +gpu::TexturePointer processCubeTextureColorFromImage(Image&& srcImage, const std::string& srcImageName, bool compress, + gpu::BackendTarget target, int option, const std::atomic& abortProcessing); } // namespace TextureUsage const QStringList getSupportedFormats(); @@ -113,6 +128,9 @@ gpu::TexturePointer processImage(std::shared_ptr content, const std:: int maxNumPixels, TextureUsage::Type textureType, bool compress, gpu::BackendTarget target, const std::atomic& abortProcessing = false); +void convertToTextureWithMips(gpu::Texture* texture, Image&& image, gpu::BackendTarget target, const std::atomic& abortProcessing = false, int face = -1); +void convertToTexture(gpu::Texture* texture, Image&& image, gpu::BackendTarget target, const std::atomic& abortProcessing = false, int face = -1, int mipLevel = 0); + } // namespace image #endif // hifi_image_TextureProcessing_h diff --git a/libraries/material-networking/src/material-networking/TextureCache.cpp b/libraries/material-networking/src/material-networking/TextureCache.cpp index 6af59930fa..6ceb5d328a 100644 --- a/libraries/material-networking/src/material-networking/TextureCache.cpp +++ b/libraries/material-networking/src/material-networking/TextureCache.cpp @@ -224,10 +224,14 @@ NetworkTexturePointer TextureCache::getTexture(const QUrl& url, image::TextureUs return getResourceTexture(url); } auto modifiedUrl = url; - if (type == image::TextureUsage::CUBE_TEXTURE) { + if (type == image::TextureUsage::SKY_TEXTURE) { QUrlQuery query { url.query() }; query.addQueryItem("skybox", ""); modifiedUrl.setQuery(query.toString()); + } else if (type == image::TextureUsage::AMBIENT_TEXTURE) { + QUrlQuery query{ url.query() }; + query.addQueryItem("ambient", ""); + modifiedUrl.setQuery(query.toString()); } TextureExtra extra = { type, content, maxNumPixels, sourceChannel }; return 
ResourceCache::getResource(modifiedUrl, QUrl(), &extra, std::hash()(extra)).staticCast(); @@ -283,7 +287,8 @@ gpu::TexturePointer getFallbackTextureForType(image::TextureUsage::Type type) { case image::TextureUsage::BUMP_TEXTURE: case image::TextureUsage::SPECULAR_TEXTURE: case image::TextureUsage::GLOSS_TEXTURE: - case image::TextureUsage::CUBE_TEXTURE: + case image::TextureUsage::SKY_TEXTURE: + case image::TextureUsage::AMBIENT_TEXTURE: case image::TextureUsage::STRICT_TEXTURE: default: break; @@ -408,7 +413,7 @@ void NetworkTexture::setExtra(void* extra) { _shouldFailOnRedirect = _currentlyLoadingResourceType != ResourceType::KTX; - if (_type == image::TextureUsage::CUBE_TEXTURE) { + if (_type == image::TextureUsage::SKY_TEXTURE) { setLoadPriority(this, SKYBOX_LOAD_PRIORITY); } else if (_currentlyLoadingResourceType == ResourceType::KTX) { setLoadPriority(this, HIGH_MIPS_LOAD_PRIORITY); diff --git a/libraries/networking/src/LimitedNodeList.cpp b/libraries/networking/src/LimitedNodeList.cpp index 18a180ad79..82f3459c15 100644 --- a/libraries/networking/src/LimitedNodeList.cpp +++ b/libraries/networking/src/LimitedNodeList.cpp @@ -588,6 +588,8 @@ void LimitedNodeList::eraseAllNodes() { foreach(const SharedNodePointer& killedNode, killedNodes) { handleNodeKill(killedNode); } + + _delayedNodeAdds.clear(); } void LimitedNodeList::reset() { @@ -755,7 +757,7 @@ void LimitedNodeList::delayNodeAdd(NewNodeInfo info) { } void LimitedNodeList::removeDelayedAdd(QUuid nodeUUID) { - auto it = std::find_if(_delayedNodeAdds.begin(), _delayedNodeAdds.end(), [&](auto info) { + auto it = std::find_if(_delayedNodeAdds.begin(), _delayedNodeAdds.end(), [&](const auto& info) { return info.uuid == nodeUUID; }); if (it != _delayedNodeAdds.end()) { @@ -764,7 +766,7 @@ void LimitedNodeList::removeDelayedAdd(QUuid nodeUUID) { } bool LimitedNodeList::isDelayedNode(QUuid nodeUUID) { - auto it = std::find_if(_delayedNodeAdds.begin(), _delayedNodeAdds.end(), [&](auto info) { + auto it = std::find_if(_delayedNodeAdds.begin(), _delayedNodeAdds.end(), [&](const auto& info) { return info.uuid == nodeUUID; }); return it != _delayedNodeAdds.end(); diff --git a/libraries/networking/src/NetworkPeer.h b/libraries/networking/src/NetworkPeer.h index b75d2f8b86..43fbc753eb 100644 --- a/libraries/networking/src/NetworkPeer.h +++ b/libraries/networking/src/NetworkPeer.h @@ -26,7 +26,7 @@ const quint16 ICE_SERVER_DEFAULT_PORT = 7337; const int ICE_HEARBEAT_INTERVAL_MSECS = 2 * 1000; const int MAX_ICE_CONNECTION_ATTEMPTS = 5; -const int UDP_PUNCH_PING_INTERVAL_MS = 25; +const int UDP_PUNCH_PING_INTERVAL_MS = 250; class NetworkPeer : public QObject { Q_OBJECT diff --git a/libraries/networking/src/NodeList.cpp b/libraries/networking/src/NodeList.cpp index 0021a594bc..0a4c63d712 100644 --- a/libraries/networking/src/NodeList.cpp +++ b/libraries/networking/src/NodeList.cpp @@ -752,11 +752,11 @@ void NodeList::pingPunchForInactiveNode(const SharedNodePointer& node) { flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::SendAudioPing); } - // every second we're trying to ping this node and we're not getting anywhere - debug that out - const int NUM_DEBUG_CONNECTION_ATTEMPTS = 1000 / (UDP_PUNCH_PING_INTERVAL_MS); + // every two seconds we're trying to ping this node and we're not getting anywhere - debug that out + const int NUM_DEBUG_CONNECTION_ATTEMPTS = 2000 / (UDP_PUNCH_PING_INTERVAL_MS); if (node->getConnectionAttempts() > 0 && node->getConnectionAttempts() % NUM_DEBUG_CONNECTION_ATTEMPTS == 0) { - qCDebug(networking) << "No 
response to UDP hole punch pings for node" << node->getUUID() << "in last second."; + qCDebug(networking) << "No response to UDP hole punch pings for node" << node->getUUID() << "in last 2 s."; } auto nodeID = node->getUUID(); diff --git a/libraries/networking/src/NodePermissions.h b/libraries/networking/src/NodePermissions.h index d0e421a438..1b0b9d220d 100644 --- a/libraries/networking/src/NodePermissions.h +++ b/libraries/networking/src/NodePermissions.h @@ -41,10 +41,10 @@ public: NodePermissions(const NodePermissionsKey& key) { _id = key.first.toLower(); _rankID = key.second; } NodePermissions(QMap perms); - const QString& getID() const { return _id; } // a user-name or a group-name, not verified + QString getID() const { return _id; } // a user-name or a group-name, not verified void setID(const QString& id) { _id = id; } void setRankID(QUuid& rankID) { _rankID = rankID; } - const QUuid& getRankID() const { return _rankID; } + QUuid getRankID() const { return _rankID; } NodePermissionsKey getKey() const { return NodePermissionsKey(_id, _rankID); } // the _id member isn't authenticated/verified and _username is. @@ -52,7 +52,7 @@ public: const QString& getVerifiedUserName() const { return _verifiedUserName; } void setGroupID(QUuid groupID) { _groupID = groupID; if (!groupID.isNull()) { _groupIDSet = true; }} - const QUuid& getGroupID() const { return _groupID; } + QUuid getGroupID() const { return _groupID; } bool isGroup() const { return _groupIDSet; } bool isAssignment { false }; diff --git a/libraries/render-utils/src/AntialiasingEffect.cpp b/libraries/render-utils/src/AntialiasingEffect.cpp index 17c13df19a..a445ea2343 100644 --- a/libraries/render-utils/src/AntialiasingEffect.cpp +++ b/libraries/render-utils/src/AntialiasingEffect.cpp @@ -26,7 +26,7 @@ #include "ViewFrustum.h" #include "GeometryCache.h" #include "FramebufferCache.h" - +#include "RandomAndNoise.h" namespace ru { using render_utils::slot::texture::Texture; @@ -359,36 +359,11 @@ int JitterSampleConfig::play() { return _state; } -template -class Halton { -public: - - float eval(int index) const { - float f = 1.0f; - float r = 0.0f; - float invB = 1.0f / (float)B; - index++; // Indices start at 1, not 0 - - while (index > 0) { - f = f * invB; - r = r + f * (float)(index % B); - index = index / B; - - } - - return r; - } - -}; - - JitterSample::SampleSequence::SampleSequence(){ // Halton sequence (2,3) - Halton<2> genX; - Halton<3> genY; for (int i = 0; i < SEQUENCE_LENGTH; i++) { - offsets[i] = glm::vec2(genX.eval(i), genY.eval(i)); + offsets[i] = glm::vec2(halton::evaluate<2>(i), halton::evaluate<3>(i)); offsets[i] -= vec2(0.5f); } offsets[SEQUENCE_LENGTH] = glm::vec2(0.0f); diff --git a/libraries/render-utils/src/DeferredLightingEffect.cpp b/libraries/render-utils/src/DeferredLightingEffect.cpp index ab9dea2325..b8c720e9ca 100644 --- a/libraries/render-utils/src/DeferredLightingEffect.cpp +++ b/libraries/render-utils/src/DeferredLightingEffect.cpp @@ -365,6 +365,7 @@ void PrepareDeferred::run(const RenderContextPointer& renderContext, const Input // For the rest of the rendering, bind the lighting model batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer()); + batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT()); }); } @@ -416,6 +417,7 @@ void RenderDeferredSetup::run(const render::RenderContextPointer& renderContext, // THe lighting model batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer()); + 
batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT()); // Subsurface scattering specific if (surfaceGeometryFramebuffer) { @@ -642,25 +644,37 @@ void RenderDeferred::run(const RenderContextPointer& renderContext, const Inputs config->setGPUBatchRunTime(_gpuTimer->getGPUAverage(), _gpuTimer->getBatchAverage()); } - - void DefaultLightingSetup::run(const RenderContextPointer& renderContext) { if (!_defaultLight || !_defaultBackground) { + auto defaultSkyboxURL = PathUtils::resourcesUrl() + "images/Default-Sky-9-cubemap/Default-Sky-9-cubemap.texmeta.json"; + if (!_defaultSkyboxNetworkTexture) { PROFILE_RANGE(render, "Process Default Skybox"); _defaultSkyboxNetworkTexture = DependencyManager::get()->getTexture( - PathUtils::resourcesUrl() + "images/Default-Sky-9-cubemap/Default-Sky-9-cubemap.texmeta.json", image::TextureUsage::CUBE_TEXTURE); + defaultSkyboxURL, image::TextureUsage::SKY_TEXTURE); + } + + if (!_defaultAmbientNetworkTexture) { + PROFILE_RANGE(render, "Process Default Ambient map"); + _defaultAmbientNetworkTexture = DependencyManager::get()->getTexture( + defaultSkyboxURL, image::TextureUsage::AMBIENT_TEXTURE); } if (_defaultSkyboxNetworkTexture && _defaultSkyboxNetworkTexture->isLoaded() && _defaultSkyboxNetworkTexture->getGPUTexture()) { - _defaultSkyboxAmbientTexture = _defaultSkyboxNetworkTexture->getGPUTexture(); - _defaultSkybox->setCubemap(_defaultSkyboxAmbientTexture); + _defaultSkybox->setCubemap(_defaultSkyboxNetworkTexture->getGPUTexture()); } else { // Don't do anything until the skybox has loaded return; } + if (_defaultAmbientNetworkTexture && _defaultAmbientNetworkTexture->isLoaded() && _defaultAmbientNetworkTexture->getGPUTexture()) { + _defaultAmbientTexture = _defaultAmbientNetworkTexture->getGPUTexture(); + } else { + // Don't do anything until the ambient box has been loaded + return; + } + auto lightStage = renderContext->_scene->getStage(); if (lightStage) { @@ -674,8 +688,8 @@ void DefaultLightingSetup::run(const RenderContextPointer& renderContext) { lp->setAmbientSpherePreset(gpu::SphericalHarmonics::Preset::OLD_TOWN_SQUARE); lp->setAmbientIntensity(0.5f); - lp->setAmbientMap(_defaultSkyboxAmbientTexture); - auto irradianceSH = _defaultSkyboxAmbientTexture->getIrradiance(); + lp->setAmbientMap(_defaultAmbientTexture); + auto irradianceSH = _defaultAmbientTexture->getIrradiance(); if (irradianceSH) { lp->setAmbientSphere((*irradianceSH)); } diff --git a/libraries/render-utils/src/DeferredLightingEffect.h b/libraries/render-utils/src/DeferredLightingEffect.h index f4935000ef..1cc6ca4767 100644 --- a/libraries/render-utils/src/DeferredLightingEffect.h +++ b/libraries/render-utils/src/DeferredLightingEffect.h @@ -212,7 +212,8 @@ protected: HazeStage::Index _defaultHazeID{ HazeStage::INVALID_INDEX }; graphics::SkyboxPointer _defaultSkybox { new ProceduralSkybox() }; NetworkTexturePointer _defaultSkyboxNetworkTexture; - gpu::TexturePointer _defaultSkyboxAmbientTexture; + NetworkTexturePointer _defaultAmbientNetworkTexture; + gpu::TexturePointer _defaultAmbientTexture; }; #endif // hifi_DeferredLightingEffect_h diff --git a/libraries/render-utils/src/LightAmbient.slh b/libraries/render-utils/src/LightAmbient.slh index 4ea9c0cd4c..cb76a8e545 100644 --- a/libraries/render-utils/src/LightAmbient.slh +++ b/libraries/render-utils/src/LightAmbient.slh @@ -17,8 +17,9 @@ vec4 evalSkyboxLight(vec3 direction, float lod) { #if !defined(GL_ES) float filterLod = textureQueryLod(skyboxMap, direction).x; - // Keep texture filtering LOD as 
limit to prevent aliasing on specular reflection - lod = max(lod, filterLod); + // Keep texture filtering LOD as limit to prevent aliasing on specular reflection, but add + // a bias to limit overblurring with convolved maps + lod = max(lod, filterLod-2); #endif return textureLod(skyboxMap, direction, lod); @@ -26,16 +27,30 @@ vec4 evalSkyboxLight(vec3 direction, float lod) { <@endfunc@> <@func declareEvalAmbientSpecularIrradiance(supportAmbientSphere, supportAmbientMap, supportIfAmbientMapElseAmbientSphere)@> +LAYOUT(binding=RENDER_UTILS_TEXTURE_AMBIENT_FRESNEL) uniform sampler2D ambientFresnelLUT; -vec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float gloss) { +vec3 fresnelSchlickAmbient(vec3 fresnelColor, float ndotd, float roughness) { +#if RENDER_UTILS_ENABLE_AMBIENT_FRESNEL_LUT + vec2 ambientFresnel = texture(ambientFresnelLUT, vec2(roughness, ndotd)).xy; + return fresnelColor * ambientFresnel.x + vec3(ambientFresnel.y); +#else + float gloss = 1.0-roughness; float f = pow(1.0 - ndotd, 5.0); return fresnelColor + (max(vec3(gloss), fresnelColor) - fresnelColor) * f; +#endif } <@if supportAmbientMap@> <$declareSkyboxMap()$> <@endif@> +float getMipLevelFromRoughness(float roughness, float lodCount) { + // This should match the value in the CubeMap::convolveForGGX method (CubeMap.cpp) + float ROUGHNESS_1_MIP_RESOLUTION = 1.5; + float deltaLod = lodCount - ROUGHNESS_1_MIP_RESOLUTION; + return deltaLod * (sqrt(1.0+24.0*roughness)-1.0) / 4.0; +} + vec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, vec3 lightDir) { vec3 specularLight; <@if supportIfAmbientMapElseAmbientSphere@> @@ -43,10 +58,10 @@ vec3 evalAmbientSpecularIrradiance(LightAmbient ambient, SurfaceData surface, ve <@endif@> <@if supportAmbientMap@> { - float levels = getLightAmbientMapNumMips(ambient); - float m = 12.0 / (1.0+11.0*surface.roughness); - float lod = levels - m; + float levelCount = getLightAmbientMapNumMips(ambient); + float lod = getMipLevelFromRoughness(surface.roughness, levelCount); lod = max(lod, 0.0); + specularLight = evalSkyboxLight(lightDir, lod).xyz; } <@endif@> @@ -87,7 +102,7 @@ void evalLightingAmbient(out vec3 diffuse, out vec3 specular, LightAmbient ambie vec3 ambientSpaceLowNormal = (ambient.transform * vec4(lowNormalCurvature.xyz, 0.0)).xyz; <@endif@> - vec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, 1.0-surface.roughness); + vec3 ambientFresnel = fresnelSchlickAmbient(fresnelF0, surface.ndotv, surface.roughness); diffuse = (1.0 - metallic) * (vec3(1.0) - ambientFresnel) * sphericalHarmonics_evalSphericalLight(getLightAmbientSphere(ambient), ambientSpaceSurfaceNormal).xyz; diff --git a/libraries/render-utils/src/LightingModel.cpp b/libraries/render-utils/src/LightingModel.cpp index 2a85fcd960..5fcec1f033 100644 --- a/libraries/render-utils/src/LightingModel.cpp +++ b/libraries/render-utils/src/LightingModel.cpp @@ -9,10 +9,88 @@ // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html // #include "LightingModel.h" +#include "RandomAndNoise.h" +#include "BRDF.h" + +#include "render-utils/ShaderConstants.h" + +#include + +gpu::TexturePointer LightingModel::_ambientFresnelLUT; LightingModel::LightingModel() { Parameters parameters; _parametersBuffer = gpu::BufferView(std::make_shared(sizeof(Parameters), (const gpu::Byte*) ¶meters, sizeof(Parameters))); + +#if RENDER_UTILS_ENABLE_AMBIENT_FRESNEL_LUT + if (!_ambientFresnelLUT) { + // Code taken from the IntegrateBRDF method as described in this talk : + // 
https://cdn2.unrealengine.com/Resources/files/2013SiggraphPresentationsNotes-26915738.pdf + const auto N_roughness = 32; + const auto N_NdotV = 256; + + using LUTVector = std::vector; + using LUTValueType = LUTVector::value_type::value_type; + + LUTVector lut(N_roughness * N_NdotV); + + _ambientFresnelLUT = gpu::Texture::create2D(gpu::Element{ gpu::VEC2, gpu::NUINT16, gpu::XY }, N_roughness, N_NdotV, 1U, + gpu::Sampler(gpu::Sampler::FILTER_MIN_POINT_MAG_LINEAR, gpu::Sampler::WRAP_CLAMP)); + + tbb::parallel_for(tbb::blocked_range2d(0, N_NdotV, 8, 0, N_roughness, 8), [&](const tbb::blocked_range2d& range) { + auto roughnessRange = range.cols(); + auto ndotvRange = range.rows(); + + for (auto j = ndotvRange.begin(); j < ndotvRange.end(); j++) { + const float NdotV = j / float(N_NdotV - 1); + + glm::vec3 V; + V.x = std::sqrt(1.0f - NdotV * NdotV); // sin + V.y = 0; + V.z = NdotV; // cos + + for (auto k = roughnessRange.begin(); k < roughnessRange.end(); k++) { + const float roughness = k / float(N_roughness - 1); + const float alpha = roughness * roughness; + const float alphaSquared = alpha * alpha; + + float A = 0.0f; + float B = 0.0f; + + const uint NumSamples = 1024; + for (uint i = 0; i < NumSamples; i++) { + glm::vec2 Xi = hammersley::evaluate(i, NumSamples); + glm::vec3 H = ggx::sample(Xi, roughness); + float VdotH = glm::dot(V, H); + glm::vec3 L = 2.0f * VdotH * H - V; + float NdotL = L.z; + + if (NdotL > 0.0f) { + VdotH = glm::clamp(VdotH, 0.0f, 1.0f); + + float NdotH = glm::clamp(H.z, 0.0f, 1.0f); + float G = smith::evaluateFastWithoutNdotV(alphaSquared, NdotV, NdotL); + float G_Vis = (G * VdotH) / NdotH; + float Fc = std::pow(1.0f - VdotH, 5.0f); + + A += (1.0f - Fc) * G_Vis; + B += Fc * G_Vis; + } + } + + A /= NumSamples; + B /= NumSamples; + + auto& lutValue = lut[k + j * N_roughness]; + lutValue.x = (LUTValueType)(glm::min(1.0f, A) * std::numeric_limits::max()); + lutValue.y = (LUTValueType)(glm::min(1.0f, B) * std::numeric_limits::max()); + } + } + }); + + _ambientFresnelLUT->assignStoredMip(0, N_roughness * N_NdotV * sizeof(LUTVector::value_type), (const gpu::Byte*)lut.data()); + } +#endif } void LightingModel::setUnlit(bool enable) { diff --git a/libraries/render-utils/src/LightingModel.h b/libraries/render-utils/src/LightingModel.h index f6bd6dcd46..a488abcb09 100644 --- a/libraries/render-utils/src/LightingModel.h +++ b/libraries/render-utils/src/LightingModel.h @@ -83,6 +83,7 @@ public: bool isShadowEnabled() const; UniformBufferView getParametersBuffer() const { return _parametersBuffer; } + gpu::TexturePointer getAmbientFresnelLUT() const { return _ambientFresnelLUT; } protected: @@ -126,6 +127,7 @@ protected: Parameters() {} }; UniformBufferView _parametersBuffer; + static gpu::TexturePointer _ambientFresnelLUT; }; using LightingModelPointer = std::shared_ptr; diff --git a/libraries/render-utils/src/Model.cpp b/libraries/render-utils/src/Model.cpp index b2fe4673af..2a35b0d161 100644 --- a/libraries/render-utils/src/Model.cpp +++ b/libraries/render-utils/src/Model.cpp @@ -444,6 +444,19 @@ bool Model::findRayIntersectionAgainstSubMeshes(const glm::vec3& origin, const g } } + /**jsdoc + * Information about a submesh intersection point. + * @typedef {object} SubmeshIntersection + * @property {Vec3} worldIntersectionPoint - The intersection point in world coordinates. + * @property {Vec3} meshIntersectionPoint - The intersection point in model coordinates. + * @property {number} partIndex - The index of the intersected mesh part within the submesh. 
+ * @property {number} shapeID - The index of the mesh part within the model. + * @property {number} subMeshIndex - The index of the intersected submesh within the model. + * @property {string} subMeshName - The name of the intersected submesh. + * @property {Triangle} subMeshTriangleWorld - The vertices of the intersected mesh part triangle in world coordinates. + * @property {Vec3} subMeshNormal - The normal of the intersected mesh part triangle in model coordinates. + * @property {Triangle} subMeshTriangle - The vertices of the intersected mesh part triangle in model coordinates. + */ if (intersectedSomething) { distance = bestDistance; face = bestFace; diff --git a/libraries/render-utils/src/RenderCommonTask.cpp b/libraries/render-utils/src/RenderCommonTask.cpp index b1a62625b2..18532b7a66 100644 --- a/libraries/render-utils/src/RenderCommonTask.cpp +++ b/libraries/render-utils/src/RenderCommonTask.cpp @@ -94,6 +94,7 @@ void DrawLayered3D::run(const RenderContextPointer& renderContext, const Inputs& // Setup lighting model for all items; batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer()); + batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT()); if (_opaquePass) { renderStateSortShapes(renderContext, _shapePlumber, inItems, _maxDrawn); diff --git a/libraries/render-utils/src/RenderDeferredTask.cpp b/libraries/render-utils/src/RenderDeferredTask.cpp index ea2b05a6fa..d52f1da043 100644 --- a/libraries/render-utils/src/RenderDeferredTask.cpp +++ b/libraries/render-utils/src/RenderDeferredTask.cpp @@ -471,6 +471,7 @@ void RenderTransparentDeferred::run(const RenderContextPointer& renderContext, c // Setup lighting model for all items; batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer()); + batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT()); // Set the light deferredLightingEffect->setupKeyLightBatch(args, batch, *lightFrame); @@ -536,6 +537,7 @@ void DrawStateSortDeferred::run(const RenderContextPointer& renderContext, const // Setup lighting model for all items; batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer()); + batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT()); // From the lighting model define a global shapeKey ORED with individiual keys ShapeKey::Builder keyBuilder; diff --git a/libraries/render-utils/src/RenderForwardTask.cpp b/libraries/render-utils/src/RenderForwardTask.cpp index 0bc117bdb9..5e30308a05 100755 --- a/libraries/render-utils/src/RenderForwardTask.cpp +++ b/libraries/render-utils/src/RenderForwardTask.cpp @@ -251,6 +251,7 @@ void DrawForward::run(const RenderContextPointer& renderContext, const Inputs& i // Setup lighting model for all items; batch.setUniformBuffer(ru::Buffer::LightModel, lightingModel->getParametersBuffer()); + batch.setResourceTexture(ru::Texture::AmbientFresnel, lightingModel->getAmbientFresnelLUT()); // From the lighting model define a global shapeKey ORED with individiual keys ShapeKey::Builder keyBuilder; diff --git a/libraries/render-utils/src/render-utils/ShaderConstants.h b/libraries/render-utils/src/render-utils/ShaderConstants.h index 8c289e62d1..76c8dd4981 100644 --- a/libraries/render-utils/src/render-utils/ShaderConstants.h +++ b/libraries/render-utils/src/render-utils/ShaderConstants.h @@ -14,6 +14,10 @@ #ifndef RENDER_UTILS_SHADER_CONSTANTS_H #define RENDER_UTILS_SHADER_CONSTANTS_H +// Feature enabling flags 
(possibly need to rebuild shaders if this changes) +#define RENDER_UTILS_ENABLE_AMBIENT_FRESNEL_LUT 1 + +// Binding slots #define RENDER_UTILS_ATTR_TEXCOORD01 0 #define RENDER_UTILS_ATTR_COLOR 1 @@ -54,6 +58,7 @@ #define RENDER_UTILS_TEXTURE_DEFERRED_DIFFUSED_CURVATURE 7 #define RENDER_UTILS_TEXTURE_DEFERRED_LIGHTING 10 #define RENDER_UTILS_TEXTURE_SKYBOX 11 +#define RENDER_UTILS_TEXTURE_AMBIENT_FRESNEL 14 #define RENDER_UTILS_BUFFER_SHADOW_PARAMS 2 #define RENDER_UTILS_TEXTURE_SHADOW 12 @@ -198,6 +203,7 @@ enum Texture { BloomColor = RENDER_UTILS_TEXTURE_BLOOM_COLOR, ToneMappingColor = RENDER_UTILS_TEXTURE_TM_COLOR, TextFont = RENDER_UTILS_TEXTURE_TEXT_FONT, + AmbientFresnel = RENDER_UTILS_TEXTURE_AMBIENT_FRESNEL, DebugTexture0 = RENDER_UTILS_DEBUG_TEXTURE0, }; } // namespace texture diff --git a/libraries/shared/src/AvatarConstants.h b/libraries/shared/src/AvatarConstants.h index 5166cb7a0b..fcf84a49cb 100644 --- a/libraries/shared/src/AvatarConstants.h +++ b/libraries/shared/src/AvatarConstants.h @@ -42,6 +42,7 @@ const float DEFAULT_AVATAR_HIPS_MASS = 40.0f; const float DEFAULT_AVATAR_HEAD_MASS = 20.0f; const float DEFAULT_AVATAR_LEFTHAND_MASS = 2.0f; const float DEFAULT_AVATAR_RIGHTHAND_MASS = 2.0f; +const float DEFAULT_AVATAR_IPD = 0.064f; // Used when avatar is missing joints... (avatar space) const glm::quat DEFAULT_AVATAR_MIDDLE_EYE_ROT { Quaternions::Y_180 }; @@ -102,6 +103,7 @@ static const float MAX_AVATAR_HEIGHT = 1000.0f * DEFAULT_AVATAR_HEIGHT; // meter static const float MIN_AVATAR_HEIGHT = 0.005f * DEFAULT_AVATAR_HEIGHT; // meters static const float MIN_AVATAR_RADIUS = 0.5f * MIN_AVATAR_HEIGHT; static const float AVATAR_WALK_SPEED_SCALAR = 1.0f; -static const float AVATAR_SPRINT_SPEED_SCALAR = 2.0f; +static const float AVATAR_DESKTOP_SPRINT_SPEED_SCALAR = 3.0f; +static const float AVATAR_HMD_SPRINT_SPEED_SCALAR = 2.0f; #endif // hifi_AvatarConstants_h diff --git a/libraries/shared/src/BRDF.cpp b/libraries/shared/src/BRDF.cpp new file mode 100644 index 0000000000..fe438f12a1 --- /dev/null +++ b/libraries/shared/src/BRDF.cpp @@ -0,0 +1,45 @@ +#include "BRDF.h" + +#include +#ifndef M_PI +#define M_PI 3.14159265359 +#endif + +namespace ggx { + +float evaluate(float NdotH, float roughness) { + float alpha = roughness * roughness; + float alphaSquared = alpha * alpha; + float denom = (float)(NdotH * NdotH * (alphaSquared - 1.0f) + 1.0f); + return alphaSquared / (denom * denom); +} + +glm::vec3 sample(const glm::vec2& Xi, const float roughness) { + const float a = roughness * roughness; + + float phi = 2.0f * (float) M_PI * Xi.x; + float cosTheta = std::sqrt((1.0f - Xi.y) / (1.0f + (a*a - 1.0f) * Xi.y)); + float sinTheta = std::sqrt(1.0f - cosTheta * cosTheta); + + // from spherical coordinates to cartesian coordinates + glm::vec3 H; + H.x = std::cos(phi) * sinTheta; + H.y = std::sin(phi) * sinTheta; + H.z = cosTheta; + + return H; +} + +} + + +namespace smith { + + float evaluateFastWithoutNdotV(float alphaSquared, float NdotV, float NdotL) { + float oneMinusAlphaSquared = 1.0f - alphaSquared; + float G = NdotL * std::sqrt(alphaSquared + NdotV * NdotV * oneMinusAlphaSquared); + G = G + NdotV * std::sqrt(alphaSquared + NdotL * NdotL * oneMinusAlphaSquared); + return 2.0f * NdotL / G; + } + +} diff --git a/libraries/shared/src/BRDF.h b/libraries/shared/src/BRDF.h new file mode 100644 index 0000000000..4e6cdd1f38 --- /dev/null +++ b/libraries/shared/src/BRDF.h @@ -0,0 +1,36 @@ +#pragma once +// +// BRDF.h +// +// Created by Olivier Prat on 04/04/19. 
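These `ggx` and `smith` helpers are consumed by the ambient Fresnel LUT generation added to LightingModel.cpp above. Below is a condensed sketch of that split-sum pre-integration using the namespaces introduced by this change; the function name `integrateBRDF` and the default sample count are illustrative, while the loop body mirrors the LUT code:

```cpp
#include <cmath>
#include <glm/glm.hpp>
#include "BRDF.h"
#include "RandomAndNoise.h"

// Pre-integrates the scale (A) and bias (B) applied to F0 for one (roughness, NdotV) pair,
// i.e. one texel of the ambient Fresnel LUT.
glm::vec2 integrateBRDF(float roughness, float NdotV, int numSamples = 1024) {
    const float alpha = roughness * roughness;
    const float alphaSquared = alpha * alpha;
    // View vector in the local frame where the surface normal is +Z.
    const glm::vec3 V(std::sqrt(1.0f - NdotV * NdotV), 0.0f, NdotV);

    float A = 0.0f;
    float B = 0.0f;
    for (int i = 0; i < numSamples; i++) {
        glm::vec2 Xi = hammersley::evaluate(i, numSamples);  // low discrepancy 2D sample
        glm::vec3 H = ggx::sample(Xi, roughness);             // GGX importance-sampled half vector
        float VdotH = glm::dot(V, H);
        glm::vec3 L = 2.0f * VdotH * H - V;                    // reflect V about H
        float NdotL = L.z;

        if (NdotL > 0.0f) {
            VdotH = glm::clamp(VdotH, 0.0f, 1.0f);
            float NdotH = glm::clamp(H.z, 0.0f, 1.0f);
            float G = smith::evaluateFastWithoutNdotV(alphaSquared, NdotV, NdotL);
            float G_Vis = (G * VdotH) / NdotH;
            float Fc = std::pow(1.0f - VdotH, 5.0f);
            A += (1.0f - Fc) * G_Vis;
            B += Fc * G_Vis;
        }
    }
    return glm::vec2(A, B) / float(numSamples);
}
```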
+// Copyright 2019 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// +#ifndef SHARED_BRDF_H +#define SHARED_BRDF_H + +#include +#include + +// GGX micro-facet model +namespace ggx { + float evaluate(float NdotH, float roughness); + glm::vec3 sample(const glm::vec2& Xi, const float roughness); +} + +// Smith visibility function +namespace smith { + float evaluateFastWithoutNdotV(float alphaSquared, float NdotV, float NdotL); + + inline float evaluateFast(float alphaSquared, float NdotV, float NdotL) { + return evaluateFastWithoutNdotV(alphaSquared, NdotV, NdotL) * NdotV; + } + + inline float evaluate(float roughness, float NdotV, float NdotL) { + return evaluateFast(roughness*roughness*roughness*roughness, NdotV, NdotL); + } +} + +#endif // SHARED_BRDF_H \ No newline at end of file diff --git a/libraries/shared/src/GeometryUtil.h b/libraries/shared/src/GeometryUtil.h index 04c54fc32e..764eeb1500 100644 --- a/libraries/shared/src/GeometryUtil.h +++ b/libraries/shared/src/GeometryUtil.h @@ -119,6 +119,13 @@ void swingTwistDecomposition(const glm::quat& rotation, glm::quat& swing, glm::quat& twist); +/**jsdoc + * A triangle in a mesh. + * @typedef {object} Triangle + * @property {Vec3} v0 - The position of vertex 0 in the triangle. + * @property {Vec3} v1 - The position of vertex 1 in the triangle. + * @property {Vec3} v2 - The position of vertex 2 in the triangle. + */ class Triangle { public: glm::vec3 v0; diff --git a/libraries/shared/src/RandomAndNoise.h b/libraries/shared/src/RandomAndNoise.h new file mode 100644 index 0000000000..7bde14a141 --- /dev/null +++ b/libraries/shared/src/RandomAndNoise.h @@ -0,0 +1,52 @@ +// +// RandomAndNoise.h +// +// Created by Olivier Prat on 05/16/18. +// Copyright 2018 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// +#ifndef RANDOM_AND_NOISE_H +#define RANDOM_AND_NOISE_H + +#include + +namespace halton { + // Low discrepancy Halton sequence generator + template + float evaluate(int index) { + float f = 1.0f; + float r = 0.0f; + float invB = 1.0f / (float)B; + index++; // Indices start at 1, not 0 + + while (index > 0) { + f = f * invB; + r = r + f * (float)(index % B); + index = index / B; + + } + + return r; + } +} + +inline float getRadicalInverseVdC(uint32_t bits) { + bits = (bits << 16u) | (bits >> 16u); + bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u); + bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u); + bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u); + bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u); + return float(bits) * 2.3283064365386963e-10f; // / 0x100000000\n" +} + +namespace hammersley { + // Low discrepancy Hammersley 2D sequence generator + inline glm::vec2 evaluate(int k, const int sequenceLength) { + return glm::vec2(float(k) / float(sequenceLength), getRadicalInverseVdC(k)); + } +} + + +#endif \ No newline at end of file diff --git a/libraries/shared/src/TBBHelpers.h b/libraries/shared/src/TBBHelpers.h index 6b5c4d416b..0c4deace6a 100644 --- a/libraries/shared/src/TBBHelpers.h +++ b/libraries/shared/src/TBBHelpers.h @@ -20,6 +20,7 @@ #include #include #include +#include #ifdef _WIN32 #pragma warning( pop ) diff --git a/libraries/shared/src/shared/Camera.h b/libraries/shared/src/shared/Camera.h index 0132e58d18..f41183479c 100644 --- a/libraries/shared/src/shared/Camera.h +++ b/libraries/shared/src/shared/Camera.h @@ -36,25 +36,6 @@ static int cameraModeId = qRegisterMetaType(); class Camera : public QObject { Q_OBJECT - /**jsdoc - * The Camera API provides access to the "camera" that defines your view in desktop and HMD display modes. - * - * @namespace Camera - * - * @hifi-interface - * @hifi-client-entity - * @hifi-avatar - * - * @property {Vec3} position - The position of the camera. You can set this value only when the camera is in independent - * mode. - * @property {Quat} orientation - The orientation of the camera. You can set this value only when the camera is in - * independent mode. - * @property {Camera.Mode} mode - The camera mode. - * @property {ViewFrustum} frustum - The camera frustum. - * @property {Uuid} cameraEntity - The ID of the entity that is used for the camera position and orientation when the - * camera is in entity mode. - */ - // FIXME: The cameraEntity property definition is copied from FancyCamera.h. Q_PROPERTY(glm::vec3 position READ getPosition WRITE setPosition) Q_PROPERTY(glm::quat orientation READ getOrientation WRITE setOrientation) Q_PROPERTY(QString mode READ getModeString WRITE setModeString) @@ -82,53 +63,54 @@ public: public slots: /**jsdoc - * Get the current camera mode. You can also get the mode using the Camera.mode property. + * Gets the current camera mode. You can also get the mode using the {@link Camera|Camera.mode} property. * @function Camera.getModeString * @returns {Camera.Mode} The current camera mode. */ QString getModeString() const; /**jsdoc - * Set the camera mode. You can also set the mode using the Camera.mode property. - * @function Camera.setModeString - * @param {Camera.Mode} mode - The mode to set the camera to. - */ + * Sets the camera mode. You can also set the mode using the {@link Camera|Camera.mode} property. 
+ * @function Camera.setModeString + * @param {Camera.Mode} mode - The mode to set the camera to. + */ void setModeString(const QString& mode); /**jsdoc - * Get the current camera position. You can also get the position using the Camera.position property. - * @function Camera.getPosition - * @returns {Vec3} The current camera position. - */ + * Gets the current camera position. You can also get the position using the {@link Camera|Camera.position} property. + * @function Camera.getPosition + * @returns {Vec3} The current camera position. + */ glm::vec3 getPosition() const { return _position; } /**jsdoc - * Set the camera position. You can also set the position using the Camera.position property. Only works if the - * camera is in independent mode. - * @function Camera.setPosition - * @param {Vec3} position - The position to set the camera at. - */ + * Sets the camera position. You can also set the position using the {@link Camera|Camera.position} property. Only works if + * the camera is in independent mode. + * @function Camera.setPosition + * @param {Vec3} position - The position to set the camera at. + */ void setPosition(const glm::vec3& position); /**jsdoc - * Get the current camera orientation. You can also get the orientation using the Camera.orientation property. - * @function Camera.getOrientation - * @returns {Quat} The current camera orientation. - */ + * Gets the current camera orientation. You can also get the orientation using the {@link Camera|Camera.orientation} + * property. + * @function Camera.getOrientation + * @returns {Quat} The current camera orientation. + */ glm::quat getOrientation() const { return _orientation; } /**jsdoc - * Set the camera orientation. You can also set the orientation using the Camera.orientation property. Only - * works if the camera is in independent mode. - * @function Camera.setOrientation - * @param {Quat} orientation - The orientation to set the camera to. - */ + * Sets the camera orientation. You can also set the orientation using the {@link Camera|Camera.orientation} property. Only + * works if the camera is in independent mode. + * @function Camera.setOrientation + * @param {Quat} orientation - The orientation to set the camera to. + */ void setOrientation(const glm::quat& orientation); /**jsdoc - * Compute a {@link PickRay} based on the current camera configuration and the specified x, y position on the - * screen. The {@link PickRay} can be used in functions such as {@link Entities.findRayIntersection} and - * {@link Overlays.findRayIntersection}. + * Computes a {@link PickRay} based on the current camera configuration and the specified x, y position on the + * screen. The {@link PickRay} can be used in functions such as {@link Entities.findRayIntersection} and + * {@link Overlays.findRayIntersection}. * @function Camera.computePickRay * @param {number} x - X-coordinate on screen. * @param {number} y - Y-coordinate on screen. @@ -147,9 +129,9 @@ public slots: virtual PickRay computePickRay(float x, float y) const = 0; /**jsdoc - * Rotate the camera to look at the specified position. Only works if the camera is in independent mode. + * Rotates the camera to look at the specified position. Only works if the camera is in independent mode. * @function Camera.lookAt - * @param {Vec3} position - Position to look at. + * @param {Vec3} position - The position to look at. * @example Rotate your camera to look at entities as you click on them with your mouse. 
* function onMousePressEvent(event) { * var pickRay = Camera.computePickRay(event.x, event.y); @@ -168,15 +150,15 @@ public slots: void lookAt(const glm::vec3& position); /**jsdoc - * Set the camera to continue looking at the specified position even while the camera moves. Only works if the - * camera is in independent mode. + * Sets the camera to continue looking at the specified position even while the camera moves. Only works if + * the camera is in independent mode. * @function Camera.keepLookingAt - * @param {Vec3} position - Position to keep looking at. + * @param {Vec3} position - The position to keep looking at. */ void keepLookingAt(const glm::vec3& position); /**jsdoc - * Stops the camera from continually looking at the position that was set with Camera.keepLookingAt. + * Stops the camera from continually looking at the position that was set with {@link Camera.keepLookingAt}. * @function Camera.stopLookingAt */ void stopLooking() { _isKeepLookingAt = false; } diff --git a/scripts/system/edit.js b/scripts/system/edit.js index 104648d7c4..69cf278ab3 100644 --- a/scripts/system/edit.js +++ b/scripts/system/edit.js @@ -2129,9 +2129,32 @@ var DELETED_ENTITY_MAP = {}; function applyEntityProperties(data) { var editEntities = data.editEntities; + var createEntities = data.createEntities; + var deleteEntities = data.deleteEntities; var selectedEntityIDs = []; - var selectEdits = data.createEntities.length === 0 || !data.selectCreated; + var selectEdits = createEntities.length === 0 || !data.selectCreated; var i, entityID, entityProperties; + for (i = 0; i < createEntities.length; i++) { + entityID = createEntities[i].entityID; + entityProperties = createEntities[i].properties; + var newEntityID = Entities.addEntity(entityProperties); + recursiveAdd(newEntityID, createEntities[i]); + DELETED_ENTITY_MAP[entityID] = newEntityID; + if (data.selectCreated) { + selectedEntityIDs.push(newEntityID); + } + } + for (i = 0; i < deleteEntities.length; i++) { + entityID = deleteEntities[i].entityID; + if (DELETED_ENTITY_MAP[entityID] !== undefined) { + entityID = DELETED_ENTITY_MAP[entityID]; + } + Entities.deleteEntity(entityID); + var index = selectedEntityIDs.indexOf(entityID); + if (index >= 0) { + selectedEntityIDs.splice(index, 1); + } + } for (i = 0; i < editEntities.length; i++) { entityID = editEntities[i].entityID; if (DELETED_ENTITY_MAP[entityID] !== undefined) { @@ -2145,27 +2168,6 @@ function applyEntityProperties(data) { selectedEntityIDs.push(entityID); } } - for (i = 0; i < data.createEntities.length; i++) { - entityID = data.createEntities[i].entityID; - entityProperties = data.createEntities[i].properties; - var newEntityID = Entities.addEntity(entityProperties); - recursiveAdd(newEntityID, data.createEntities[i]); - DELETED_ENTITY_MAP[entityID] = newEntityID; - if (data.selectCreated) { - selectedEntityIDs.push(newEntityID); - } - } - for (i = 0; i < data.deleteEntities.length; i++) { - entityID = data.deleteEntities[i].entityID; - if (DELETED_ENTITY_MAP[entityID] !== undefined) { - entityID = DELETED_ENTITY_MAP[entityID]; - } - Entities.deleteEntity(entityID); - var index = selectedEntityIDs.indexOf(entityID); - if (index >= 0) { - selectedEntityIDs.splice(index, 1); - } - } // We might be getting an undo while edit.js is disabled. If that is the case, don't set // our selections, causing the edit widgets to display. 
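The reordering in applyEntityProperties above makes an undo recreate entities before the delete and edit passes run, so operations that still reference the original IDs can be redirected through DELETED_ENTITY_MAP to the recreated entities. A small sketch of that remapping pattern, written in C++ for consistency with the rest of this change; the types and callbacks are illustrative, not the script's API:

```cpp
#include <functional>
#include <string>
#include <unordered_map>
#include <vector>

using EntityID = std::string;

// Redirects IDs of entities that were deleted and later recreated to their replacements,
// playing the role DELETED_ENTITY_MAP plays in edit.js.
struct IDRemapper {
    std::unordered_map<EntityID, EntityID> remapped;

    EntityID resolve(const EntityID& id) const {
        auto it = remapped.find(id);
        return it != remapped.end() ? it->second : id;
    }
};

// Undo must recreate entities first so that later deletes and edits, which may still
// carry stale IDs, end up targeting the recreated entities.
void applyUndo(IDRemapper& remapper,
               const std::vector<EntityID>& toRecreate,
               const std::vector<EntityID>& toDelete,
               const std::vector<EntityID>& toEdit,
               const std::function<EntityID(const EntityID&)>& createEntity,
               const std::function<void(const EntityID&)>& deleteEntity,
               const std::function<void(const EntityID&)>& editEntity) {
    for (const auto& oldID : toRecreate) {
        remapper.remapped[oldID] = createEntity(oldID);  // 1. recreate, remember old -> new
    }
    for (const auto& id : toDelete) {
        deleteEntity(remapper.resolve(id));              // 2. delete through the remap table
    }
    for (const auto& id : toEdit) {
        editEntity(remapper.resolve(id));                // 3. edit through the remap table
    }
}
```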
diff --git a/scripts/system/libraries/entitySelectionTool.js b/scripts/system/libraries/entitySelectionTool.js index d2f3ece5f1..3fdc1d6652 100644 --- a/scripts/system/libraries/entitySelectionTool.js +++ b/scripts/system/libraries/entitySelectionTool.js @@ -1347,12 +1347,16 @@ SelectionDisplay = (function() { }; that.updateLastMouseEvent = function(event) { - if (activeTool && lastMouseEvent !== null) { + if (activeTool && lastMouseEvent !== null) { + var change = lastMouseEvent.isShifted !== event.isShifted || lastMouseEvent.isMeta !== event.isMeta || + lastMouseEvent.isControl !== event.isControl || lastMouseEvent.isAlt !== event.isAlt; lastMouseEvent.isShifted = event.isShifted; lastMouseEvent.isMeta = event.isMeta; lastMouseEvent.isControl = event.isControl; - lastMouseEvent.isAlt = event.isAlt; - activeTool.onMove(lastMouseEvent); + lastMouseEvent.isAlt = event.isAlt; + if (change) { + activeTool.onMove(lastMouseEvent); + } } }; diff --git a/tools/jsdoc/plugins/hifi.js b/tools/jsdoc/plugins/hifi.js index f8230b5dea..a7c62cfc6d 100644 --- a/tools/jsdoc/plugins/hifi.js +++ b/tools/jsdoc/plugins/hifi.js @@ -58,6 +58,7 @@ exports.handlers = { '../../libraries/physics/src', '../../libraries/plugins/src/plugins', '../../libraries/pointers/src', + '../../libraries/render-utils/src', '../../libraries/script-engine/src', '../../libraries/shared/src', '../../libraries/shared/src/shared', diff --git a/tools/oven/src/BakerCLI.cpp b/tools/oven/src/BakerCLI.cpp index 64462a4e37..669b821456 100644 --- a/tools/oven/src/BakerCLI.cpp +++ b/tools/oven/src/BakerCLI.cpp @@ -80,8 +80,9 @@ void BakerCLI::bakeFile(QUrl inputUrl, const QString& outputPath, const QString& { "roughness", image::TextureUsage::ROUGHNESS_TEXTURE }, { "gloss", image::TextureUsage::GLOSS_TEXTURE }, { "emissive", image::TextureUsage::EMISSIVE_TEXTURE }, - { "cube", image::TextureUsage::CUBE_TEXTURE }, - { "skybox", image::TextureUsage::CUBE_TEXTURE }, + { "cube", image::TextureUsage::SKY_TEXTURE }, + { "skybox", image::TextureUsage::SKY_TEXTURE }, + { "ambient", image::TextureUsage::AMBIENT_TEXTURE }, { "occlusion", image::TextureUsage::OCCLUSION_TEXTURE }, { "scattering", image::TextureUsage::SCATTERING_TEXTURE }, { "lightmap", image::TextureUsage::LIGHTMAP_TEXTURE }, diff --git a/tools/oven/src/DomainBaker.cpp b/tools/oven/src/DomainBaker.cpp index 8a4489fcac..7d6a14d5da 100644 --- a/tools/oven/src/DomainBaker.cpp +++ b/tools/oven/src/DomainBaker.cpp @@ -387,13 +387,13 @@ void DomainBaker::enumerateEntities() { if (entity.contains(AMBIENT_LIGHT_KEY)) { auto ambientLight = entity[AMBIENT_LIGHT_KEY].toObject(); if (ambientLight.contains(AMBIENT_URL_KEY)) { - addTextureBaker(AMBIENT_LIGHT_KEY + "." + AMBIENT_URL_KEY, ambientLight[AMBIENT_URL_KEY].toString(), image::TextureUsage::CUBE_TEXTURE, *it); + addTextureBaker(AMBIENT_LIGHT_KEY + "." + AMBIENT_URL_KEY, ambientLight[AMBIENT_URL_KEY].toString(), image::TextureUsage::AMBIENT_TEXTURE, *it); } } if (entity.contains(SKYBOX_KEY)) { auto skybox = entity[SKYBOX_KEY].toObject(); if (skybox.contains(SKYBOX_URL_KEY)) { - addTextureBaker(SKYBOX_KEY + "." + SKYBOX_URL_KEY, skybox[SKYBOX_URL_KEY].toString(), image::TextureUsage::CUBE_TEXTURE, *it); + addTextureBaker(SKYBOX_KEY + "." 
+ SKYBOX_URL_KEY, skybox[SKYBOX_URL_KEY].toString(), image::TextureUsage::SKY_TEXTURE, *it); } } @@ -412,9 +412,13 @@ void DomainBaker::enumerateEntities() { if (entity.contains(MATERIAL_URL_KEY)) { addMaterialBaker(MATERIAL_URL_KEY, entity[MATERIAL_URL_KEY].toString(), true, *it); } + // FIXME: Disabled for now because relative texture URLs are not supported for embedded materials in material entities + // We need to make texture URLs absolute in this particular case only, keeping in mind that FSTBaker also uses embedded materials + /* if (entity.contains(MATERIAL_DATA_KEY)) { addMaterialBaker(MATERIAL_DATA_KEY, entity[MATERIAL_DATA_KEY].toString(), false, *it); } + */ } } diff --git a/tools/oven/src/ui/SkyboxBakeWidget.cpp b/tools/oven/src/ui/SkyboxBakeWidget.cpp index 71ae0cbab0..6c6e0340ac 100644 --- a/tools/oven/src/ui/SkyboxBakeWidget.cpp +++ b/tools/oven/src/ui/SkyboxBakeWidget.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -61,6 +62,15 @@ void SkyboxBakeWidget::setupUI() { // start a new row for next component ++rowIndex; + // setup a section to enable Ambient map baking + _ambientMapBox = new QCheckBox("Bake ambient map(s)"); + _ambientMapBox->setChecked(false); + + gridLayout->addWidget(_ambientMapBox, rowIndex, 1); + + // start a new row for next component + ++rowIndex; + // setup a section to choose the output directory QLabel* outputDirectoryLabel = new QLabel("Output Directory"); @@ -176,51 +186,67 @@ void SkyboxBakeWidget::bakeButtonClicked() { // if the URL doesn't have a scheme, assume it is a local file if (skyboxToBakeURL.scheme() != "http" && skyboxToBakeURL.scheme() != "https" && skyboxToBakeURL.scheme() != "ftp") { - skyboxToBakeURL.setScheme("file"); + skyboxToBakeURL = QUrl::fromLocalFile(fileURLString); } // everything seems to be in place, kick off a bake for this skybox now - auto baker = std::unique_ptr { - new TextureBaker(skyboxToBakeURL, image::TextureUsage::CUBE_TEXTURE, outputDirectory.absolutePath()) - }; + addBaker(new TextureBaker(skyboxToBakeURL, image::TextureUsage::SKY_TEXTURE, outputDirectory.absolutePath()), + outputDirectory); - // move the baker to a worker thread - baker->moveToThread(Oven::instance().getNextWorkerThread()); + if (_ambientMapBox->isChecked()) { + QString ambientMapBaseFilename; + QString urlPath = skyboxToBakeURL.path(); + auto urlParts = urlPath.split('.'); - // invoke the bake method on the baker thread - QMetaObject::invokeMethod(baker.get(), "bake"); + urlParts.front() += "-ambient"; + ambientMapBaseFilename = QUrl(urlParts.front()).fileName(); - // make sure we hear about the results of this baker when it is done - connect(baker.get(), &TextureBaker::finished, this, &SkyboxBakeWidget::handleFinishedBaker); - - // add a pending row to the results window to show that this bake is in process - auto resultsWindow = OvenGUIApplication::instance()->getMainWindow()->showResultsWindow(); - auto resultsRow = resultsWindow->addPendingResultRow(skyboxToBakeURL.fileName(), outputDirectory); - - // keep a unique_ptr to this baker - // and remember the row that represents it in the results table - _bakers.emplace_back(std::move(baker), resultsRow); + // we need to bake the corresponding ambient map too + addBaker(new TextureBaker(skyboxToBakeURL, image::TextureUsage::AMBIENT_TEXTURE, outputDirectory.absolutePath(), QString(), ambientMapBaseFilename), + outputDirectory); + } } } +void SkyboxBakeWidget::addBaker(TextureBaker* baker, const QDir& outputDirectory) { + auto textureBaker = std::unique_ptr{ baker 
}; + + // move the textureBaker to a worker thread + textureBaker->moveToThread(Oven::instance().getNextWorkerThread()); + + // make sure we hear about the results of this textureBaker when it is done + connect(textureBaker.get(), &TextureBaker::finished, this, &SkyboxBakeWidget::handleFinishedBaker); + + // invoke the bake method on the textureBaker thread + QMetaObject::invokeMethod(textureBaker.get(), "bake"); + + // add a pending row to the results window to show that this bake is in process + auto resultsWindow = OvenGUIApplication::instance()->getMainWindow()->showResultsWindow(); + auto resultsRow = resultsWindow->addPendingResultRow(baker->getBaseFilename(), outputDirectory); + + // keep a unique_ptr to this textureBaker + // and remember the row that represents it in the results table + _bakers.emplace_back(std::move(textureBaker), resultsRow); +} + void SkyboxBakeWidget::handleFinishedBaker() { - if (auto baker = qobject_cast(sender())) { + if (auto textureBaker = qobject_cast(sender())) { // add the results of this bake to the results window - auto it = std::find_if(_bakers.begin(), _bakers.end(), [baker](const BakerRowPair& value) { - return value.first.get() == baker; + auto it = std::find_if(_bakers.begin(), _bakers.end(), [textureBaker](const BakerRowPair& value) { + return value.first.get() == textureBaker; }); if (it != _bakers.end()) { auto resultRow = it->second; auto resultsWindow = OvenGUIApplication::instance()->getMainWindow()->showResultsWindow(); - if (baker->hasErrors()) { - resultsWindow->changeStatusForRow(resultRow, baker->getErrors().join("\n")); + if (textureBaker->hasErrors()) { + resultsWindow->changeStatusForRow(resultRow, textureBaker->getErrors().join("\n")); } else { resultsWindow->changeStatusForRow(resultRow, "Success"); } - // drop our strong pointer to the baker now that we are done with it + // drop our strong pointer to the textureBaker now that we are done with it _bakers.erase(it); } } diff --git a/tools/oven/src/ui/SkyboxBakeWidget.h b/tools/oven/src/ui/SkyboxBakeWidget.h index f00ab07f33..f560964649 100644 --- a/tools/oven/src/ui/SkyboxBakeWidget.h +++ b/tools/oven/src/ui/SkyboxBakeWidget.h @@ -21,6 +21,7 @@ #include "BakeWidget.h" class QLineEdit; +class QCheckBox; class SkyboxBakeWidget : public BakeWidget { Q_OBJECT @@ -42,9 +43,12 @@ private: QLineEdit* _selectionLineEdit; QLineEdit* _outputDirLineEdit; + QCheckBox* _ambientMapBox; Setting::Handle _exportDirectory; Setting::Handle _selectionStartDirectory; + + void addBaker(TextureBaker* baker, const QDir& outputDir); }; #endif // hifi_SkyboxBakeWidget_h
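Taken together, the SkyboxBakeWidget changes funnel both the skybox bake and the optional ambient bake through the new addBaker helper. The essence of that helper is the dispatch pattern below; this is a condensed restatement of the code above, with the results window bookkeeping reduced to a placeholder, and is not intended as a drop-in replacement:

```cpp
void SkyboxBakeWidget::addBaker(TextureBaker* baker, const QDir& outputDirectory) {
    auto textureBaker = std::unique_ptr<TextureBaker>{ baker };

    // Run the bake on one of Oven's worker threads instead of the UI thread.
    textureBaker->moveToThread(Oven::instance().getNextWorkerThread());

    // Hear back when this bake completes, whether it succeeded or failed.
    connect(textureBaker.get(), &TextureBaker::finished, this, &SkyboxBakeWidget::handleFinishedBaker);

    // Queue the bake; invokeMethod runs it on the thread the baker now lives on.
    QMetaObject::invokeMethod(textureBaker.get(), "bake");

    // Keep ownership (and the pending results row) until handleFinishedBaker() erases the entry.
    (void)outputDirectory;  // the real code passes this to the results window when adding the row
    int resultsRow = 0;     // placeholder for the row index returned by the results window
    _bakers.emplace_back(std::move(textureBaker), resultsRow);
}
```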