Merge with master
Commit d84e2d31ea
34 changed files with 631 additions and 702 deletions

examples/edit.js (230 changed lines)
@@ -291,22 +291,18 @@ var toolBar = (function () {
var RESIZE_TIMEOUT = 120000; // 2 minutes
var RESIZE_MAX_CHECKS = RESIZE_TIMEOUT / RESIZE_INTERVAL;
function addModel(url) {
var position;
var entityID = createNewEntity({
type: "Model",
dimensions: DEFAULT_DIMENSIONS,
modelURL: url
}, false);

position = Vec3.sum(MyAvatar.position, Vec3.multiply(Quat.getFront(MyAvatar.orientation), SPAWN_DISTANCE));

if (position.x > 0 && position.y > 0 && position.z > 0) {
var entityId = Entities.addEntity({
type: "Model",
position: grid.snapToSurface(grid.snapToGrid(position, false, DEFAULT_DIMENSIONS), DEFAULT_DIMENSIONS),
dimensions: DEFAULT_DIMENSIONS,
modelURL: url
});
if (entityID) {
print("Model added: " + url);

var checkCount = 0;
function resize() {
var entityProperties = Entities.getEntityProperties(entityId);
var entityProperties = Entities.getEntityProperties(entityID);
var naturalDimensions = entityProperties.naturalDimensions;

checkCount++;
@@ -318,21 +314,41 @@ var toolBar = (function () {
print("Resize failed: timed out waiting for model (" + url + ") to load");
}
} else {
Entities.editEntity(entityId, { dimensions: naturalDimensions });
Entities.editEntity(entityID, { dimensions: naturalDimensions });

// Reset selection so that the selection overlays will be updated
selectionManager.setSelections([entityId]);
selectionManager.setSelections([entityID]);
}
}

selectionManager.setSelections([entityId]);
selectionManager.setSelections([entityID]);

Script.setTimeout(resize, RESIZE_INTERVAL);
} else {
Window.alert("Can't add model: Model would be out of bounds.");
}
}

function createNewEntity(properties, dragOnCreate) {
// Default to true if not passed in
dragOnCreate = dragOnCreate == undefined ? true : dragOnCreate;

var dimensions = properties.dimensions ? properties.dimensions : DEFAULT_DIMENSIONS;
var position = getPositionToCreateEntity();
var entityID = null;
if (position != null) {
position = grid.snapToSurface(grid.snapToGrid(position, false, dimensions), dimensions),
properties.position = position;

entityID = Entities.addEntity(properties);
if (dragOnCreate) {
placingEntityID = entityID;
}
} else {
Window.alert("Can't create " + properties.type + ": " + properties.type + " would be out of bounds.");
}

return entityID;
}

var newModelButtonDown = false;
var browseMarketplaceButtonDown = false;
that.mousePressEvent = function (event) {
@ -363,127 +379,82 @@ var toolBar = (function () {
|
|||
}
|
||||
|
||||
if (newCubeButton === toolBar.clicked(clickedOverlay)) {
|
||||
var position = getPositionToCreateEntity();
|
||||
createNewEntity({
|
||||
type: "Box",
|
||||
dimensions: DEFAULT_DIMENSIONS,
|
||||
color: { red: 255, green: 0, blue: 0 }
|
||||
});
|
||||
|
||||
if (position.x > 0 && position.y > 0 && position.z > 0) {
|
||||
placingEntityID = Entities.addEntity({
|
||||
type: "Box",
|
||||
position: grid.snapToSurface(grid.snapToGrid(position, false, DEFAULT_DIMENSIONS), DEFAULT_DIMENSIONS),
|
||||
dimensions: DEFAULT_DIMENSIONS,
|
||||
color: { red: 255, green: 0, blue: 0 }
|
||||
|
||||
});
|
||||
} else {
|
||||
Window.alert("Can't create box: Box would be out of bounds.");
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
if (newSphereButton === toolBar.clicked(clickedOverlay)) {
|
||||
var position = getPositionToCreateEntity();
|
||||
createNewEntity({
|
||||
type: "Sphere",
|
||||
dimensions: DEFAULT_DIMENSIONS,
|
||||
color: { red: 255, green: 0, blue: 0 }
|
||||
});
|
||||
|
||||
if (position.x > 0 && position.y > 0 && position.z > 0) {
|
||||
placingEntityID = Entities.addEntity({
|
||||
type: "Sphere",
|
||||
position: grid.snapToSurface(grid.snapToGrid(position, false, DEFAULT_DIMENSIONS), DEFAULT_DIMENSIONS),
|
||||
dimensions: DEFAULT_DIMENSIONS,
|
||||
color: { red: 255, green: 0, blue: 0 }
|
||||
});
|
||||
} else {
|
||||
Window.alert("Can't create sphere: Sphere would be out of bounds.");
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
if (newLightButton === toolBar.clicked(clickedOverlay)) {
|
||||
var position = getPositionToCreateEntity();
|
||||
createNewEntity({
|
||||
type: "Light",
|
||||
dimensions: DEFAULT_LIGHT_DIMENSIONS,
|
||||
isSpotlight: false,
|
||||
color: { red: 150, green: 150, blue: 150 },
|
||||
|
||||
if (position.x > 0 && position.y > 0 && position.z > 0) {
|
||||
placingEntityID = Entities.addEntity({
|
||||
type: "Light",
|
||||
position: grid.snapToSurface(grid.snapToGrid(position, false, DEFAULT_LIGHT_DIMENSIONS), DEFAULT_LIGHT_DIMENSIONS),
|
||||
dimensions: DEFAULT_LIGHT_DIMENSIONS,
|
||||
isSpotlight: false,
|
||||
color: { red: 150, green: 150, blue: 150 },
|
||||
constantAttenuation: 1,
|
||||
linearAttenuation: 0,
|
||||
quadraticAttenuation: 0,
|
||||
exponent: 0,
|
||||
cutoff: 180, // in degrees
|
||||
});
|
||||
|
||||
constantAttenuation: 1,
|
||||
linearAttenuation: 0,
|
||||
quadraticAttenuation: 0,
|
||||
exponent: 0,
|
||||
cutoff: 180, // in degrees
|
||||
});
|
||||
} else {
|
||||
Window.alert("Can't create Light: Light would be out of bounds.");
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
if (newTextButton === toolBar.clicked(clickedOverlay)) {
|
||||
var position = getPositionToCreateEntity();
|
||||
createNewEntity({
|
||||
type: "Text",
|
||||
dimensions: { x: 0.65, y: 0.3, z: 0.01 },
|
||||
backgroundColor: { red: 64, green: 64, blue: 64 },
|
||||
textColor: { red: 255, green: 255, blue: 255 },
|
||||
text: "some text",
|
||||
lineHeight: 0.06
|
||||
});
|
||||
|
||||
if (position.x > 0 && position.y > 0 && position.z > 0) {
|
||||
placingEntityID = Entities.addEntity({
|
||||
type: "Text",
|
||||
position: grid.snapToSurface(grid.snapToGrid(position, false, DEFAULT_DIMENSIONS), DEFAULT_DIMENSIONS),
|
||||
dimensions: { x: 0.65, y: 0.3, z: 0.01 },
|
||||
backgroundColor: { red: 64, green: 64, blue: 64 },
|
||||
textColor: { red: 255, green: 255, blue: 255 },
|
||||
text: "some text",
|
||||
lineHeight: 0.06
|
||||
});
|
||||
} else {
|
||||
Window.alert("Can't create box: Text would be out of bounds.");
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
if (newWebButton === toolBar.clicked(clickedOverlay)) {
|
||||
var position = getPositionToCreateEntity();
|
||||
createNewEntity({
|
||||
type: "Web",
|
||||
dimensions: { x: 1.6, y: 0.9, z: 0.01 },
|
||||
sourceUrl: "https://highfidelity.com/",
|
||||
});
|
||||
|
||||
if (position.x > 0 && position.y > 0 && position.z > 0) {
|
||||
placingEntityID = Entities.addEntity({
|
||||
type: "Web",
|
||||
position: grid.snapToSurface(grid.snapToGrid(position, false, DEFAULT_DIMENSIONS), DEFAULT_DIMENSIONS),
|
||||
dimensions: { x: 1.6, y: 0.9, z: 0.01 },
|
||||
sourceUrl: "https://highfidelity.com/",
|
||||
});
|
||||
} else {
|
||||
Window.alert("Can't create Web Entity: would be out of bounds.");
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
if (newZoneButton === toolBar.clicked(clickedOverlay)) {
|
||||
var position = getPositionToCreateEntity();
|
||||
createNewEntity({
|
||||
type: "Zone",
|
||||
dimensions: { x: 10, y: 10, z: 10 },
|
||||
});
|
||||
|
||||
if (position.x > 0 && position.y > 0 && position.z > 0) {
|
||||
placingEntityID = Entities.addEntity({
|
||||
type: "Zone",
|
||||
position: grid.snapToSurface(grid.snapToGrid(position, false, DEFAULT_DIMENSIONS), DEFAULT_DIMENSIONS),
|
||||
dimensions: { x: 10, y: 10, z: 10 },
|
||||
});
|
||||
} else {
|
||||
Window.alert("Can't create box: Text would be out of bounds.");
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
if (newPolyVoxButton === toolBar.clicked(clickedOverlay)) {
|
||||
var position = getPositionToCreateEntity();
|
||||
createNewEntity({
|
||||
type: "PolyVox",
|
||||
dimensions: { x: 10, y: 10, z: 10 },
|
||||
voxelVolumeSize: {x:16, y:16, z:16},
|
||||
voxelSurfaceStyle: 1
|
||||
});
|
||||
|
||||
if (position.x > 0 && position.y > 0 && position.z > 0) {
|
||||
placingEntityID = Entities.addEntity({
|
||||
type: "PolyVox",
|
||||
position: grid.snapToSurface(grid.snapToGrid(position, false, DEFAULT_DIMENSIONS),
|
||||
DEFAULT_DIMENSIONS),
|
||||
dimensions: { x: 10, y: 10, z: 10 },
|
||||
voxelVolumeSize: {x:16, y:16, z:16},
|
||||
voxelSurfaceStyle: 1
|
||||
});
|
||||
} else {
|
||||
Window.alert("Can't create PolyVox: would be out of bounds.");
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -666,7 +637,7 @@ function handleIdleMouse() {
|
|||
idleMouseTimerId = null;
|
||||
if (isActive) {
|
||||
highlightEntityUnderCursor(lastMousePosition, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function highlightEntityUnderCursor(position, accurateRay) {
|
||||
|
@@ -837,15 +808,15 @@ function setupModelMenus() {
}

Menu.addMenuItem({ menuName: "Edit", menuItemName: "Entity List...", shortcutKey: "CTRL+META+L", afterItem: "Models" });
Menu.addMenuItem({ menuName: "Edit", menuItemName: "Allow Selecting of Large Models", shortcutKey: "CTRL+META+L",
afterItem: "Entity List...", isCheckable: true, isChecked: true });
Menu.addMenuItem({ menuName: "Edit", menuItemName: "Allow Selecting of Small Models", shortcutKey: "CTRL+META+S",
afterItem: "Allow Selecting of Large Models", isCheckable: true, isChecked: true });
Menu.addMenuItem({ menuName: "Edit", menuItemName: "Allow Selecting of Lights", shortcutKey: "CTRL+SHIFT+META+L",
afterItem: "Allow Selecting of Small Models", isCheckable: true });
Menu.addMenuItem({ menuName: "Edit", menuItemName: "Select All Entities In Box", shortcutKey: "CTRL+SHIFT+META+A",
afterItem: "Allow Selecting of Lights" });
Menu.addMenuItem({ menuName: "Edit", menuItemName: "Select All Entities Touching Box", shortcutKey: "CTRL+SHIFT+META+T",
afterItem: "Select All Entities In Box" });

Menu.addMenuItem({ menuName: "File", menuItemName: "Models", isSeparator: true, beforeItem: "Settings" });
@ -962,7 +933,7 @@ function selectAllEtitiesInCurrentSelectionBox(keepIfTouching) {
|
|||
entities.splice(i, 1);
|
||||
--i;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
selectionManager.setSelections(entities);
|
||||
}
|
||||
|
@@ -1038,17 +1009,29 @@ function handeMenuEvent(menuItem) {
tooltip.show(false);
}

// This function tries to find a reasonable position to place a new entity based on the camera
// position. If a reasonable position within the world bounds can't be found, `null` will
// be returned. The returned position will also take into account grid snapping settings.
function getPositionToCreateEntity() {
var distance = cameraManager.enabled ? cameraManager.zoomDistance : DEFAULT_ENTITY_DRAG_DROP_DISTANCE;
var direction = Quat.getFront(Camera.orientation);
var offset = Vec3.multiply(distance, direction);
var position = Vec3.sum(Camera.position, offset);
var placementPosition = Vec3.sum(Camera.position, offset);

position.x = Math.max(0, position.x);
position.y = Math.max(0, position.y);
position.z = Math.max(0, position.z);
var cameraPosition = Camera.position;

return position;
var cameraOutOfBounds = cameraPosition.x < 0 || cameraPosition.y < 0 || cameraPosition.z < 0;
var placementOutOfBounds = placementPosition.x < 0 || placementPosition.y < 0 || placementPosition.z < 0;

if (cameraOutOfBounds && placementOutOfBounds) {
return null;
}

placementPosition.x = Math.max(0, placementPosition.x);
placementPosition.y = Math.max(0, placementPosition.y);
placementPosition.z = Math.max(0, placementPosition.z);

return placementPosition;
}

function importSVO(importURL) {
@@ -1064,17 +1047,18 @@ function importSVO(importURL) {

if (success) {
var VERY_LARGE = 10000;
var position = { x: 0, y: 0, z: 0};
var position = { x: 0, y: 0, z: 0 };
if (Clipboard.getClipboardContentsLargestDimension() < VERY_LARGE) {
position = getPositionToCreateEntity();
}
if (position.x > 0 && position.y > 0 && position.z > 0) {
if (position != null) {
var pastedEntityIDs = Clipboard.pasteEntities(position);

if (isActive) {
selectionManager.setSelections(pastedEntityIDs);
}
Window.raiseMainWindow();

Window.raiseMainWindow();
} else {
Window.alert("Can't import objects: objects would be out of bounds.");
}
@ -1264,7 +1248,7 @@ PropertiesTool = function(opts) {
|
|||
if (data.properties.keyLightDirection !== undefined) {
|
||||
data.properties.keyLightDirection = Vec3.fromPolar(
|
||||
data.properties.keyLightDirection.x * DEGREES_TO_RADIANS, data.properties.keyLightDirection.y * DEGREES_TO_RADIANS);
|
||||
}
|
||||
}
|
||||
Entities.editEntity(selectionManager.selections[0], data.properties);
|
||||
if (data.properties.name != undefined) {
|
||||
entityListTool.sendUpdate();
|
||||
|
@ -1360,8 +1344,8 @@ PropertiesTool = function(opts) {
|
|||
var properties = selectionManager.savedProperties[selectionManager.selections[i]];
|
||||
if (properties.type == "Zone") {
|
||||
var centerOfZone = properties.boundingBox.center;
|
||||
var atmosphereCenter = { x: centerOfZone.x,
|
||||
y: centerOfZone.y - properties.atmosphere.innerRadius,
|
||||
var atmosphereCenter = { x: centerOfZone.x,
|
||||
y: centerOfZone.y - properties.atmosphere.innerRadius,
|
||||
z: centerOfZone.z };
|
||||
|
||||
Entities.editEntity(selectionManager.selections[i], {
|
||||
|
@@ -1000,7 +1000,8 @@ void Application::paintGL() {

_compositor.displayOverlayTexture(&renderArgs);
}


if (!OculusManager::isConnected() || OculusManager::allowSwap()) {
PROFILE_RANGE(__FUNCTION__ "/bufferSwap");
_glWidget->swapBuffers();

@@ -1012,6 +1013,13 @@ void Application::paintGL() {
_frameCount++;
_numFramesSinceLastResize++;
Stats::getInstance()->setRenderDetails(renderArgs._details);

// Reset the gpu::Context Stages
// Back to the default framebuffer;
gpu::Batch batch;
batch.resetStages();
renderArgs._context->render(batch);
}

void Application::runTests() {
|
@ -249,8 +249,6 @@ Menu::Menu() {
|
|||
addCheckableActionToQMenuAndActionHash(avatarMenu, MenuOption::BlueSpeechSphere, 0, true);
|
||||
addCheckableActionToQMenuAndActionHash(avatarMenu, MenuOption::EnableCharacterController, 0, true,
|
||||
avatar, SLOT(updateMotionBehavior()));
|
||||
addCheckableActionToQMenuAndActionHash(avatarMenu, MenuOption::ShiftHipsForIdleAnimations, 0, false,
|
||||
avatar, SLOT(updateMotionBehavior()));
|
||||
|
||||
MenuWrapper* viewMenu = addMenu("View");
|
||||
|
||||
|
|
|
@ -273,7 +273,6 @@ namespace MenuOption {
|
|||
const QString SimpleShadows = "Simple";
|
||||
const QString SixenseEnabled = "Enable Hydra Support";
|
||||
const QString SixenseMouseInput = "Enable Sixense Mouse Input";
|
||||
const QString ShiftHipsForIdleAnimations = "Shift hips for idle animations";
|
||||
const QString Stars = "Stars";
|
||||
const QString Stats = "Stats";
|
||||
const QString StopAllScripts = "Stop All Scripts";
|
||||
|
|
|
@ -106,7 +106,7 @@ bool ModelPackager::loadModel() {
|
|||
}
|
||||
qCDebug(interfaceapp) << "Reading FBX file : " << _fbxInfo.filePath();
|
||||
QByteArray fbxContents = fbx.readAll();
|
||||
_geometry = readFBX(fbxContents, QVariantHash());
|
||||
_geometry = readFBX(fbxContents, QVariantHash(), _fbxInfo.filePath());
|
||||
|
||||
// make sure we have some basic mappings
|
||||
populateBasicMapping(_mapping, _fbxInfo.filePath(), _geometry);
|
||||
|
|
|
@ -98,7 +98,6 @@ MyAvatar::MyAvatar() :
|
|||
_lookAtTargetAvatar(),
|
||||
_shouldRender(true),
|
||||
_billboardValid(false),
|
||||
_feetTouchFloor(true),
|
||||
_eyeContactTarget(LEFT_EYE),
|
||||
_realWorldFieldOfView("realWorldFieldOfView",
|
||||
DEFAULT_REAL_WORLD_FIELD_OF_VIEW_DEGREES),
|
||||
|
@ -166,9 +165,6 @@ void MyAvatar::update(float deltaTime) {
|
|||
head->setAudioAverageLoudness(audio->getAudioAverageInputLoudness());
|
||||
|
||||
simulate(deltaTime);
|
||||
if (_feetTouchFloor) {
|
||||
_skeletonModel.updateStandingFoot();
|
||||
}
|
||||
}
|
||||
|
||||
void MyAvatar::simulate(float deltaTime) {
|
||||
|
@ -1140,11 +1136,7 @@ glm::vec3 MyAvatar::getSkeletonPosition() const {
|
|||
// The avatar is rotated PI about the yAxis, so we have to correct for it
|
||||
// to get the skeleton offset contribution in the world-frame.
|
||||
const glm::quat FLIP = glm::angleAxis(PI, glm::vec3(0.0f, 1.0f, 0.0f));
|
||||
glm::vec3 skeletonOffset = _skeletonOffset;
|
||||
if (_feetTouchFloor) {
|
||||
skeletonOffset += _skeletonModel.getStandingOffset();
|
||||
}
|
||||
return _position + getOrientation() * FLIP * skeletonOffset;
|
||||
return _position + getOrientation() * FLIP * _skeletonOffset;
|
||||
}
|
||||
return Avatar::getPosition();
|
||||
}
|
||||
|
@ -1622,7 +1614,6 @@ void MyAvatar::updateMotionBehavior() {
|
|||
_motionBehaviors &= ~AVATAR_MOTION_SCRIPTED_MOTOR_ENABLED;
|
||||
}
|
||||
_characterController.setEnabled(menu->isOptionChecked(MenuOption::EnableCharacterController));
|
||||
_feetTouchFloor = menu->isOptionChecked(MenuOption::ShiftHipsForIdleAnimations);
|
||||
}
|
||||
|
||||
//Renders sixense laser pointers for UI selection with controllers
|
||||
|
|
|
@ -258,7 +258,6 @@ private:
|
|||
|
||||
QList<AnimationHandlePointer> _animationHandles;
|
||||
|
||||
bool _feetTouchFloor;
|
||||
eyeContactTarget _eyeContactTarget;
|
||||
|
||||
RecorderPointer _recorder;
|
||||
|
|
|
@ -24,12 +24,6 @@
|
|||
#include "Util.h"
|
||||
#include "InterfaceLogging.h"
|
||||
|
||||
enum StandingFootState {
|
||||
LEFT_FOOT,
|
||||
RIGHT_FOOT,
|
||||
NO_FOOT
|
||||
};
|
||||
|
||||
SkeletonModel::SkeletonModel(Avatar* owningAvatar, QObject* parent) :
|
||||
Model(parent),
|
||||
_triangleFanID(DependencyManager::get<GeometryCache>()->allocateID()),
|
||||
|
@ -37,9 +31,6 @@ SkeletonModel::SkeletonModel(Avatar* owningAvatar, QObject* parent) :
|
|||
_boundingShape(),
|
||||
_boundingShapeLocalOffset(0.0f),
|
||||
_defaultEyeModelPosition(glm::vec3(0.0f, 0.0f, 0.0f)),
|
||||
_standingFoot(NO_FOOT),
|
||||
_standingOffset(0.0f),
|
||||
_clampedFootPosition(0.0f),
|
||||
_headClipDistance(DEFAULT_NEAR_CLIP),
|
||||
_isFirstPerson(false)
|
||||
{
|
||||
|
@ -573,65 +564,6 @@ glm::vec3 SkeletonModel::getDefaultEyeModelPosition() const {
|
|||
return _owningAvatar->getScale() * _defaultEyeModelPosition;
|
||||
}
|
||||
|
||||
/// \return offset of hips after foot animation
|
||||
void SkeletonModel::updateStandingFoot() {
|
||||
if (_geometry == NULL) {
|
||||
return;
|
||||
}
|
||||
glm::vec3 offset(0.0f);
|
||||
int leftFootIndex = _geometry->getFBXGeometry().leftToeJointIndex;
|
||||
int rightFootIndex = _geometry->getFBXGeometry().rightToeJointIndex;
|
||||
|
||||
if (leftFootIndex != -1 && rightFootIndex != -1) {
|
||||
glm::vec3 leftPosition, rightPosition;
|
||||
getJointPosition(leftFootIndex, leftPosition);
|
||||
getJointPosition(rightFootIndex, rightPosition);
|
||||
|
||||
int lowestFoot = (leftPosition.y < rightPosition.y) ? LEFT_FOOT : RIGHT_FOOT;
|
||||
const float MIN_STEP_HEIGHT_THRESHOLD = 0.05f;
|
||||
bool oneFoot = fabsf(leftPosition.y - rightPosition.y) > MIN_STEP_HEIGHT_THRESHOLD;
|
||||
int currentFoot = oneFoot ? lowestFoot : _standingFoot;
|
||||
|
||||
if (_standingFoot == NO_FOOT) {
|
||||
currentFoot = lowestFoot;
|
||||
}
|
||||
if (currentFoot != _standingFoot) {
|
||||
if (_standingFoot == NO_FOOT) {
|
||||
// pick the lowest foot
|
||||
glm::vec3 lowestPosition = (currentFoot == LEFT_FOOT) ? leftPosition : rightPosition;
|
||||
// we ignore zero length positions which can happen for a few frames until skeleton is fully loaded
|
||||
if (glm::length(lowestPosition) > 0.0f) {
|
||||
_standingFoot = currentFoot;
|
||||
_clampedFootPosition = lowestPosition;
|
||||
}
|
||||
} else {
|
||||
// swap feet
|
||||
_standingFoot = currentFoot;
|
||||
glm::vec3 nextPosition = leftPosition;
|
||||
glm::vec3 prevPosition = rightPosition;
|
||||
if (_standingFoot == RIGHT_FOOT) {
|
||||
nextPosition = rightPosition;
|
||||
prevPosition = leftPosition;
|
||||
}
|
||||
glm::vec3 oldOffset = _clampedFootPosition - prevPosition;
|
||||
_clampedFootPosition = oldOffset + nextPosition;
|
||||
offset = _clampedFootPosition - nextPosition;
|
||||
}
|
||||
} else {
|
||||
glm::vec3 nextPosition = (_standingFoot == LEFT_FOOT) ? leftPosition : rightPosition;
|
||||
offset = _clampedFootPosition - nextPosition;
|
||||
}
|
||||
|
||||
// clamp the offset to not exceed some max distance
|
||||
const float MAX_STEP_OFFSET = 1.0f;
|
||||
float stepDistance = glm::length(offset);
|
||||
if (stepDistance > MAX_STEP_OFFSET) {
|
||||
offset *= (MAX_STEP_OFFSET / stepDistance);
|
||||
}
|
||||
}
|
||||
_standingOffset = offset;
|
||||
}
|
||||
|
||||
float DENSITY_OF_WATER = 1000.0f; // kg/m^3
|
||||
float MIN_JOINT_MASS = 1.0f;
|
||||
float VERY_BIG_MASS = 1.0e6f;
|
||||
|
|
|
@ -96,10 +96,6 @@ public:
|
|||
/// \return whether or not the head was found.
|
||||
glm::vec3 getDefaultEyeModelPosition() const;
|
||||
|
||||
/// skeleton offset caused by moving feet
|
||||
void updateStandingFoot();
|
||||
const glm::vec3& getStandingOffset() const { return _standingOffset; }
|
||||
|
||||
void computeBoundingShape(const FBXGeometry& geometry);
|
||||
void renderBoundingCollisionShapes(gpu::Batch& batch, float alpha);
|
||||
float getBoundingShapeRadius() const { return _boundingShape.getRadius(); }
|
||||
|
@ -169,9 +165,6 @@ private:
|
|||
glm::vec3 _boundingShapeLocalOffset;
|
||||
|
||||
glm::vec3 _defaultEyeModelPosition;
|
||||
int _standingFoot;
|
||||
glm::vec3 _standingOffset;
|
||||
glm::vec3 _clampedFootPosition;
|
||||
|
||||
float _headClipDistance; // Near clip distance to use if no separate head model
|
||||
|
||||
@@ -117,7 +117,7 @@ void ApplicationOverlay::renderQmlUi(RenderArgs* renderArgs) {
batch.setProjectionTransform(mat4());
batch.setModelTransform(Transform());
batch.setViewTransform(Transform());
batch._glBindTexture(GL_TEXTURE_2D, _uiTexture);
batch._glActiveBindTexture(GL_TEXTURE0, GL_TEXTURE_2D, _uiTexture);

geometryCache->renderUnitQuad(batch, glm::vec4(1));
}
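Note: the same replacement pattern appears at several call sites in this commit — the separate _glActiveTexture/_glBindTexture shims are folded into a single recorded command. A minimal sketch of the call shape, assuming a gpu::Batch and a GL texture id are already in hand (the function name and the overlayTexture variable are illustrative, not from the diff):

    // GL_TEXTURE0 / GL_TEXTURE_2D come from the GL headers the engine already uses.
    void bindOverlayTexture(gpu::Batch& batch, unsigned int overlayTexture) {
        // Old pattern, removed by this commit:
        //   batch._glActiveTexture(GL_TEXTURE0);
        //   batch._glBindTexture(GL_TEXTURE_2D, overlayTexture);
        // New combined command (unit, target, texture):
        batch._glActiveBindTexture(GL_TEXTURE0, GL_TEXTURE_2D, overlayTexture);
    }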
|
@ -62,7 +62,7 @@ void AnimationReader::run() {
|
|||
QSharedPointer<Resource> animation = _animation.toStrongRef();
|
||||
if (!animation.isNull()) {
|
||||
QMetaObject::invokeMethod(animation.data(), "setGeometry",
|
||||
Q_ARG(const FBXGeometry&, readFBX(_reply->readAll(), QVariantHash())));
|
||||
Q_ARG(const FBXGeometry&, readFBX(_reply->readAll(), QVariantHash(), _reply->property("url").toString())));
|
||||
}
|
||||
_reply->deleteLater();
|
||||
}
|
||||
|
|
|
@ -177,8 +177,7 @@ void RenderableWebEntityItem::render(RenderArgs* args) {
|
|||
batch.setModelTransform(getTransformToCenter());
|
||||
bool textured = false, culled = false, emissive = false;
|
||||
if (_texture) {
|
||||
batch._glActiveTexture(GL_TEXTURE0);
|
||||
batch._glBindTexture(GL_TEXTURE_2D, _texture);
|
||||
batch._glActiveBindTexture(GL_TEXTURE0, GL_TEXTURE_2D, _texture);
|
||||
textured = emissive = true;
|
||||
}
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include <QTextStream>
|
||||
#include <QtDebug>
|
||||
#include <QtEndian>
|
||||
#include <QFileInfo>
|
||||
|
||||
#include <glm/gtc/quaternion.hpp>
|
||||
#include <glm/gtx/quaternion.hpp>
|
||||
|
@@ -1454,9 +1455,21 @@ void buildModelMesh(ExtractedMesh& extracted) {
}
#endif // USE_MODEL_MESH

QByteArray fileOnUrl(const QByteArray& filenameString, const QString& url) {
QString path = QFileInfo(url).path();
QByteArray filename = filenameString;
QFileInfo checkFile(path + "/" + filename.replace('\\', '/'));
//check if the file exists at the RelativeFileName
if (checkFile.exists() && checkFile.isFile()) {
filename = filename.replace('\\', '/');
} else {
// there is no texture at the fbx dir with the filename added. Assume it is in the fbx dir.
filename = filename.mid(qMax(filename.lastIndexOf('\\'), filename.lastIndexOf('/')) + 1);
}
return filename;
}


FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping, bool loadLightmaps, float lightmapLevel) {
FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping, const QString& url, bool loadLightmaps, float lightmapLevel) {
QHash<QString, ExtractedMesh> meshes;
QHash<QString, QString> modelIDsToNames;
QHash<QString, int> meshIDsToMeshIndices;
@ -1781,9 +1794,8 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping,
|
|||
TextureParam tex;
|
||||
foreach (const FBXNode& subobject, object.children) {
|
||||
if (subobject.name == "RelativeFilename") {
|
||||
// trim off any path information
|
||||
QByteArray filename = subobject.properties.at(0).toByteArray();
|
||||
filename = filename.mid(qMax(filename.lastIndexOf('\\'), filename.lastIndexOf('/')) + 1);
|
||||
filename = fileOnUrl(filename, url);
|
||||
textureFilenames.insert(getID(object.properties), filename);
|
||||
} else if (subobject.name == "TextureName") {
|
||||
// trim the name from the timestamp
|
||||
|
@ -1857,7 +1869,7 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping,
|
|||
foreach (const FBXNode& subobject, object.children) {
|
||||
if (subobject.name == "RelativeFilename") {
|
||||
filename = subobject.properties.at(0).toByteArray();
|
||||
filename = filename.mid(qMax(filename.lastIndexOf('\\'), filename.lastIndexOf('/')) + 1);
|
||||
filename = fileOnUrl(filename, url);
|
||||
|
||||
} else if (subobject.name == "Content" && !subobject.properties.isEmpty()) {
|
||||
content = subobject.properties.at(0).toByteArray();
|
||||
|
@@ -2717,12 +2729,12 @@ FBXGeometry extractFBXGeometry(const FBXNode& node, const QVariantHash& mapping,
return geometry;
}

FBXGeometry readFBX(const QByteArray& model, const QVariantHash& mapping, bool loadLightmaps, float lightmapLevel) {
FBXGeometry readFBX(const QByteArray& model, const QVariantHash& mapping, const QString& url, bool loadLightmaps, float lightmapLevel) {
QBuffer buffer(const_cast<QByteArray*>(&model));
buffer.open(QIODevice::ReadOnly);
return readFBX(&buffer, mapping, loadLightmaps, lightmapLevel);
return readFBX(&buffer, mapping, url, loadLightmaps, lightmapLevel);
}

FBXGeometry readFBX(QIODevice* device, const QVariantHash& mapping, bool loadLightmaps, float lightmapLevel) {
return extractFBXGeometry(parseFBX(device), mapping, loadLightmaps, lightmapLevel);
FBXGeometry readFBX(QIODevice* device, const QVariantHash& mapping, const QString& url, bool loadLightmaps, float lightmapLevel) {
return extractFBXGeometry(parseFBX(device), mapping, url, loadLightmaps, lightmapLevel);
}
@@ -271,10 +271,10 @@ Q_DECLARE_METATYPE(FBXGeometry)

/// Reads FBX geometry from the supplied model and mapping data.
/// \exception QString if an error occurs in parsing
FBXGeometry readFBX(const QByteArray& model, const QVariantHash& mapping, bool loadLightmaps = true, float lightmapLevel = 1.0f);
FBXGeometry readFBX(const QByteArray& model, const QVariantHash& mapping, const QString& url = "", bool loadLightmaps = true, float lightmapLevel = 1.0f);

/// Reads FBX geometry from the supplied model and mapping data.
/// \exception QString if an error occurs in parsing
FBXGeometry readFBX(QIODevice* device, const QVariantHash& mapping, bool loadLightmaps = true, float lightmapLevel = 1.0f);
FBXGeometry readFBX(QIODevice* device, const QVariantHash& mapping, const QString& url = "", bool loadLightmaps = true, float lightmapLevel = 1.0f);

#endif // hifi_FBXReader_h
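A small caller-side sketch of the new signature, mirroring the ModelPackager::loadModel hunk earlier in this commit: passing the model's own path/URL as the third argument lets RelativeFilename texture entries be resolved next to the FBX file (via fileOnUrl) instead of being stripped to a bare basename. The helper name, the fbxPath parameter, and the QFile plumbing are illustrative assumptions; only the readFBX call shape comes from this diff.

    #include <QFile>
    #include "FBXReader.h"

    FBXGeometry loadGeometry(const QString& fbxPath) {
        QFile fbx(fbxPath);
        fbx.open(QIODevice::ReadOnly);
        // New third argument: the source location used to resolve relative texture paths.
        return readFBX(fbx.readAll(), QVariantHash(), fbxPath);
    }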
|
|
@ -306,7 +306,8 @@ bool OBJReader::parseOBJGroup(OBJTokenizer& tokenizer, const QVariantHash& mappi
|
|||
}
|
||||
QByteArray token = tokenizer.getDatum();
|
||||
//qCDebug(modelformat) << token;
|
||||
if (token == "g") {
|
||||
// we don't support separate objects in the same file, so treat "o" the same as "g".
|
||||
if (token == "g" || token == "o") {
|
||||
if (sawG) {
|
||||
// we've encountered the beginning of the next group.
|
||||
tokenizer.pushBackToken(OBJTokenizer::DATUM_TOKEN);
|
||||
|
|
|
@@ -284,3 +284,7 @@ void Batch::getQuery(const QueryPointer& query) {
_params.push_back(_queries.cache(query));
}

void Batch::resetStages() {
ADD_COMMAND(resetStages);
}
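For context, a minimal usage sketch of the new command, based on the Application::paintGL hunk earlier in this commit: the caller records resetStages() into a batch and submits it, and the backend then returns every stage cache (input, pipeline, transform, uniform, resource, output, query) to its default state. The endOfFrame wrapper name and the include paths are illustrative, not part of the diff.

    #include <gpu/Batch.h>
    #include <gpu/Context.h>

    void endOfFrame(gpu::Context* context) {
        gpu::Batch batch;
        batch.resetStages();     // queued as COMMAND_resetStages
        context->render(batch);  // dispatched to GLBackend::do_resetStages()
    }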
@ -94,7 +94,7 @@ public:
|
|||
void setResourceTexture(uint32 slot, const TexturePointer& view);
|
||||
void setResourceTexture(uint32 slot, const TextureView& view); // not a command, just a shortcut from a TextureView
|
||||
|
||||
// Framebuffer Stage
|
||||
// Ouput Stage
|
||||
void setFramebuffer(const FramebufferPointer& framebuffer);
|
||||
|
||||
// Clear framebuffer layers
|
||||
|
@ -113,34 +113,17 @@ public:
|
|||
void endQuery(const QueryPointer& query);
|
||||
void getQuery(const QueryPointer& query);
|
||||
|
||||
// Reset the stage caches and states
|
||||
void resetStages();
|
||||
|
||||
// TODO: As long as we have gl calls explicitely issued from interface
|
||||
// code, we need to be able to record and batch these calls. THe long
|
||||
// term strategy is to get rid of any GL calls in favor of the HIFI GPU API
|
||||
// For now, instead of calling the raw gl Call, use the equivalent call on the batch so the call is beeing recorded
|
||||
// THe implementation of these functions is in GLBackend.cpp
|
||||
|
||||
void _glEnable(unsigned int cap);
|
||||
void _glDisable(unsigned int cap);
|
||||
void _glActiveBindTexture(unsigned int unit, unsigned int target, unsigned int texture);
|
||||
|
||||
void _glEnableClientState(unsigned int array);
|
||||
void _glDisableClientState(unsigned int array);
|
||||
|
||||
void _glCullFace(unsigned int mode);
|
||||
void _glAlphaFunc(unsigned int func, float ref);
|
||||
|
||||
void _glDepthFunc(unsigned int func);
|
||||
void _glDepthMask(unsigned char flag);
|
||||
void _glDepthRange(float zNear, float zFar);
|
||||
|
||||
void _glBindBuffer(unsigned int target, unsigned int buffer);
|
||||
|
||||
void _glBindTexture(unsigned int target, unsigned int texture);
|
||||
void _glActiveTexture(unsigned int texture);
|
||||
void _glTexParameteri(unsigned int target, unsigned int pname, int param);
|
||||
|
||||
void _glDrawBuffers(int n, const unsigned int* bufs);
|
||||
|
||||
void _glUseProgram(unsigned int program);
|
||||
void _glUniform1i(int location, int v0);
|
||||
void _glUniform1f(int location, float v0);
|
||||
void _glUniform2f(int location, float v0, float v1);
|
||||
|
@ -150,9 +133,6 @@ public:
|
|||
void _glUniform4iv(int location, int count, const int* value);
|
||||
void _glUniformMatrix4fv(int location, int count, unsigned char transpose, const float* value);
|
||||
|
||||
void _glEnableVertexAttribArray(int location);
|
||||
void _glDisableVertexAttribArray(int location);
|
||||
|
||||
void _glColor4f(float red, float green, float blue, float alpha);
|
||||
void _glLineWidth(float width);
|
||||
|
||||
|
@ -186,31 +166,13 @@ public:
|
|||
COMMAND_endQuery,
|
||||
COMMAND_getQuery,
|
||||
|
||||
COMMAND_resetStages,
|
||||
|
||||
// TODO: As long as we have gl calls explicitely issued from interface
|
||||
// code, we need to be able to record and batch these calls. THe long
|
||||
// term strategy is to get rid of any GL calls in favor of the HIFI GPU API
|
||||
COMMAND_glEnable,
|
||||
COMMAND_glDisable,
|
||||
COMMAND_glActiveBindTexture,
|
||||
|
||||
COMMAND_glEnableClientState,
|
||||
COMMAND_glDisableClientState,
|
||||
|
||||
COMMAND_glCullFace,
|
||||
COMMAND_glAlphaFunc,
|
||||
|
||||
COMMAND_glDepthFunc,
|
||||
COMMAND_glDepthMask,
|
||||
COMMAND_glDepthRange,
|
||||
|
||||
COMMAND_glBindBuffer,
|
||||
|
||||
COMMAND_glBindTexture,
|
||||
COMMAND_glActiveTexture,
|
||||
COMMAND_glTexParameteri,
|
||||
|
||||
COMMAND_glDrawBuffers,
|
||||
|
||||
COMMAND_glUseProgram,
|
||||
COMMAND_glUniform1i,
|
||||
COMMAND_glUniform1f,
|
||||
COMMAND_glUniform2f,
|
||||
|
@ -220,9 +182,6 @@ public:
|
|||
COMMAND_glUniform4iv,
|
||||
COMMAND_glUniformMatrix4fv,
|
||||
|
||||
COMMAND_glEnableVertexAttribArray,
|
||||
COMMAND_glDisableVertexAttribArray,
|
||||
|
||||
COMMAND_glColor4f,
|
||||
COMMAND_glLineWidth,
|
||||
|
||||
|
|
|
@ -45,3 +45,4 @@ void Context::syncCache() {
|
|||
void Context::downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) {
|
||||
_backend->downloadFramebuffer(srcFramebuffer, region, destImage);
|
||||
}
|
||||
|
||||
|
|
|
@ -46,28 +46,10 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
|
|||
(&::gpu::GLBackend::do_endQuery),
|
||||
(&::gpu::GLBackend::do_getQuery),
|
||||
|
||||
(&::gpu::GLBackend::do_glEnable),
|
||||
(&::gpu::GLBackend::do_glDisable),
|
||||
(&::gpu::GLBackend::do_resetStages),
|
||||
|
||||
(&::gpu::GLBackend::do_glEnableClientState),
|
||||
(&::gpu::GLBackend::do_glDisableClientState),
|
||||
(&::gpu::GLBackend::do_glActiveBindTexture),
|
||||
|
||||
(&::gpu::GLBackend::do_glCullFace),
|
||||
(&::gpu::GLBackend::do_glAlphaFunc),
|
||||
|
||||
(&::gpu::GLBackend::do_glDepthFunc),
|
||||
(&::gpu::GLBackend::do_glDepthMask),
|
||||
(&::gpu::GLBackend::do_glDepthRange),
|
||||
|
||||
(&::gpu::GLBackend::do_glBindBuffer),
|
||||
|
||||
(&::gpu::GLBackend::do_glBindTexture),
|
||||
(&::gpu::GLBackend::do_glActiveTexture),
|
||||
(&::gpu::GLBackend::do_glTexParameteri),
|
||||
|
||||
(&::gpu::GLBackend::do_glDrawBuffers),
|
||||
|
||||
(&::gpu::GLBackend::do_glUseProgram),
|
||||
(&::gpu::GLBackend::do_glUniform1i),
|
||||
(&::gpu::GLBackend::do_glUniform1f),
|
||||
(&::gpu::GLBackend::do_glUniform2f),
|
||||
|
@ -77,9 +59,6 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
|
|||
(&::gpu::GLBackend::do_glUniform4iv),
|
||||
(&::gpu::GLBackend::do_glUniformMatrix4fv),
|
||||
|
||||
(&::gpu::GLBackend::do_glEnableVertexAttribArray),
|
||||
(&::gpu::GLBackend::do_glDisableVertexAttribArray),
|
||||
|
||||
(&::gpu::GLBackend::do_glColor4f),
|
||||
(&::gpu::GLBackend::do_glLineWidth),
|
||||
};
|
||||
|
@ -135,6 +114,8 @@ GLBackend::GLBackend() :
|
|||
}
|
||||
|
||||
GLBackend::~GLBackend() {
|
||||
resetStages();
|
||||
|
||||
killInput();
|
||||
killTransform();
|
||||
}
|
||||
|
@ -253,6 +234,22 @@ void GLBackend::do_drawIndexedInstanced(Batch& batch, uint32 paramOffset) {
|
|||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void GLBackend::do_resetStages(Batch& batch, uint32 paramOffset) {
|
||||
resetStages();
|
||||
}
|
||||
|
||||
void GLBackend::resetStages() {
|
||||
resetInputStage();
|
||||
resetPipelineStage();
|
||||
resetTransformStage();
|
||||
resetUniformStage();
|
||||
resetResourceStage();
|
||||
resetOutputStage();
|
||||
resetQueryStage();
|
||||
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
// TODO: As long as we have gl calls explicitely issued from interface
|
||||
// code, we need to be able to record and batch these calls. THe long
|
||||
// term strategy is to get rid of any GL calls in favor of the HIFI GPU API
|
||||
|
@ -262,211 +259,24 @@ void GLBackend::do_drawIndexedInstanced(Batch& batch, uint32 paramOffset) {
|
|||
//#define DO_IT_NOW(call, offset) runLastCommand();
|
||||
#define DO_IT_NOW(call, offset)
|
||||
|
||||
|
||||
void Batch::_glEnable(GLenum cap) {
|
||||
ADD_COMMAND_GL(glEnable);
|
||||
|
||||
_params.push_back(cap);
|
||||
|
||||
DO_IT_NOW(_glEnable, 1);
|
||||
}
|
||||
void GLBackend::do_glEnable(Batch& batch, uint32 paramOffset) {
|
||||
glEnable(batch._params[paramOffset]._uint);
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glDisable(GLenum cap) {
|
||||
ADD_COMMAND_GL(glDisable);
|
||||
|
||||
_params.push_back(cap);
|
||||
|
||||
DO_IT_NOW(_glDisable, 1);
|
||||
}
|
||||
void GLBackend::do_glDisable(Batch& batch, uint32 paramOffset) {
|
||||
glDisable(batch._params[paramOffset]._uint);
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glEnableClientState(GLenum array) {
|
||||
ADD_COMMAND_GL(glEnableClientState);
|
||||
|
||||
_params.push_back(array);
|
||||
|
||||
DO_IT_NOW(_glEnableClientState, 1);
|
||||
}
|
||||
void GLBackend::do_glEnableClientState(Batch& batch, uint32 paramOffset) {
|
||||
glEnableClientState(batch._params[paramOffset]._uint);
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glDisableClientState(GLenum array) {
|
||||
ADD_COMMAND_GL(glDisableClientState);
|
||||
|
||||
_params.push_back(array);
|
||||
|
||||
DO_IT_NOW(_glDisableClientState, 1);
|
||||
}
|
||||
void GLBackend::do_glDisableClientState(Batch& batch, uint32 paramOffset) {
|
||||
glDisableClientState(batch._params[paramOffset]._uint);
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glCullFace(GLenum mode) {
|
||||
ADD_COMMAND_GL(glCullFace);
|
||||
|
||||
_params.push_back(mode);
|
||||
|
||||
DO_IT_NOW(_glCullFace, 1);
|
||||
}
|
||||
void GLBackend::do_glCullFace(Batch& batch, uint32 paramOffset) {
|
||||
glCullFace(batch._params[paramOffset]._uint);
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glAlphaFunc(GLenum func, GLclampf ref) {
|
||||
ADD_COMMAND_GL(glAlphaFunc);
|
||||
|
||||
_params.push_back(ref);
|
||||
_params.push_back(func);
|
||||
|
||||
DO_IT_NOW(_glAlphaFunc, 2);
|
||||
}
|
||||
void GLBackend::do_glAlphaFunc(Batch& batch, uint32 paramOffset) {
|
||||
glAlphaFunc(
|
||||
batch._params[paramOffset + 1]._uint,
|
||||
batch._params[paramOffset + 0]._float);
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glDepthFunc(GLenum func) {
|
||||
ADD_COMMAND_GL(glDepthFunc);
|
||||
|
||||
_params.push_back(func);
|
||||
|
||||
DO_IT_NOW(_glDepthFunc, 1);
|
||||
}
|
||||
void GLBackend::do_glDepthFunc(Batch& batch, uint32 paramOffset) {
|
||||
glDepthFunc(batch._params[paramOffset]._uint);
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glDepthMask(GLboolean flag) {
|
||||
ADD_COMMAND_GL(glDepthMask);
|
||||
|
||||
_params.push_back(flag);
|
||||
|
||||
DO_IT_NOW(_glDepthMask, 1);
|
||||
}
|
||||
void GLBackend::do_glDepthMask(Batch& batch, uint32 paramOffset) {
|
||||
glDepthMask(batch._params[paramOffset]._uint);
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glDepthRange(GLfloat zNear, GLfloat zFar) {
|
||||
ADD_COMMAND_GL(glDepthRange);
|
||||
|
||||
_params.push_back(zFar);
|
||||
_params.push_back(zNear);
|
||||
|
||||
DO_IT_NOW(_glDepthRange, 2);
|
||||
}
|
||||
void GLBackend::do_glDepthRange(Batch& batch, uint32 paramOffset) {
|
||||
glDepthRange(
|
||||
batch._params[paramOffset + 1]._float,
|
||||
batch._params[paramOffset + 0]._float);
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glBindBuffer(GLenum target, GLuint buffer) {
|
||||
ADD_COMMAND_GL(glBindBuffer);
|
||||
|
||||
_params.push_back(buffer);
|
||||
_params.push_back(target);
|
||||
|
||||
DO_IT_NOW(_glBindBuffer, 2);
|
||||
}
|
||||
void GLBackend::do_glBindBuffer(Batch& batch, uint32 paramOffset) {
|
||||
glBindBuffer(
|
||||
batch._params[paramOffset + 1]._uint,
|
||||
batch._params[paramOffset + 0]._uint);
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glBindTexture(GLenum target, GLuint texture) {
|
||||
ADD_COMMAND_GL(glBindTexture);
|
||||
void Batch::_glActiveBindTexture(GLenum unit, GLenum target, GLuint texture) {
|
||||
ADD_COMMAND_GL(glActiveBindTexture);
|
||||
|
||||
_params.push_back(texture);
|
||||
_params.push_back(target);
|
||||
_params.push_back(unit);
|
||||
|
||||
DO_IT_NOW(_glBindTexture, 2);
|
||||
|
||||
DO_IT_NOW(_glActiveBindTexture, 3);
|
||||
}
|
||||
void GLBackend::do_glBindTexture(Batch& batch, uint32 paramOffset) {
|
||||
void GLBackend::do_glActiveBindTexture(Batch& batch, uint32 paramOffset) {
|
||||
glActiveTexture(batch._params[paramOffset + 2]._uint);
|
||||
glBindTexture(
|
||||
batch._params[paramOffset + 1]._uint,
|
||||
batch._params[paramOffset + 0]._uint);
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glActiveTexture(GLenum texture) {
|
||||
ADD_COMMAND_GL(glActiveTexture);
|
||||
|
||||
_params.push_back(texture);
|
||||
|
||||
DO_IT_NOW(_glActiveTexture, 1);
|
||||
}
|
||||
void GLBackend::do_glActiveTexture(Batch& batch, uint32 paramOffset) {
|
||||
glActiveTexture(batch._params[paramOffset]._uint);
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glTexParameteri(GLenum target, GLenum pname, GLint param) {
|
||||
ADD_COMMAND_GL(glTexParameteri);
|
||||
|
||||
_params.push_back(param);
|
||||
_params.push_back(pname);
|
||||
_params.push_back(target);
|
||||
|
||||
DO_IT_NOW(glTexParameteri, 3);
|
||||
}
|
||||
void GLBackend::do_glTexParameteri(Batch& batch, uint32 paramOffset) {
|
||||
glTexParameteri(batch._params[paramOffset + 2]._uint,
|
||||
batch._params[paramOffset + 1]._uint,
|
||||
batch._params[paramOffset + 0]._int);
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glDrawBuffers(GLsizei n, const GLenum* bufs) {
|
||||
ADD_COMMAND_GL(glDrawBuffers);
|
||||
|
||||
_params.push_back(cacheData(n * sizeof(GLenum), bufs));
|
||||
_params.push_back(n);
|
||||
|
||||
DO_IT_NOW(_glDrawBuffers, 2);
|
||||
}
|
||||
void GLBackend::do_glDrawBuffers(Batch& batch, uint32 paramOffset) {
|
||||
glDrawBuffers(
|
||||
batch._params[paramOffset + 1]._uint,
|
||||
(const GLenum*)batch.editData(batch._params[paramOffset + 0]._uint));
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glUseProgram(GLuint program) {
|
||||
ADD_COMMAND_GL(glUseProgram);
|
||||
|
||||
_params.push_back(program);
|
||||
|
||||
DO_IT_NOW(_glUseProgram, 1);
|
||||
}
|
||||
void GLBackend::do_glUseProgram(Batch& batch, uint32 paramOffset) {
|
||||
|
||||
_pipeline._program = batch._params[paramOffset]._uint;
|
||||
// for this call we still want to execute the glUseProgram in the order of the glCOmmand to avoid any issue
|
||||
_pipeline._invalidProgram = false;
|
||||
glUseProgram(_pipeline._program);
|
||||
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glUniform1i(GLint location, GLint v0) {
|
||||
if (location < 0) {
|
||||
return;
|
||||
|
@ -667,30 +477,6 @@ void GLBackend::do_glUniformMatrix4fv(Batch& batch, uint32 paramOffset) {
|
|||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glEnableVertexAttribArray(GLint location) {
|
||||
ADD_COMMAND_GL(glEnableVertexAttribArray);
|
||||
|
||||
_params.push_back(location);
|
||||
|
||||
DO_IT_NOW(_glEnableVertexAttribArray, 1);
|
||||
}
|
||||
void GLBackend::do_glEnableVertexAttribArray(Batch& batch, uint32 paramOffset) {
|
||||
glEnableVertexAttribArray(batch._params[paramOffset]._uint);
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glDisableVertexAttribArray(GLint location) {
|
||||
ADD_COMMAND_GL(glDisableVertexAttribArray);
|
||||
|
||||
_params.push_back(location);
|
||||
|
||||
DO_IT_NOW(_glDisableVertexAttribArray, 1);
|
||||
}
|
||||
void GLBackend::do_glDisableVertexAttribArray(Batch& batch, uint32 paramOffset) {
|
||||
glDisableVertexAttribArray(batch._params[paramOffset]._uint);
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
|
||||
void Batch::_glColor4f(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha) {
|
||||
ADD_COMMAND_GL(glColor4f);
|
||||
|
||||
|
|
|
@ -193,8 +193,17 @@ public:
|
|||
static const int MAX_NUM_ATTRIBUTES = Stream::NUM_INPUT_SLOTS;
|
||||
static const int MAX_NUM_INPUT_BUFFERS = 16;
|
||||
|
||||
uint32 getNumInputBuffers() const { return _input._buffersState.size(); }
|
||||
uint32 getNumInputBuffers() const { return _input._invalidBuffers.size(); }
|
||||
|
||||
// this is the maximum per shader stage on the low end apple
|
||||
// TODO make it platform dependant at init time
|
||||
static const int MAX_NUM_UNIFORM_BUFFERS = 12;
|
||||
uint32 getMaxNumUniformBuffers() const { return MAX_NUM_UNIFORM_BUFFERS; }
|
||||
|
||||
// this is the maximum per shader stage on the low end apple
|
||||
// TODO make it platform dependant at init time
|
||||
static const int MAX_NUM_RESOURCE_TEXTURES = 16;
|
||||
uint32 getMaxNumResourceTextures() const { return MAX_NUM_RESOURCE_TEXTURES; }
|
||||
|
||||
// The State setters called by the GLState::Commands when a new state is assigned
|
||||
void do_setStateFillMode(int32 mode);
|
||||
|
@ -248,12 +257,16 @@ protected:
|
|||
void killInput();
|
||||
void syncInputStateCache();
|
||||
void updateInput();
|
||||
void resetInputStage();
|
||||
struct InputStageState {
|
||||
bool _invalidFormat = true;
|
||||
Stream::FormatPointer _format;
|
||||
|
||||
typedef std::bitset<MAX_NUM_ATTRIBUTES> ActivationCache;
|
||||
ActivationCache _attributeActivation;
|
||||
|
||||
typedef std::bitset<MAX_NUM_INPUT_BUFFERS> BuffersState;
|
||||
BuffersState _buffersState;
|
||||
BuffersState _invalidBuffers;
|
||||
|
||||
Buffers _buffers;
|
||||
Offsets _bufferOffsets;
|
||||
|
@ -264,23 +277,20 @@ protected:
|
|||
Offset _indexBufferOffset;
|
||||
Type _indexBufferType;
|
||||
|
||||
typedef std::bitset<MAX_NUM_ATTRIBUTES> ActivationCache;
|
||||
ActivationCache _attributeActivation;
|
||||
|
||||
GLuint _defaultVAO;
|
||||
|
||||
InputStageState() :
|
||||
_invalidFormat(true),
|
||||
_format(0),
|
||||
_buffersState(0),
|
||||
_buffers(_buffersState.size(), BufferPointer(0)),
|
||||
_bufferOffsets(_buffersState.size(), 0),
|
||||
_bufferStrides(_buffersState.size(), 0),
|
||||
_bufferVBOs(_buffersState.size(), 0),
|
||||
_attributeActivation(0),
|
||||
_invalidBuffers(0),
|
||||
_buffers(_invalidBuffers.size(), BufferPointer(0)),
|
||||
_bufferOffsets(_invalidBuffers.size(), 0),
|
||||
_bufferStrides(_invalidBuffers.size(), 0),
|
||||
_bufferVBOs(_invalidBuffers.size(), 0),
|
||||
_indexBuffer(0),
|
||||
_indexBufferOffset(0),
|
||||
_indexBufferType(UINT32),
|
||||
_attributeActivation(0),
|
||||
_defaultVAO(0)
|
||||
{}
|
||||
} _input;
|
||||
|
@ -296,6 +306,7 @@ protected:
|
|||
// Synchronize the state cache of this Backend with the actual real state of the GL Context
|
||||
void syncTransformStateCache();
|
||||
void updateTransform();
|
||||
void resetTransformStage();
|
||||
struct TransformStageState {
|
||||
TransformObject _transformObject;
|
||||
TransformCamera _transformCamera;
|
||||
|
@ -328,12 +339,31 @@ protected:
|
|||
|
||||
// Uniform Stage
|
||||
void do_setUniformBuffer(Batch& batch, uint32 paramOffset);
|
||||
|
||||
void releaseUniformBuffer(int slot);
|
||||
void resetUniformStage();
|
||||
struct UniformStageState {
|
||||
Buffers _buffers;
|
||||
|
||||
UniformStageState():
|
||||
_buffers(MAX_NUM_UNIFORM_BUFFERS, nullptr)
|
||||
{}
|
||||
} _uniform;
|
||||
|
||||
// Resource Stage
|
||||
void do_setResourceTexture(Batch& batch, uint32 paramOffset);
|
||||
|
||||
struct UniformStageState {
|
||||
|
||||
};
|
||||
|
||||
void releaseResourceTexture(int slot);
|
||||
void resetResourceStage();
|
||||
struct ResourceStageState {
|
||||
Textures _textures;
|
||||
|
||||
ResourceStageState():
|
||||
_textures(MAX_NUM_RESOURCE_TEXTURES, nullptr)
|
||||
{}
|
||||
|
||||
} _resource;
|
||||
|
||||
// Pipeline Stage
|
||||
void do_setPipeline(Batch& batch, uint32 paramOffset);
|
||||
void do_setStateBlendFactor(Batch& batch, uint32 paramOffset);
|
||||
|
@ -347,6 +377,7 @@ protected:
|
|||
void syncPipelineStateCache();
|
||||
// Grab the actual gl state into it's gpu::State equivalent. THis is used by the above call syncPipleineStateCache()
|
||||
void getCurrentGLState(State::Data& state);
|
||||
void resetPipelineStage();
|
||||
|
||||
struct PipelineStageState {
|
||||
|
||||
|
@ -386,6 +417,7 @@ protected:
|
|||
|
||||
// Synchronize the state cache of this Backend with the actual real state of the GL Context
|
||||
void syncOutputStateCache();
|
||||
void resetOutputStage();
|
||||
|
||||
struct OutputStageState {
|
||||
|
||||
|
@ -400,31 +432,20 @@ protected:
|
|||
void do_endQuery(Batch& batch, uint32 paramOffset);
|
||||
void do_getQuery(Batch& batch, uint32 paramOffset);
|
||||
|
||||
void resetQueryStage();
|
||||
struct QueryStageState {
|
||||
|
||||
};
|
||||
|
||||
// Reset stages
|
||||
void do_resetStages(Batch& batch, uint32 paramOffset);
|
||||
void resetStages();
|
||||
|
||||
// TODO: As long as we have gl calls explicitely issued from interface
|
||||
// code, we need to be able to record and batch these calls. THe long
|
||||
// term strategy is to get rid of any GL calls in favor of the HIFI GPU API
|
||||
void do_glEnable(Batch& batch, uint32 paramOffset);
|
||||
void do_glDisable(Batch& batch, uint32 paramOffset);
|
||||
void do_glActiveBindTexture(Batch& batch, uint32 paramOffset);
|
||||
|
||||
void do_glEnableClientState(Batch& batch, uint32 paramOffset);
|
||||
void do_glDisableClientState(Batch& batch, uint32 paramOffset);
|
||||
|
||||
void do_glCullFace(Batch& batch, uint32 paramOffset);
|
||||
void do_glAlphaFunc(Batch& batch, uint32 paramOffset);
|
||||
|
||||
void do_glDepthFunc(Batch& batch, uint32 paramOffset);
|
||||
void do_glDepthMask(Batch& batch, uint32 paramOffset);
|
||||
void do_glDepthRange(Batch& batch, uint32 paramOffset);
|
||||
|
||||
void do_glBindBuffer(Batch& batch, uint32 paramOffset);
|
||||
|
||||
void do_glBindTexture(Batch& batch, uint32 paramOffset);
|
||||
void do_glActiveTexture(Batch& batch, uint32 paramOffset);
|
||||
void do_glTexParameteri(Batch& batch, uint32 paramOffset);
|
||||
|
||||
void do_glDrawBuffers(Batch& batch, uint32 paramOffset);
|
||||
|
||||
void do_glUseProgram(Batch& batch, uint32 paramOffset);
|
||||
void do_glUniform1i(Batch& batch, uint32 paramOffset);
|
||||
void do_glUniform1f(Batch& batch, uint32 paramOffset);
|
||||
void do_glUniform2f(Batch& batch, uint32 paramOffset);
|
||||
|
@ -434,9 +455,6 @@ protected:
|
|||
void do_glUniform4iv(Batch& batch, uint32 paramOffset);
|
||||
void do_glUniformMatrix4fv(Batch& batch, uint32 paramOffset);
|
||||
|
||||
void do_glEnableVertexAttribArray(Batch& batch, uint32 paramOffset);
|
||||
void do_glDisableVertexAttribArray(Batch& batch, uint32 paramOffset);
|
||||
|
||||
void do_glColor4f(Batch& batch, uint32 paramOffset);
|
||||
void do_glLineWidth(Batch& batch, uint32 paramOffset);
|
||||
|
||||
|
|
|
@ -52,7 +52,7 @@ void GLBackend::do_setInputBuffer(Batch& batch, uint32 paramOffset) {
|
|||
}
|
||||
|
||||
if (isModified) {
|
||||
_input._buffersState.set(channel);
|
||||
_input._invalidBuffers.set(channel);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -154,7 +154,7 @@ void GLBackend::updateInput() {
|
|||
_stats._ISNumFormatChanges++;
|
||||
}
|
||||
|
||||
if (_input._buffersState.any()) {
|
||||
if (_input._invalidBuffers.any()) {
|
||||
int numBuffers = _input._buffers.size();
|
||||
auto buffer = _input._buffers.data();
|
||||
auto vbo = _input._bufferVBOs.data();
|
||||
|
@ -162,7 +162,7 @@ void GLBackend::updateInput() {
|
|||
auto stride = _input._bufferStrides.data();
|
||||
|
||||
for (int bufferNum = 0; bufferNum < numBuffers; bufferNum++) {
|
||||
if (_input._buffersState.test(bufferNum)) {
|
||||
if (_input._invalidBuffers.test(bufferNum)) {
|
||||
glBindVertexBuffer(bufferNum, (*vbo), (*offset), (*stride));
|
||||
}
|
||||
buffer++;
|
||||
|
@ -170,11 +170,11 @@ void GLBackend::updateInput() {
|
|||
offset++;
|
||||
stride++;
|
||||
}
|
||||
_input._buffersState.reset();
|
||||
_input._invalidBuffers.reset();
|
||||
(void) CHECK_GL_ERROR();
|
||||
}
|
||||
#else
|
||||
if (_input._invalidFormat || _input._buffersState.any()) {
|
||||
if (_input._invalidFormat || _input._invalidBuffers.any()) {
|
||||
|
||||
if (_input._invalidFormat) {
|
||||
InputStageState::ActivationCache newActivation;
|
||||
|
@ -232,7 +232,7 @@ void GLBackend::updateInput() {
|
|||
if ((channelIt).first < buffers.size()) {
|
||||
int bufferNum = (channelIt).first;
|
||||
|
||||
if (_input._buffersState.test(bufferNum) || _input._invalidFormat) {
|
||||
if (_input._invalidBuffers.test(bufferNum) || _input._invalidFormat) {
|
||||
// GLuint vbo = gpu::GLBackend::getBufferID((*buffers[bufferNum]));
|
||||
GLuint vbo = _input._bufferVBOs[bufferNum];
|
||||
if (boundVBO != vbo) {
|
||||
|
@ -240,7 +240,7 @@ void GLBackend::updateInput() {
|
|||
(void) CHECK_GL_ERROR();
|
||||
boundVBO = vbo;
|
||||
}
|
||||
_input._buffersState[bufferNum] = false;
|
||||
_input._invalidBuffers[bufferNum] = false;
|
||||
|
||||
for (unsigned int i = 0; i < channel._slots.size(); i++) {
|
||||
const Stream::Attribute& attrib = attributes.at(channel._slots[i]);
|
||||
|
@ -285,6 +285,51 @@ void GLBackend::updateInput() {
|
|||
#endif
|
||||
}
|
||||
|
||||
void GLBackend::resetInputStage() {
|
||||
// Reset index buffer
|
||||
_input._indexBufferType = UINT32;
|
||||
_input._indexBufferOffset = 0;
|
||||
_input._indexBuffer.reset();
|
||||
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
|
||||
(void) CHECK_GL_ERROR();
|
||||
|
||||
|
||||
#if defined(SUPPORT_VAO)
|
||||
// TODO
|
||||
#else
|
||||
glBindBuffer(GL_ARRAY_BUFFER, 0);
|
||||
|
||||
size_t i = 0;
|
||||
#if defined(SUPPORT_LEGACY_OPENGL)
|
||||
for (; i < NUM_CLASSIC_ATTRIBS; i++) {
|
||||
glDisableClientState(attributeSlotToClassicAttribName[i]);
|
||||
}
|
||||
glVertexPointer(4, GL_FLOAT, 0, 0);
|
||||
glNormalPointer(GL_FLOAT, 0, 0);
|
||||
glColorPointer(4, GL_FLOAT, 0, 0);
|
||||
glTexCoordPointer(4, GL_FLOAT, 0, 0);
|
||||
#endif
|
||||
|
||||
for (; i < _input._attributeActivation.size(); i++) {
|
||||
glDisableVertexAttribArray(i);
|
||||
glVertexAttribPointer(i, 4, GL_FLOAT, GL_FALSE, 0, 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Reset vertex buffer and format
|
||||
_input._format.reset();
|
||||
_input._invalidFormat = false;
|
||||
_input._attributeActivation.reset();
|
||||
|
||||
for (int i = 0; i < _input._buffers.size(); i++) {
|
||||
_input._buffers[i].reset();
|
||||
_input._bufferOffsets[i] = 0;
|
||||
_input._bufferStrides[i] = 0;
|
||||
_input._bufferVBOs[i] = 0;
|
||||
}
|
||||
_input._invalidBuffers.reset();
|
||||
|
||||
}
|
||||
|
||||
void GLBackend::do_setIndexBuffer(Batch& batch, uint32 paramOffset) {
|
||||
_input._indexBufferType = (Type) batch._params[paramOffset + 2]._uint;
|
||||
|
|
|
@ -177,6 +177,13 @@ void GLBackend::syncOutputStateCache() {
|
|||
_output._framebuffer.reset();
|
||||
}
|
||||
|
||||
void GLBackend::resetOutputStage() {
|
||||
if (_output._framebuffer) {
|
||||
_output._framebuffer.reset();
|
||||
_output._drawFBO = 0;
|
||||
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
|
||||
}
|
||||
}
|
||||
|
||||
void GLBackend::do_setFramebuffer(Batch& batch, uint32 paramOffset) {
|
||||
auto framebuffer = batch._framebuffers.get(batch._params[paramOffset]._uint);
|
||||
|
@@ -206,12 +213,21 @@ void GLBackend::do_clearFramebuffer(Batch& batch, uint32 paramOffset) {
    if (masks & Framebuffer::BUFFER_STENCIL) {
        glClearStencil(stencil);
        glmask |= GL_STENCIL_BUFFER_BIT;
        // TODO: we will probably need to also check the write mask of stencil like we do
        // for depth buffer, but as a famous Fez owner would say, "We'll cross that bridge when we come to it"
    }

    bool restoreDepthMask = false;
    if (masks & Framebuffer::BUFFER_DEPTH) {
        glClearDepth(depth);
        glmask |= GL_DEPTH_BUFFER_BIT;
    }

        bool cacheDepthMask = _pipeline._stateCache.depthTest.getWriteMask();
        if (!cacheDepthMask) {
            restoreDepthMask = true;
            glDepthMask(GL_TRUE);
        }
    }

    std::vector<GLenum> drawBuffers;
    if (masks & Framebuffer::BUFFER_COLORS) {
@@ -245,6 +261,11 @@ void GLBackend::do_clearFramebuffer(Batch& batch, uint32 paramOffset) {
        glDisable(GL_SCISSOR_TEST);
    }

    // Restore the depth write mask, i.e. turn it back off
    if (restoreDepthMask) {
        glDepthMask(GL_FALSE);
    }

    // Restore the color draw buffers only if a framebuffer is bound
    if (_output._framebuffer && !drawBuffers.empty()) {
        auto glFramebuffer = syncGPUObject(*_output._framebuffer);
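The do_clearFramebuffer change above forces the depth write mask on for the duration of the clear and then restores it, because glClear only writes depth when glDepthMask is enabled. A minimal standalone sketch of the same pattern in raw OpenGL (independent of the backend's state cache, which it queries instead):

    // Sketch only: assumes a current GL context and the usual GL headers.
    void clearDepthPreservingMask(float clearDepth) {
        GLboolean previousMask = GL_TRUE;
        glGetBooleanv(GL_DEPTH_WRITEMASK, &previousMask);  // query GL instead of a cached state

        if (!previousMask) {
            glDepthMask(GL_TRUE);                          // glClear honors the depth write mask
        }
        glClearDepth(clearDepth);
        glClear(GL_DEPTH_BUFFER_BIT);
        if (!previousMask) {
            glDepthMask(GL_FALSE);                         // put the caller's mask back
        }
    }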
@@ -164,15 +164,74 @@ void GLBackend::updatePipeline() {
#endif
}

void GLBackend::resetPipelineStage() {
    // First reset State to default
    State::Signature resetSignature(0);
    resetPipelineState(resetSignature);
    _pipeline._state = nullptr;
    _pipeline._invalidState = false;

    // Second the shader side
    _pipeline._invalidProgram = false;
    _pipeline._program = 0;
    _pipeline._pipeline.reset();
    glUseProgram(0);
}

void GLBackend::releaseUniformBuffer(int slot) {
#if (GPU_FEATURE_PROFILE == GPU_CORE)
    auto& buf = _uniform._buffers[slot];
    if (buf) {
        auto* object = Backend::getGPUObject<GLBackend::GLBuffer>(*buf);
        if (object) {
            GLuint bo = object->_buffer;
            glBindBufferBase(GL_UNIFORM_BUFFER, slot, 0); // RELEASE

            (void) CHECK_GL_ERROR();
        }
        buf.reset();
    }
#endif
}

void GLBackend::resetUniformStage() {
    for (int i = 0; i < _uniform._buffers.size(); i++) {
        releaseUniformBuffer(i);
    }
}
void GLBackend::do_setUniformBuffer(Batch& batch, uint32 paramOffset) {
    GLuint slot = batch._params[paramOffset + 3]._uint;
    BufferPointer uniformBuffer = batch._buffers.get(batch._params[paramOffset + 2]._uint);
    GLintptr rangeStart = batch._params[paramOffset + 1]._uint;
    GLsizeiptr rangeSize = batch._params[paramOffset + 0]._uint;

#if (GPU_FEATURE_PROFILE == GPU_CORE)
    GLuint bo = getBufferID(*uniformBuffer);
    glBindBufferRange(GL_UNIFORM_BUFFER, slot, bo, rangeStart, rangeSize);
    if (!uniformBuffer) {
        releaseUniformBuffer(slot);
        return;
    }

    // check cache before thinking
    if (_uniform._buffers[slot] == uniformBuffer) {
        return;
    }

    // Sync BufferObject
    auto* object = GLBackend::syncGPUObject(*uniformBuffer);
    if (object) {
        glBindBufferRange(GL_UNIFORM_BUFFER, slot, object->_buffer, rangeStart, rangeSize);

        _uniform._buffers[slot] = uniformBuffer;
        (void) CHECK_GL_ERROR();
    } else {
        releaseResourceTexture(slot);
        return;
    }
#else
    // because we rely on the program uniform mechanism we need to have
    // the program bound, thank you MacOSX Legacy profile.
@@ -184,19 +243,49 @@ void GLBackend::do_setUniformBuffer(Batch& batch, uint32 paramOffset) {
    // NOT working so we'll stick to the uniform float array until we move to core profile
    // GLuint bo = getBufferID(*uniformBuffer);
    //glUniformBufferEXT(_shader._program, slot, bo);
#endif

    (void) CHECK_GL_ERROR();

#endif
}

void GLBackend::releaseResourceTexture(int slot) {
    auto& tex = _resource._textures[slot];
    if (tex) {
        auto* object = Backend::getGPUObject<GLBackend::GLTexture>(*tex);
        if (object) {
            GLuint to = object->_texture;
            GLuint target = object->_target;
            glActiveTexture(GL_TEXTURE0 + slot);
            glBindTexture(target, 0); // RELEASE

            (void) CHECK_GL_ERROR();
        }
        tex.reset();
    }
}

void GLBackend::resetResourceStage() {
    for (int i = 0; i < _resource._textures.size(); i++) {
        releaseResourceTexture(i);
    }
}

void GLBackend::do_setResourceTexture(Batch& batch, uint32 paramOffset) {
    GLuint slot = batch._params[paramOffset + 1]._uint;
    TexturePointer uniformTexture = batch._textures.get(batch._params[paramOffset + 0]._uint);
    TexturePointer resourceTexture = batch._textures.get(batch._params[paramOffset + 0]._uint);

    if (!uniformTexture) {
    if (!resourceTexture) {
        releaseResourceTexture(slot);
        return;
    }
    // check cache before thinking
    if (_resource._textures[slot] == resourceTexture) {
        return;
    }

    GLTexture* object = GLBackend::syncGPUObject(*uniformTexture);
    // Always make sure the GLObject is in sync
    GLTexture* object = GLBackend::syncGPUObject(*resourceTexture);
    if (object) {
        GLuint to = object->_texture;
        GLuint target = object->_target;

@@ -205,7 +294,10 @@ void GLBackend::do_setResourceTexture(Batch& batch, uint32 paramOffset) {

        (void) CHECK_GL_ERROR();

        _resource._textures[slot] = resourceTexture;

    } else {
        releaseResourceTexture(slot);
        return;
    }
}
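The uniform-buffer and resource-texture paths above share one idea: cache the object currently bound to each slot, early-out when the same object is bound again, and unbind plus clear the cache entry on release so that resetUniformStage / resetResourceStage leave no stale GL bindings behind. A backend-agnostic sketch of that slot cache (the names here are illustrative, not the engine's API):

    // Illustrative only: a per-slot cache that skips redundant binds.
    #include <array>
    #include <cstddef>
    #include <memory>

    template <typename T, std::size_t MaxSlots>
    class SlotCache {
    public:
        // Returns true when the caller actually needs to issue the GL bind.
        bool bind(std::size_t slot, const std::shared_ptr<T>& object) {
            if (_slots[slot] == object) {
                return false;            // already bound, nothing to do
            }
            _slots[slot] = object;
            return true;
        }

        // Forget the cached object; the caller unbinds the slot on the GL side.
        void release(std::size_t slot) {
            _slots[slot].reset();
        }

    private:
        std::array<std::shared_ptr<T>, MaxSlots> _slots;
    };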
@@ -103,3 +103,7 @@ void GLBackend::do_getQuery(Batch& batch, uint32 paramOffset) {
        (void)CHECK_GL_ERROR();
    }
}

void GLBackend::resetQueryStage() {
}
@@ -184,4 +184,6 @@ void GLBackend::updateTransform() {
    _transform._invalidView = _transform._invalidProj = _transform._invalidModel = _transform._invalidViewport = false;
}

void GLBackend::resetTransformStage() {

}
@@ -161,7 +161,7 @@ const gpu::PipelinePointer& AmbientOcclusion::getBlendPipeline() {

        // Blend on transparent
        state->setBlendFunction(true,
            gpu::State::SRC_COLOR, gpu::State::BLEND_OP_ADD, gpu::State::DEST_COLOR);
            gpu::State::INV_SRC_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::SRC_ALPHA);

        // Good to go add the brand new pipeline
        _blendPipeline.reset(gpu::Pipeline::create(program, state));
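For reference, assuming the setBlendFunction arguments are (enabled, source factor, color op, destination factor) with the usual glBlendFunc semantics, the two factor pairs above combine the AO output $C_s$ (alpha $\alpha_s$) with the lit color $C_d$ as:

$$C_{out} = C_s \cdot C_s + C_d \cdot C_d \qquad \text{(old: SRC\_COLOR, DEST\_COLOR)}$$

$$C_{out} = C_s\,(1 - \alpha_s) + C_d\,\alpha_s \qquad \text{(new: INV\_SRC\_ALPHA, SRC\_ALPHA)}$$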
@@ -1998,7 +1998,7 @@ void GeometryReader::run() {
            } else if (_url.path().toLower().endsWith("palaceoforinthilian4.fbx")) {
                lightmapLevel = 3.5f;
            }
            fbxgeo = readFBX(_reply, _mapping, grabLightmaps, lightmapLevel);
            fbxgeo = readFBX(_reply, _mapping, _url.path(), grabLightmaps, lightmapLevel);
        } else if (_url.path().toLower().endsWith(".obj")) {
            fbxgeo = OBJReader().readOBJ(_reply, _mapping, &_url);
        }
@@ -111,9 +111,6 @@ RenderDeferredTask::RenderDeferredTask() : Task() {
    _drawHitEffectJobIndex = _jobs.size() -1;


    _jobs.push_back(Job(new ResetGLState::JobModel()));


    // Give ourselves 3 frames of timer queries
    _timerQueries.push_back(std::make_shared<gpu::Query>());
    _timerQueries.push_back(std::make_shared<gpu::Query>());
@@ -18,6 +18,9 @@

<$declareStandardTransform()$>

// Based on NVidia HBAO implementation in D3D11
// http://www.nvidia.co.uk/object/siggraph-2008-HBAO.html

varying vec2 varTexcoord;

uniform sampler2D depthTexture;
@@ -30,80 +33,213 @@ uniform float g_intensity;
uniform float bufferWidth;
uniform float bufferHeight;

#define SAMPLE_COUNT 4
const float PI = 3.14159265;

float getRandom(vec2 uv) {
const vec2 FocalLen = vec2(1.0, 1.0);

const vec2 LinMAD = vec2(0.1-10.0, 0.1+10.0) / (2.0*0.1*10.0);

const vec2 AORes = vec2(1024.0, 768.0);
const vec2 InvAORes = vec2(1.0/1024.0, 1.0/768.0);
const vec2 NoiseScale = vec2(1024.0, 768.0) / 4.0;

const float AOStrength = 1.9;
const float R = 0.3;
const float R2 = 0.3*0.3;
const float NegInvR2 = - 1.0 / (0.3*0.3);
const float TanBias = tan(30.0 * PI / 180.0);
const float MaxRadiusPixels = 50.0;

const int NumDirections = 6;
const int NumSamples = 4;

float ViewSpaceZFromDepth(float d){
    // [0,1] -> [-1,1] clip space
    d = d * 2.0 - 1.0;

    // Get view space Z
    return -1.0 / (LinMAD.x * d + LinMAD.y);
}

vec3 UVToViewSpace(vec2 uv, float z){
    //uv = UVToViewA * uv + UVToViewB;
    return vec3(uv * z, z);
}

vec3 GetViewPos(vec2 uv){
    float z = ViewSpaceZFromDepth(texture2D(depthTexture, uv).r);
    return UVToViewSpace(uv, z);
}

vec3 GetViewPosPoint(ivec2 uv){
    vec2 coord = vec2(gl_FragCoord.xy) + uv;
    //float z = texelFetch(texture0, coord, 0).r;
    float z = texture2D(depthTexture, uv).r;
    return UVToViewSpace(uv, z);
}

float TanToSin(float x){
    return x * inversesqrt(x*x + 1.0);
}

float InvLength(vec2 V){
    return inversesqrt(dot(V,V));
}

float Tangent(vec3 V){
    return V.z * InvLength(V.xy);
}

float BiasedTangent(vec3 V){
    return V.z * InvLength(V.xy) + TanBias;
}

float Tangent(vec3 P, vec3 S){
    return -(P.z - S.z) * InvLength(S.xy - P.xy);
}

float Length2(vec3 V){
    return dot(V,V);
}

vec3 MinDiff(vec3 P, vec3 Pr, vec3 Pl){
    vec3 V1 = Pr - P;
    vec3 V2 = P - Pl;
    return (Length2(V1) < Length2(V2)) ? V1 : V2;
}

vec2 SnapUVOffset(vec2 uv){
    return round(uv * AORes) * InvAORes;
}

float Falloff(float d2){
    return d2 * NegInvR2 + 1.0f;
}

float HorizonOcclusion( vec2 deltaUV, vec3 P, vec3 dPdu, vec3 dPdv, float randstep, float numSamples){
    float ao = 0;

    // Offset the first coord with some noise
    vec2 uv = varTexcoord + SnapUVOffset(randstep*deltaUV);
    deltaUV = SnapUVOffset( deltaUV );

    // Calculate the tangent vector
    vec3 T = deltaUV.x * dPdu + deltaUV.y * dPdv;

    // Get the angle of the tangent vector from the viewspace axis
    float tanH = BiasedTangent(T);
    float sinH = TanToSin(tanH);

    float tanS;
    float d2;
    vec3 S;

    // Sample to find the maximum angle
    for(float s = 1; s <= numSamples; ++s){
        uv += deltaUV;
        S = GetViewPos(uv);
        tanS = Tangent(P, S);
        d2 = Length2(S - P);

        // Is the sample within the radius and the angle greater?
        if(d2 < R2 && tanS > tanH)
        {
            float sinS = TanToSin(tanS);
            // Apply falloff based on the distance
            ao += Falloff(d2) * (sinS - sinH);

            tanH = tanS;
            sinH = sinS;
        }
    }
    return ao;
}

vec2 RotateDirections(vec2 Dir, vec2 CosSin){
    return vec2(Dir.x*CosSin.x - Dir.y*CosSin.y, Dir.x*CosSin.y + Dir.y*CosSin.x);
}

void ComputeSteps(inout vec2 stepSizeUv, inout float numSteps, float rayRadiusPix, float rand){
    // Avoid oversampling if numSteps is greater than the kernel radius in pixels
    numSteps = min(NumSamples, rayRadiusPix);

    // Divide by Ns+1 so that the farthest samples are not fully attenuated
    float stepSizePix = rayRadiusPix / (numSteps + 1);

    // Clamp numSteps if it is greater than the max kernel footprint
    float maxNumSteps = MaxRadiusPixels / stepSizePix;
    if (maxNumSteps < numSteps)
    {
        // Use dithering to avoid AO discontinuities
        numSteps = floor(maxNumSteps + rand);
        numSteps = max(numSteps, 1);
        stepSizePix = MaxRadiusPixels / numSteps;
    }

    // Step size in uv space
    stepSizeUv = stepSizePix * InvAORes;
}

float getRandom(vec2 uv){
    return fract(sin(dot(uv.xy ,vec2(12.9898,78.233))) * 43758.5453);
}

void main(void) {
    vec3 sampleKernel[4] = { vec3(0.2, 0.0, 0.0),
                             vec3(0.0, 0.2, 0.0),
                             vec3(0.0, 0.0, 0.2),
                             vec3(0.2, 0.2, 0.2) };
void main(void){
    float numDirections = NumDirections;

    TransformCamera cam = getTransformCamera();
    TransformObject obj = getTransformObject();
    vec3 P, Pr, Pl, Pt, Pb;
    P = GetViewPos(varTexcoord);

    vec3 eyeDir = vec3(0.0, 0.0, -3.0);
    vec3 cameraPositionWorldSpace;
    <$transformEyeToWorldDir(cam, eyeDir, cameraPositionWorldSpace)$>
    // Sample neighboring pixels
    Pr = GetViewPos(varTexcoord + vec2( InvAORes.x, 0));
    Pl = GetViewPos(varTexcoord + vec2(-InvAORes.x, 0));
    Pt = GetViewPos(varTexcoord + vec2( 0, InvAORes.y));
    Pb = GetViewPos(varTexcoord + vec2( 0,-InvAORes.y));

    vec4 depthColor = texture2D(depthTexture, varTexcoord);
    // Calculate tangent basis vectors using the minimum difference
    vec3 dPdu = MinDiff(P, Pr, Pl);
    vec3 dPdv = MinDiff(P, Pt, Pb) * (AORes.y * InvAORes.x);

    // z in non linear range [0,1]
    float depthVal = depthColor.r;
    // conversion into NDC [-1,1]
    float zNDC = depthVal * 2.0 - 1.0;
    float n = 1.0; // the near plane
    float f = 30.0; // the far plane
    float l = -1.0; // left
    float r = 1.0; // right
    float b = -1.0; // bottom
    float t = 1.0; // top

    // conversion into eye space
    float zEye = 2*f*n / (zNDC*(f-n)-(f+n));
    // Converting from pixel coordinates to NDC
    float xNDC = gl_FragCoord.x/bufferWidth * 2.0 - 1.0;
    float yNDC = gl_FragCoord.y/bufferHeight * 2.0 - 1.0;
    // Unprojecting X and Y from NDC to eye space
    float xEye = -zEye*(xNDC*(r-l)+(r+l))/(2.0*n);
    float yEye = -zEye*(yNDC*(t-b)+(t+b))/(2.0*n);
    vec3 currentFragEyeSpace = vec3(xEye, yEye, zEye);
    vec3 currentFragWorldSpace;
    <$transformEyeToWorldDir(cam, currentFragEyeSpace, currentFragWorldSpace)$>

    vec3 cameraToPositionRay = normalize(currentFragWorldSpace - cameraPositionWorldSpace);
    vec3 origin = cameraToPositionRay * depthVal + cameraPositionWorldSpace;

    vec3 normal = normalize(texture2D(normalTexture, varTexcoord).xyz);
    //normal = normalize(normal * normalMatrix);

    vec3 rvec = vec3(getRandom(varTexcoord.xy), getRandom(varTexcoord.yx), getRandom(varTexcoord.xx)) * 2.0 - 1.0;
    vec3 tangent = normalize(rvec - normal * dot(rvec, normal));
    vec3 bitangent = cross(normal, tangent);
    mat3 tbn = mat3(tangent, bitangent, normal);

    float occlusion = 0.0;

    for (int i = 0; i < SAMPLE_COUNT; ++i) {
        vec3 samplePos = origin + (tbn * sampleKernel[i]) * g_sample_rad;
        vec4 offset = cam._projectionViewUntranslated * vec4(samplePos, 1.0);

        offset.xy = (offset.xy / offset.w) * 0.5 + 0.5;
        float depth = length(samplePos - cameraPositionWorldSpace);

        float sampleDepthVal = texture2D(depthTexture, offset.xy).r;

        float rangeDelta = abs(depthVal - sampleDepthVal);
        float rangeCheck = smoothstep(0.0, 1.0, g_sample_rad / rangeDelta);

        occlusion += rangeCheck * step(sampleDepthVal, depth);
    }

    occlusion = 1.0 - occlusion / float(SAMPLE_COUNT);
    occlusion = clamp(pow(occlusion, g_intensity), 0.0, 1.0);
    gl_FragColor = vec4(vec3(occlusion), 1.0);
}
    // Get the random samples from the noise function
    vec3 random = vec3(getRandom(varTexcoord.xy), getRandom(varTexcoord.yx), getRandom(varTexcoord.xx));

    // Calculate the projected size of the hemisphere
    vec2 rayRadiusUV = 0.5 * R * FocalLen / -P.z;
    float rayRadiusPix = rayRadiusUV.x * AORes.x;

    float ao = 1.0;

    // Make sure the radius of the evaluated hemisphere is more than a pixel
    if(rayRadiusPix > 1.0){
        ao = 0.0;
        float numSteps;
        vec2 stepSizeUV;

        // Compute the number of steps
        ComputeSteps(stepSizeUV, numSteps, rayRadiusPix, random.z);

        float alpha = 2.0 * PI / numDirections;

        // Calculate the horizon occlusion of each direction
        for(float d = 0; d < numDirections; ++d){
            float theta = alpha * d;

            // Apply noise to the direction
            vec2 dir = RotateDirections(vec2(cos(theta), sin(theta)), random.xy);
            vec2 deltaUV = dir * stepSizeUV;

            // Sample the pixels along the direction
            ao += HorizonOcclusion( deltaUV,
                                    P,
                                    dPdu,
                                    dPdv,
                                    random.z,
                                    numSteps);
        }

        // Average the results and produce the final AO
        ao = 1.0 - ao / numDirections * AOStrength;
    }

    gl_FragColor = vec4(vec3(ao), 1.0);
}
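A quick check of the depth linearization used by ViewSpaceZFromDepth above, with the near and far planes hard-coded into LinMAD as $n = 0.1$ and $f = 10.0$: the stored depth $d \in [0, 1]$ is first remapped to NDC, and the two constants then recover view-space $z$ directly:

$$d_{ndc} = 2d - 1, \qquad \mathrm{LinMAD} = \left(\frac{n - f}{2nf},\ \frac{n + f}{2nf}\right) = (-4.95,\ 5.05)$$

$$z_{view} = \frac{-1}{\mathrm{LinMAD}.x\, d_{ndc} + \mathrm{LinMAD}.y}$$

which is the inverse of the standard perspective mapping $d_{ndc} = \frac{f + n}{f - n} + \frac{2fn}{(f - n)\, z_{view}}$ for a view space where visible depths have $z_{view} < 0$.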
@@ -21,9 +21,6 @@ uniform sampler2D blurredOcclusionTexture;
void main(void) {
    vec4 occlusionColor = texture2D(blurredOcclusionTexture, varTexcoord);

    if(occlusionColor.r > 0.8 && occlusionColor.r <= 1.0) {
        gl_FragColor = vec4(vec3(0.0), 0.0);
    } else {
        gl_FragColor = vec4(vec3(occlusionColor.r), 1.0);
    }
    gl_FragColor = vec4(vec3(0.0), occlusionColor.r);

}
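Combined with the INV_SRC_ALPHA / SRC_ALPHA blend set up for the AO blend pipeline earlier in this change (and assuming the usual source/destination factor order), writing the blurred occlusion into the alpha channel with a black color makes the blend simply scale the lit color by the occlusion term:

$$C_{out} = C_s\,(1 - \alpha_s) + C_d\,\alpha_s = C_d \cdot \mathrm{occlusion}, \qquad \text{since } C_s = 0,\ \alpha_s = \mathrm{occlusion}.$$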
@@ -216,46 +216,6 @@ void render::renderItems(const SceneContextPointer& sceneContext, const RenderCo
    }
}

void addClearStateCommands(gpu::Batch& batch) {
    batch._glDepthMask(true);
    batch._glDepthFunc(GL_LESS);
    batch._glDisable(GL_CULL_FACE);

    batch._glActiveTexture(GL_TEXTURE0 + 1);
    batch._glBindTexture(GL_TEXTURE_2D, 0);
    batch._glActiveTexture(GL_TEXTURE0 + 2);
    batch._glBindTexture(GL_TEXTURE_2D, 0);
    batch._glActiveTexture(GL_TEXTURE0 + 3);
    batch._glBindTexture(GL_TEXTURE_2D, 0);
    batch._glActiveTexture(GL_TEXTURE0);
    batch._glBindTexture(GL_TEXTURE_2D, 0);


    // deactivate vertex arrays after drawing
    batch._glDisableClientState(GL_NORMAL_ARRAY);
    batch._glDisableClientState(GL_VERTEX_ARRAY);
    batch._glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    batch._glDisableClientState(GL_COLOR_ARRAY);
    batch._glDisableVertexAttribArray(gpu::Stream::TANGENT);
    batch._glDisableVertexAttribArray(gpu::Stream::SKIN_CLUSTER_INDEX);
    batch._glDisableVertexAttribArray(gpu::Stream::SKIN_CLUSTER_WEIGHT);

    // bind with 0 to switch back to normal operation
    batch._glBindBuffer(GL_ARRAY_BUFFER, 0);
    batch._glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    batch._glBindTexture(GL_TEXTURE_2D, 0);

    // Back to no program
    batch._glUseProgram(0);
}
void ResetGLState::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext) {

    gpu::Batch theBatch;
    addClearStateCommands(theBatch);
    assert(renderContext->args);
    renderContext->args->_context->render(theBatch);
}

void DrawLight::run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext) {
    assert(renderContext->args);
    assert(renderContext->args->_viewFrustum);
@@ -260,14 +260,6 @@ public:
    typedef Job::Model<DrawBackground> JobModel;
};

class ResetGLState {
public:
    void run(const SceneContextPointer& sceneContext, const RenderContextPointer& renderContext);

    typedef Job::Model<ResetGLState> JobModel;
};



class DrawSceneTask : public Task {
public:
@@ -215,7 +215,6 @@ public:
    SixenseEnabled,
    SixenseMouseInput,
    SixenseLasers,
    ShiftHipsForIdleAnimations,
    Stars,
    Stats,
    StereoAudio,
@@ -38,7 +38,7 @@ bool vhacd::VHACDUtil::loadFBX(const QString filename, FBXGeometry& result) {
    if (filename.toLower().endsWith(".obj")) {
        result = OBJReader().readOBJ(fbxContents, QVariantHash());
    } else if (filename.toLower().endsWith(".fbx")) {
        result = readFBX(fbxContents, QVariantHash());
        result = readFBX(fbxContents, QVariantHash(), filename);
    } else {
        qDebug() << "unknown file extension";
        return false;