samcake 2015-09-21 17:47:23 -07:00
commit 910038cf82
46 changed files with 2370 additions and 1062 deletions

View file

@ -305,7 +305,7 @@ function controller(hand, triggerAction) {
this.activateEntity(this.grabbedEntity);
var grabbedProperties = Entities.getEntityProperties(this.grabbedEntity, "position");
var grabbedProperties = Entities.getEntityProperties(this.grabbedEntity, ["position", "rotation"]);
var handRotation = this.getHandRotation();
var handPosition = this.getHandPosition();

View file

@ -36,7 +36,7 @@ for (var x = 0; x < SIDE_SIZE; x++) {
var position = Vec3.sum(MyAvatar.position, { x: x * 0.2, y: y * 0.2, z: z * 0.2});
var radius = Math.random() * 0.1;
boxes.push(Entities.addEntity({
type: cube ? "Box" : "Box",
type: cube ? "Box" : "Sphere",
name: "PerfTest",
position: position,
dimensions: { x: radius, y: radius, z: radius },
@ -52,7 +52,7 @@ for (var x = 0; x < SIDE_SIZE; x++) {
function scriptEnding() {
for (var i = 0; i < boxes.length; i++) {
//Entities.deleteEntity(boxes[i]);
Entities.deleteEntity(boxes[i]);
}
}
Script.scriptEnding.connect(scriptEnding);

File diff suppressed because it is too large

View file

@ -28,13 +28,10 @@ if (this.Vec2 == undefined) {
return new Vec2(v.x, v.y);
}
} else if (this.Vec2.clone == undefined) {
print("Vec2 exists; adding Vec2.clone");
this.Vec2.clone = function (v) {
return { 'x': v.x || 0.0, 'y': v.y || 0.0 };
}
} else {
print("Vec2...?");
}
} else {}
})();
var Rect = function (xmin, ymin, xmax, ymax) {
@ -566,46 +563,51 @@ var Slider = UI.Slider = function (properties) {
this.slider = new Box(properties.slider);
this.slider.parent = this;
var updateSliderPos = function (event, widget) {
var rx = Math.max(event.x * 1.0 - widget.position.x - widget.slider.width * 0.5, 0.0);
var clickOffset = { x: 0.0, y: 0.0 }; // offset relative to slider knob
var widget = this;
var updateDrag = function (event) {
var rx = Math.max(event.x * 1.0 - widget.position.x - clickOffset.x, 0.0);
var width = Math.max(widget.width - widget.slider.width - widget.padding.x * 2.0, 0.0);
var v = Math.min(rx, width) / (width || 1);
widget.value = widget.minValue + (
widget.maxValue - widget.minValue) * v;
// print("dragging slider: rx = " + rx + ", width = " + width + ", v = " + v);
widget.value = widget.minValue + (widget.maxValue - widget.minValue) * v;
widget.onValueChanged(widget.value);
UI.updateLayout();
}
var startDrag = function (event) {
// calculate position of slider knob
var x0 = widget.position.x + widget.padding.x;
var width = (widget.width - widget.slider.width - widget.padding.x * 2.0);
var normalizedValue = (widget.value - widget.minValue) / (widget.maxValue - widget.minValue)
var widget = this;
this.addAction('onMouseDown', function (event) {
sliderRel.x = sliderRel.y = 0.0;
// sliderRel.x = widget.slider.width * 0.5;
// sliderRel.y = widget.slider.height * 0.5;
updateSliderPos(event, widget);
var sliderX = x0 + normalizedValue * width;
var sliderWidth = widget.slider.width;
// hack
ui.clickedWidget = ui.draggedWidget = widget.slider;
});
if (event.x >= sliderX && event.x <= sliderX + sliderWidth) {
// print("Start drag -- on slider knob");
clickOffset.x = event.x - sliderX;
} else if (event.x >= x0 && event.x <= x0 + width) {
// print("Start drag -- on slider bar");
clickOffset.x = sliderWidth * 0.5;
} else {
clickOffset.x = 0.0;
// print("Start drag -- out of bounds!");
// print("event.x = " + event.x);
// print("x0 = " + x0 + ", x1 = " + (x0 + width) + " (width = " + width + ")");
// print("s0 = " + sliderX + ", s1 = " + (sliderX + sliderWidth) + "(slider width = " + sliderWidth + ")");
// print("widget = " + widget);
// print("widget.slider = " + widget.slider);
// print("widget.width = " + widget.width + ", widget.slider.width = " + widget.slider.width);
}
updateDrag(event);
}
var sliderRel = {};
this.slider.addAction('onMouseDown', function (event) {
sliderRel.x = widget.slider.position.x - event.x;
sliderRel.y = widget.slider.position.y - event.y;
event.x += sliderRel.x;
event.y += sliderRel.y;
updateSliderPos(event, widget);
});
this.slider.addAction('onDragBegin', function (event) {
event.x += sliderRel.x;
event.y += sliderRel.y;
updateSliderPos(event, widget);
})
this.slider.addAction('onDragUpdate', function (event) {
event.x += sliderRel.x;
event.y += sliderRel.y;
updateSliderPos(event, widget);
})
this.addAction('onMouseDown', startDrag);
this.addAction('onDragBegin', updateDrag);
this.addAction('onDragUpdate', updateDrag);
this.slider.actions = this.actions;
};
Slider.prototype = new Box();
Slider.prototype.constructor = Slider;
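The drag math above maps a mouse x coordinate to a slider value: subtract the widget origin and the knob-relative click offset, clamp to the usable track width (widget width minus knob width minus horizontal padding), normalize, and interpolate between minValue and maxValue. A minimal standalone restatement of that mapping in C++, with illustrative names rather than the script's API:

#include <algorithm>
#include <cstdio>

// Mirror of updateDrag(): convert a mouse x position into a slider value.
float sliderValueFromMouseX(float mouseX, float widgetX, float clickOffsetX,
                            float widgetWidth, float knobWidth, float paddingX,
                            float minValue, float maxValue) {
    float rx = std::max(mouseX - widgetX - clickOffsetX, 0.0f);
    float track = std::max(widgetWidth - knobWidth - paddingX * 2.0f, 0.0f);
    float v = std::min(rx, track) / (track > 0.0f ? track : 1.0f);
    return minValue + (maxValue - minValue) * v;
}

int main() {
    // A 200 px wide slider with a 20 px knob and 5 px padding, grabbed 10 px into the knob.
    std::printf("%.2f\n", sliderValueFromMouseX(110.0f, 10.0f, 10.0f,
                                                200.0f, 20.0f, 5.0f, 0.0f, 1.0f)); // ~0.53
}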
@ -947,16 +949,25 @@ var dispatchEvent = function (action, event, widget) {
}
}
function hasAction (widget, action) {
// print("widget = " + widget);
// print("action = " + action);
// if (widget) {
// print("widget.actions[<action>] = " + widget.actions[action]);
// print("widget.parent = " + widget.parent);
// }
return widget && (widget.actions[action] || hasAction(widget.parent, action));
}
UI.handleMouseMove = function (event, canStartDrag) {
if (canStartDrag === undefined)
// if (canStartDrag === undefined)
if (arguments.length < 2)
canStartDrag = true;
// print("mouse moved x = " + event.x + ", y = " + event.y);
var focused = getFocusedWidget(event);
// print("got focus: " + focused);
if (canStartDrag && !ui.draggedWidget && ui.clickedWidget && ui.clickedWidget.actions['onDragBegin']) {
if (!ui.draggedWidget && ui.clickedWidget && hasAction(ui.clickedWidget, 'onDragBegin')) {
ui.draggedWidget = ui.clickedWidget;
dispatchEvent('onDragBegin', event, ui.draggedWidget);
} else if (ui.draggedWidget) {
@ -980,26 +991,24 @@ UI.handleMousePress = function (event) {
}
UI.handleMouseDoublePress = function (event) {
// print("DOUBLE CLICK!");
var focused = getFocusedWidget(event);
UI.handleMouseMove(event);
if (focused) {
// print("dispatched onDoubleClick");
dispatchEvent('onDoubleClick', event, focused);
}
}
UI.handleMouseRelease = function (event) {
// print("Mouse released");
if (ui.draggedWidget) {
dispatchEvent('onDragEnd', event, ui.draggedWidget);
} else {
UI.handleMouseMove(event, false);
var clicked = ui.clickedWidget;
ui.clickedWidget = null;
UI.handleMouseMove(event);
if (ui.focusedWidget) {
dispatchEvent('onMouseUp', event, ui.focusedWidget);
if (ui.clickedWidget == ui.focusedWidget) {
if (clicked == ui.focusedWidget) {
dispatchEvent('onClick', event, ui.focusedWidget);
}
}

View file

@ -168,7 +168,7 @@ Item {
color: root.fontColor;
font.pixelSize: root.fontSize
text: "Triangles: " + root.triangles +
" / Quads: " + root.quads + " / Material Switches: " + root.materialSwitches
" / Material Switches: " + root.materialSwitches
}
Text {
color: root.fontColor;

View file

@ -1151,14 +1151,27 @@ void Application::paintGL() {
}
} else if (_myCamera.getMode() == CAMERA_MODE_MIRROR) {
_myCamera.setRotation(_myAvatar->getWorldAlignedOrientation() * glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f)));
_myCamera.setPosition(_myAvatar->getDefaultEyePosition() +
glm::vec3(0, _raiseMirror * _myAvatar->getScale(), 0) +
(_myAvatar->getOrientation() * glm::quat(glm::vec3(0.0f, _rotateMirror, 0.0f))) *
glm::vec3(0.0f, 0.0f, -1.0f) * MIRROR_FULLSCREEN_DISTANCE * _scaleMirror);
if (isHMDMode()) {
glm::quat hmdRotation = extractRotation(_myAvatar->getHMDSensorMatrix());
_myCamera.setRotation(_myAvatar->getWorldAlignedOrientation()
* glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f)) * hmdRotation);
glm::vec3 hmdOffset = extractTranslation(_myAvatar->getHMDSensorMatrix());
_myCamera.setPosition(_myAvatar->getDefaultEyePosition()
+ glm::vec3(0, _raiseMirror * _myAvatar->getScale(), 0)
+ (_myAvatar->getOrientation() * glm::quat(glm::vec3(0.0f, _rotateMirror, 0.0f))) *
glm::vec3(0.0f, 0.0f, -1.0f) * MIRROR_FULLSCREEN_DISTANCE * _scaleMirror
+ (_myAvatar->getOrientation() * glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f))) * hmdOffset);
} else {
_myCamera.setRotation(_myAvatar->getWorldAlignedOrientation()
* glm::quat(glm::vec3(0.0f, PI + _rotateMirror, 0.0f)));
_myCamera.setPosition(_myAvatar->getDefaultEyePosition()
+ glm::vec3(0, _raiseMirror * _myAvatar->getScale(), 0)
+ (_myAvatar->getOrientation() * glm::quat(glm::vec3(0.0f, _rotateMirror, 0.0f))) *
glm::vec3(0.0f, 0.0f, -1.0f) * MIRROR_FULLSCREEN_DISTANCE * _scaleMirror);
}
renderArgs._renderMode = RenderArgs::MIRROR_RENDER_MODE;
}
// Update camera position
// Update camera position
if (!isHMDMode()) {
_myCamera.update(1.0f / _fps);
}
@ -2600,11 +2613,7 @@ void Application::updateMyAvatarLookAtPosition() {
if (_myCamera.getMode() == CAMERA_MODE_MIRROR) {
// When I am in mirror mode, just look right at the camera (myself); don't switch gaze points because when physically
// looking in a mirror one's eyes appear steady.
if (!isHMD) {
lookAtSpot = _myCamera.getPosition();
} else {
lookAtSpot = _myCamera.getPosition() + transformPoint(_myAvatar->getSensorToWorldMatrix(), extractTranslation(getHMDSensorPose()));
}
lookAtSpot = _myCamera.getPosition();
} else if (eyeTracker->isTracking() && (isHMD || eyeTracker->isSimulating())) {
// Look at the point that the user is looking at.
if (isHMD) {

View file

@ -206,7 +206,7 @@ void Stars::render(RenderArgs* renderArgs, float alpha) {
float msecs = (float)(usecTimestampNow() - start) / (float)USECS_PER_MSEC;
float secs = msecs / (float)MSECS_PER_SECOND;
batch._glUniform1f(_timeSlot, secs);
geometryCache->renderUnitCube(batch);
geometryCache->renderCube(batch);
static const size_t VERTEX_STRIDE = sizeof(StarVertex);
size_t offset = offsetof(StarVertex, position);

View file

@ -23,6 +23,7 @@
#include <ByteCountCoding.h>
#include <SharedUtil.h>
#include <DeferredLightingEffect.h>
#include "world.h"
#include "Application.h"
@ -93,29 +94,28 @@ void renderWorldBox(gpu::Batch& batch) {
geometryCache->renderLine(batch, glm::vec3(-HALF_TREE_SCALE, 0.0f, HALF_TREE_SCALE),
glm::vec3(HALF_TREE_SCALE, 0.0f, HALF_TREE_SCALE), GREY);
geometryCache->renderWireCube(batch, TREE_SCALE, GREY4);
auto deferredLighting = DependencyManager::get<DeferredLightingEffect>();
deferredLighting->renderWireCubeInstance(batch, Transform(), GREY4);
// Draw meter markers along the 3 axis to help with measuring things
const float MARKER_DISTANCE = 1.0f;
const float MARKER_RADIUS = 0.05f;
geometryCache->renderSphere(batch, MARKER_RADIUS, 10, 10, RED);
transform = Transform().setScale(MARKER_RADIUS);
deferredLighting->renderSolidSphereInstance(batch, transform, RED);
transform.setTranslation(glm::vec3(MARKER_DISTANCE, 0.0f, 0.0f));
batch.setModelTransform(transform);
geometryCache->renderSphere(batch, MARKER_RADIUS, 10, 10, RED);
transform = Transform().setTranslation(glm::vec3(MARKER_DISTANCE, 0.0f, 0.0f)).setScale(MARKER_RADIUS);
deferredLighting->renderSolidSphereInstance(batch, transform, RED);
transform.setTranslation(glm::vec3(0.0f, MARKER_DISTANCE, 0.0f));
batch.setModelTransform(transform);
geometryCache->renderSphere(batch, MARKER_RADIUS, 10, 10, GREEN);
transform = Transform().setTranslation(glm::vec3(0.0f, MARKER_DISTANCE, 0.0f)).setScale(MARKER_RADIUS);
deferredLighting->renderSolidSphereInstance(batch, transform, GREEN);
transform.setTranslation(glm::vec3(0.0f, 0.0f, MARKER_DISTANCE));
batch.setModelTransform(transform);
geometryCache->renderSphere(batch, MARKER_RADIUS, 10, 10, BLUE);
transform = Transform().setTranslation(glm::vec3(0.0f, 0.0f, MARKER_DISTANCE)).setScale(MARKER_RADIUS);
deferredLighting->renderSolidSphereInstance(batch, transform, BLUE);
transform.setTranslation(glm::vec3(MARKER_DISTANCE, 0.0f, MARKER_DISTANCE));
batch.setModelTransform(transform);
geometryCache->renderSphere(batch, MARKER_RADIUS, 10, 10, GREY);
transform = Transform().setTranslation(glm::vec3(MARKER_DISTANCE, 0.0f, MARKER_DISTANCE)).setScale(MARKER_RADIUS);
deferredLighting->renderSolidSphereInstance(batch, transform, GREY);
}
// Return a random vector of average length 1

View file

@ -448,15 +448,14 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
// If this is the avatar being looked at, render a little ball above their head
if (_isLookAtTarget && Menu::getInstance()->isOptionChecked(MenuOption::RenderFocusIndicator)) {
const float INDICATOR_OFFSET = 0.22f;
const float INDICATOR_RADIUS = 0.03f;
const glm::vec4 LOOK_AT_INDICATOR_COLOR = { 0.8f, 0.0f, 0.0f, 0.75f };
static const float INDICATOR_OFFSET = 0.22f;
static const float INDICATOR_RADIUS = 0.03f;
static const glm::vec4 LOOK_AT_INDICATOR_COLOR = { 0.8f, 0.0f, 0.0f, 0.75f };
glm::vec3 position = glm::vec3(_position.x, getDisplayNamePosition().y + INDICATOR_OFFSET, _position.z);
Transform transform;
transform.setTranslation(position);
batch.setModelTransform(transform);
DependencyManager::get<DeferredLightingEffect>()->renderSolidSphere(batch, INDICATOR_RADIUS,
15, 15, LOOK_AT_INDICATOR_COLOR);
transform.postScale(INDICATOR_RADIUS);
DependencyManager::get<DeferredLightingEffect>()->renderSolidSphereInstance(batch, transform, LOOK_AT_INDICATOR_COLOR);
}
// If the avatar is looking at me, indicate that they are
@ -473,27 +472,29 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
if (geometry && geometry->isLoaded()) {
const float DEFAULT_EYE_DIAMETER = 0.048f; // Typical human eye
const float RADIUS_INCREMENT = 0.005f;
Transform transform;
batch.setModelTransform(Transform());
glm::vec3 position = getHead()->getLeftEyePosition();
Transform transform;
transform.setTranslation(position);
batch.setModelTransform(transform);
float eyeDiameter = geometry->getFBXGeometry().leftEyeSize;
if (eyeDiameter == 0.0f) {
eyeDiameter = DEFAULT_EYE_DIAMETER;
}
DependencyManager::get<DeferredLightingEffect>()->renderSolidSphere(batch,
eyeDiameter * _scale / 2.0f + RADIUS_INCREMENT, 15, 15, glm::vec4(LOOKING_AT_ME_COLOR, alpha));
DependencyManager::get<DeferredLightingEffect>()->renderSolidSphereInstance(batch,
Transform(transform).postScale(eyeDiameter * _scale / 2.0f + RADIUS_INCREMENT),
glm::vec4(LOOKING_AT_ME_COLOR, alpha));
position = getHead()->getRightEyePosition();
transform.setTranslation(position);
batch.setModelTransform(transform);
eyeDiameter = geometry->getFBXGeometry().rightEyeSize;
if (eyeDiameter == 0.0f) {
eyeDiameter = DEFAULT_EYE_DIAMETER;
}
DependencyManager::get<DeferredLightingEffect>()->renderSolidSphere(batch,
eyeDiameter * _scale / 2.0f + RADIUS_INCREMENT, 15, 15, glm::vec4(LOOKING_AT_ME_COLOR, alpha));
DependencyManager::get<DeferredLightingEffect>()->renderSolidSphereInstance(batch,
Transform(transform).postScale(eyeDiameter * _scale / 2.0f + RADIUS_INCREMENT),
glm::vec4(LOOKING_AT_ME_COLOR, alpha));
}
}
@ -518,19 +519,16 @@ void Avatar::render(RenderArgs* renderArgs, const glm::vec3& cameraPosition) {
if (renderArgs->_renderMode == RenderArgs::DEFAULT_RENDER_MODE && (sphereRadius > MIN_SPHERE_SIZE) &&
(angle < MAX_SPHERE_ANGLE) && (angle > MIN_SPHERE_ANGLE)) {
batch.setModelTransform(Transform());
Transform transform;
transform.setTranslation(_position);
transform.setScale(height);
batch.setModelTransform(transform);
if (_voiceSphereID == GeometryCache::UNKNOWN_ID) {
_voiceSphereID = DependencyManager::get<GeometryCache>()->allocateID();
}
DependencyManager::get<DeferredLightingEffect>()->bindSimpleProgram(batch);
DependencyManager::get<GeometryCache>()->renderSphere(batch, sphereRadius, 15, 15,
glm::vec4(SPHERE_COLOR[0], SPHERE_COLOR[1], SPHERE_COLOR[2], 1.0f - angle / MAX_SPHERE_ANGLE), true,
_voiceSphereID);
transform.postScale(sphereRadius);
DependencyManager::get<DeferredLightingEffect>()->renderSolidSphereInstance(batch,
transform,
glm::vec4(SPHERE_COLOR[0], SPHERE_COLOR[1], SPHERE_COLOR[2], 1.0f - angle / MAX_SPHERE_ANGLE));
}
}
}

View file

@ -14,6 +14,7 @@
#include <GeometryUtil.h>
#include <RenderArgs.h>
#include <DeferredLightingEffect.h>
#include "Avatar.h"
#include "AvatarManager.h"
@ -65,16 +66,16 @@ void Hand::renderHandTargets(RenderArgs* renderArgs, bool isMine) {
Transform transform = Transform();
transform.setTranslation(position);
transform.setRotation(palm.getRotation());
batch.setModelTransform(transform);
DependencyManager::get<GeometryCache>()->renderSphere(batch, SPHERE_RADIUS,
NUM_FACETS, NUM_FACETS, grayColor);
transform.postScale(SPHERE_RADIUS);
DependencyManager::get<DeferredLightingEffect>()->renderSolidSphereInstance(batch, transform, grayColor);
// draw a green sphere at the old "finger tip"
transform = Transform();
position = palm.getTipPosition();
transform.setTranslation(position);
batch.setModelTransform(transform);
DependencyManager::get<GeometryCache>()->renderSphere(batch, SPHERE_RADIUS,
NUM_FACETS, NUM_FACETS, greenColor, false);
transform.setRotation(palm.getRotation());
transform.postScale(SPHERE_RADIUS);
DependencyManager::get<DeferredLightingEffect>()->renderSolidSphereInstance(batch, transform, greenColor);
}
}

View file

@ -462,13 +462,10 @@ void Head::renderLookatTarget(RenderArgs* renderArgs, glm::vec3 lookatPosition)
auto& batch = *renderArgs->_batch;
auto transform = Transform{};
transform.setTranslation(lookatPosition);
batch.setModelTransform(transform);
auto deferredLighting = DependencyManager::get<DeferredLightingEffect>();
deferredLighting->bindSimpleProgram(batch);
auto geometryCache = DependencyManager::get<GeometryCache>();
const float LOOK_AT_TARGET_RADIUS = 0.075f;
transform.postScale(LOOK_AT_TARGET_RADIUS);
const glm::vec4 LOOK_AT_TARGET_COLOR = { 0.8f, 0.0f, 0.0f, 0.75f };
geometryCache->renderSphere(batch, LOOK_AT_TARGET_RADIUS, 15, 15, LOOK_AT_TARGET_COLOR, true);
deferredLighting->renderSolidSphereInstance(batch, transform, LOOK_AT_TARGET_COLOR);
}

View file

@ -639,27 +639,25 @@ void SkeletonModel::renderBoundingCollisionShapes(gpu::Batch& batch, float alpha
auto geometryCache = DependencyManager::get<GeometryCache>();
auto deferredLighting = DependencyManager::get<DeferredLightingEffect>();
Transform transform; // = Transform();
// draw a blue sphere at the capsule top point
glm::vec3 topPoint = _translation + _boundingCapsuleLocalOffset + (0.5f * _boundingCapsuleHeight) * glm::vec3(0.0f, 1.0f, 0.0f);
transform.setTranslation(topPoint);
batch.setModelTransform(transform);
deferredLighting->bindSimpleProgram(batch);
geometryCache->renderSphere(batch, _boundingCapsuleRadius, BALL_SUBDIVISIONS, BALL_SUBDIVISIONS,
glm::vec4(0.6f, 0.6f, 0.8f, alpha));
deferredLighting->renderSolidSphereInstance(batch,
Transform().setTranslation(topPoint).postScale(_boundingCapsuleRadius),
glm::vec4(0.6f, 0.6f, 0.8f, alpha));
// draw a yellow sphere at the capsule bottom point
glm::vec3 bottomPoint = topPoint - glm::vec3(0.0f, _boundingCapsuleHeight, 0.0f);
glm::vec3 axis = topPoint - bottomPoint;
transform.setTranslation(bottomPoint);
batch.setModelTransform(transform);
deferredLighting->bindSimpleProgram(batch);
geometryCache->renderSphere(batch, _boundingCapsuleRadius, BALL_SUBDIVISIONS, BALL_SUBDIVISIONS,
glm::vec4(0.8f, 0.8f, 0.6f, alpha));
deferredLighting->renderSolidSphereInstance(batch,
Transform().setTranslation(bottomPoint).postScale(_boundingCapsuleRadius),
glm::vec4(0.8f, 0.8f, 0.6f, alpha));
// draw a green cylinder between the two points
glm::vec3 origin(0.0f);
batch.setModelTransform(Transform().setTranslation(bottomPoint));
deferredLighting->bindSimpleProgram(batch);
Avatar::renderJointConnectingCone(batch, origin, axis, _boundingCapsuleRadius, _boundingCapsuleRadius,
glm::vec4(0.6f, 0.8f, 0.6f, alpha));
}

View file

@ -337,7 +337,6 @@ void Stats::updateStats() {
void Stats::setRenderDetails(const RenderDetails& details) {
STAT_UPDATE(triangles, details._trianglesRendered);
STAT_UPDATE(quads, details._quadsRendered);
STAT_UPDATE(materialSwitches, details._materialSwitches);
if (_expanded) {
STAT_UPDATE(meshOpaque, details._opaque._rendered);

View file

@ -61,8 +61,7 @@ void Cube3DOverlay::render(RenderArgs* args) {
// }
transform.setScale(dimensions);
batch->setModelTransform(transform);
DependencyManager::get<GeometryCache>()->renderSolidCube(*batch, 1.0f, cubeColor);
DependencyManager::get<DeferredLightingEffect>()->renderSolidCubeInstance(*batch, transform, cubeColor);
} else {
if (getIsDashedLine()) {
@ -98,9 +97,9 @@ void Cube3DOverlay::render(RenderArgs* args) {
geometryCache->renderDashedLine(*batch, bottomRightFar, topRightFar, cubeColor);
} else {
batch->setModelTransform(Transform());
transform.setScale(dimensions);
batch->setModelTransform(transform);
DependencyManager::get<DeferredLightingEffect>()->renderWireCube(*batch, 1.0f, cubeColor);
DependencyManager::get<DeferredLightingEffect>()->renderWireCubeInstance(*batch, transform, cubeColor);
}
}
}

View file

@ -12,11 +12,16 @@
#include <DependencyManager.h>
#include <GeometryCache.h>
#include <DeferredLightingEffect.h>
#include <gpu/Batch.h>
#include <SharedUtil.h>
QString const Sphere3DOverlay::TYPE = "sphere";
// Sphere overlays should fit inside a cube of the specified dimensions, hence it needs to be a half unit sphere.
// However, the geometry cache renders a UNIT sphere, so we need to scale down.
static const float SPHERE_OVERLAY_SCALE = 0.5f;
Sphere3DOverlay::Sphere3DOverlay(const Sphere3DOverlay* Sphere3DOverlay) :
Volume3DOverlay(Sphere3DOverlay)
{
@ -36,10 +41,15 @@ void Sphere3DOverlay::render(RenderArgs* args) {
auto batch = args->_batch;
if (batch) {
batch->setModelTransform(Transform());
Transform transform = _transform;
transform.postScale(getDimensions());
batch->setModelTransform(transform);
DependencyManager::get<GeometryCache>()->renderSphere(*batch, 1.0f, SLICES, SLICES, sphereColor, _isSolid);
transform.postScale(getDimensions() * SPHERE_OVERLAY_SCALE);
if (_isSolid) {
DependencyManager::get<DeferredLightingEffect>()->renderSolidSphereInstance(*batch, transform, sphereColor);
} else {
DependencyManager::get<DeferredLightingEffect>()->renderWireSphereInstance(*batch, transform, sphereColor);
}
}
}
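The 0.5 factor follows from the cached unit sphere having radius 1 (diameter 2), as the half-unit comment above implies: scaling it by getDimensions() * SPHERE_OVERLAY_SCALE yields a sphere whose diameter equals the overlay's dimensions, so it just fits the bounding cube. A quick standalone check of that arithmetic (plain floats standing in for the Transform API):

#include <cassert>

int main() {
    const float UNIT_SPHERE_RADIUS = 1.0f;   // what the geometry cache renders
    const float SPHERE_OVERLAY_SCALE = 0.5f;
    float dimension = 1.0f;                  // a 1 x 1 x 1 overlay
    float renderedDiameter = 2.0f * UNIT_SPHERE_RADIUS * dimension * SPHERE_OVERLAY_SCALE;
    assert(renderedDiameter == dimension);   // the sphere exactly fits its bounding cube
}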

View file

@ -57,7 +57,9 @@ void RenderableBoxEntityItem::render(RenderArgs* args) {
if (_procedural->ready()) {
batch.setModelTransform(getTransformToCenter()); // we want to include the scale as well
_procedural->prepare(batch, this->getDimensions());
DependencyManager::get<GeometryCache>()->renderSolidCube(batch, 1.0f, _procedural->getColor(cubeColor));
auto color = _procedural->getColor(cubeColor);
batch._glColor4f(color.r, color.g, color.b, color.a);
DependencyManager::get<GeometryCache>()->renderCube(batch);
} else {
DependencyManager::get<DeferredLightingEffect>()->renderSolidCubeInstance(batch, getTransformToCenter(), cubeColor);
}

View file

@ -23,8 +23,13 @@ void RenderableDebugableEntityItem::renderBoundingBox(EntityItem* entity, Render
float puffedOut, glm::vec4& color) {
Q_ASSERT(args->_batch);
gpu::Batch& batch = *args->_batch;
batch.setModelTransform(entity->getTransformToCenter()); // we want to include the scale as well
DependencyManager::get<DeferredLightingEffect>()->renderWireCube(batch, 1.0f + puffedOut, color);
auto shapeTransform = entity->getTransformToCenter();
if (puffedOut != 0.0) {
shapeTransform.postScale(1.0 + puffedOut);
}
batch.setModelTransform(Transform()); // we want to include the scale as well
DependencyManager::get<DeferredLightingEffect>()->renderWireCubeInstance(batch, shapeTransform, color);
}
void RenderableDebugableEntityItem::render(EntityItem* entity, RenderArgs* args) {

View file

@ -24,6 +24,11 @@
#include "../render-utils/simple_vert.h"
#include "../render-utils/simple_frag.h"
// Sphere entities should fit inside a cube entity of the same size, so a sphere that has dimensions 1x1x1
// is a half unit sphere. However, the geometry cache renders a UNIT sphere, so we need to scale down.
static const float SPHERE_ENTITY_SCALE = 0.5f;
EntityItemPointer RenderableSphereEntityItem::factory(const EntityItemID& entityID, const EntityItemProperties& properties) {
return std::make_shared<RenderableSphereEntityItem>(entityID, properties);
}
@ -39,15 +44,7 @@ void RenderableSphereEntityItem::render(RenderArgs* args) {
PerformanceTimer perfTimer("RenderableSphereEntityItem::render");
Q_ASSERT(getType() == EntityTypes::Sphere);
Q_ASSERT(args->_batch);
gpu::Batch& batch = *args->_batch;
batch.setModelTransform(getTransformToCenter()); // use a transform with scale, rotation, registration point and translation
// TODO: it would be cool to select different slices/stacks geometry based on the size of the sphere
// and the distance to the viewer. This would allow us to reduce the triangle count for smaller spheres
// that aren't close enough to see the tessellation and use larger triangle count for spheres that would
// expose that effect
static const int SLICES = 15, STACKS = 15;
if (!_procedural) {
_procedural.reset(new Procedural(getUserData()));
_procedural->_vertexSource = simple_vert;
@ -59,12 +56,19 @@ void RenderableSphereEntityItem::render(RenderArgs* args) {
gpu::State::FACTOR_ALPHA, gpu::State::BLEND_OP_ADD, gpu::State::ONE);
}
gpu::Batch& batch = *args->_batch;
glm::vec4 sphereColor(toGlm(getXColor()), getLocalRenderAlpha());
Transform modelTransform = getTransformToCenter();
modelTransform.postScale(SPHERE_ENTITY_SCALE);
if (_procedural->ready()) {
batch.setModelTransform(modelTransform); // use a transform with scale, rotation, registration point and translation
_procedural->prepare(batch, getDimensions());
DependencyManager::get<GeometryCache>()->renderSphere(batch, 0.5f, SLICES, STACKS, _procedural->getColor(sphereColor));
auto color = _procedural->getColor(sphereColor);
batch._glColor4f(color.r, color.g, color.b, color.a);
DependencyManager::get<GeometryCache>()->renderSphere(batch);
} else {
DependencyManager::get<DeferredLightingEffect>()->renderSolidSphere(batch, 0.5f, SLICES, STACKS, sphereColor);
batch.setModelTransform(Transform());
DependencyManager::get<DeferredLightingEffect>()->renderSolidSphereInstance(batch, modelTransform, sphereColor);
}

View file

@ -19,6 +19,10 @@
#include <GeometryCache.h>
#include <PerfStat.h>
// Sphere entities should fit inside a cube entity of the same size, so a sphere that has dimensions 1x1x1
// is a half unit sphere. However, the geometry cache renders a UNIT sphere, so we need to scale down.
static const float SPHERE_ENTITY_SCALE = 0.5f;
EntityItemPointer RenderableZoneEntityItem::factory(const EntityItemID& entityID, const EntityItemProperties& properties) {
return std::make_shared<RenderableZoneEntityItem>(entityID, properties);
}
@ -121,15 +125,15 @@ void RenderableZoneEntityItem::render(RenderArgs* args) {
Q_ASSERT(args->_batch);
gpu::Batch& batch = *args->_batch;
batch.setModelTransform(getTransformToCenter());
batch.setModelTransform(Transform());
auto shapeTransform = getTransformToCenter();
auto deferredLightingEffect = DependencyManager::get<DeferredLightingEffect>();
if (getShapeType() == SHAPE_TYPE_SPHERE) {
const int SLICES = 15, STACKS = 15;
deferredLightingEffect->renderWireSphere(batch, 0.5f, SLICES, STACKS, DEFAULT_COLOR);
shapeTransform.postScale(SPHERE_ENTITY_SCALE);
deferredLightingEffect->renderWireSphereInstance(batch, shapeTransform, DEFAULT_COLOR);
} else {
deferredLightingEffect->renderWireCube(batch, 1.0f, DEFAULT_COLOR);
deferredLightingEffect->renderWireCubeInstance(batch, shapeTransform, DEFAULT_COLOR);
}
break;
}

View file

@ -498,18 +498,21 @@ QScriptValue EntityItemProperties::copyToScriptValue(QScriptEngine* engine, bool
// Models only
if (_type == EntityTypes::Model) {
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_MODEL_URL, modelURL);
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_COMPOUND_SHAPE_URL, compoundShapeURL);
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_ANIMATION_URL, animationURL);
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_TEXTURES, textures);
}
if (_type == EntityTypes::Model || _type == EntityTypes::Zone || _type == EntityTypes::ParticleEffect) {
COPY_PROPERTY_TO_QSCRIPTVALUE_GETTER(PROP_SHAPE_TYPE, shapeType, getShapeTypeAsString());
}
// FIXME - it seems like ParticleEffect should also support this
if (_type == EntityTypes::Model || _type == EntityTypes::Zone) {
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_COMPOUND_SHAPE_URL, compoundShapeURL);
}
// Models & Particles
if (_type == EntityTypes::Model || _type == EntityTypes::ParticleEffect) {
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_TEXTURES, textures);
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_ANIMATION_PLAYING, animationIsPlaying);
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_ANIMATION_FPS, animationFPS);
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_ANIMATION_FRAME_INDEX, animationFrameIndex);

View file

@ -265,24 +265,30 @@ void appendModelIDs(const QString& parentID, const QMultiHash<QString, QString>&
}
gpu::BufferPointer FBXMeshPart::getTrianglesForQuads() const {
gpu::BufferPointer FBXMeshPart::getMergedTriangles() const {
// if we've been asked for our triangulation of the original quads, but we don't yet have them
// then create them now.
if (!trianglesForQuadsAvailable) {
trianglesForQuadsAvailable = true;
if (!mergedTrianglesAvailable) {
mergedTrianglesAvailable = true;
quadsAsTrianglesIndicesBuffer = std::make_shared<gpu::Buffer>();
mergedTrianglesIndicesBuffer = std::make_shared<gpu::Buffer>();
// QVector<int> quadIndices; // original indices from the FBX mesh
QVector<quint32> quadsAsTrianglesIndices; // triangle versions of quads converted when first needed
QVector<quint32> mergedTrianglesIndices; // triangle versions of quads converted when first needed
const int INDICES_PER_ORIGINAL_TRIANGLE = 3;
const int INDICES_PER_ORIGINAL_QUAD = 4;
const int INDICES_PER_TRIANGULATED_QUAD = 6;
int numberOfQuads = quadIndices.size() / INDICES_PER_ORIGINAL_QUAD;
quadsAsTrianglesIndices.resize(numberOfQuads * INDICES_PER_TRIANGULATED_QUAD);
int numberOfTriangles = triangleIndices.size() / INDICES_PER_ORIGINAL_TRIANGLE;
int mergedNumberOfIndices = (numberOfQuads * INDICES_PER_TRIANGULATED_QUAD) + triangleIndices.size();
// resize our merged indices to have enough room for our triangulated quads and our original triangles
mergedTrianglesIndices.resize(mergedNumberOfIndices);
int originalIndex = 0;
int triangulatedIndex = 0;
// triangulate our quads
for (int fromQuad = 0; fromQuad < numberOfQuads; fromQuad++) {
int i0 = quadIndices[originalIndex + 0];
int i1 = quadIndices[originalIndex + 1];
@ -296,23 +302,38 @@ gpu::BufferPointer FBXMeshPart::getTrianglesForQuads() const {
// Triangle tri1 = { v0, v1, v2 };
// Triangle tri2 = { v2, v3, v0 };
quadsAsTrianglesIndices[triangulatedIndex + 0] = i0;
quadsAsTrianglesIndices[triangulatedIndex + 1] = i1;
quadsAsTrianglesIndices[triangulatedIndex + 2] = i3;
mergedTrianglesIndices[triangulatedIndex + 0] = i0;
mergedTrianglesIndices[triangulatedIndex + 1] = i1;
mergedTrianglesIndices[triangulatedIndex + 2] = i3;
quadsAsTrianglesIndices[triangulatedIndex + 3] = i1;
quadsAsTrianglesIndices[triangulatedIndex + 4] = i2;
quadsAsTrianglesIndices[triangulatedIndex + 5] = i3;
mergedTrianglesIndices[triangulatedIndex + 3] = i1;
mergedTrianglesIndices[triangulatedIndex + 4] = i2;
mergedTrianglesIndices[triangulatedIndex + 5] = i3;
originalIndex += INDICES_PER_ORIGINAL_QUAD;
triangulatedIndex += INDICES_PER_TRIANGULATED_QUAD;
}
trianglesForQuadsIndicesCount = INDICES_PER_TRIANGULATED_QUAD * numberOfQuads;
quadsAsTrianglesIndicesBuffer->append(quadsAsTrianglesIndices.size() * sizeof(quint32), (gpu::Byte*)quadsAsTrianglesIndices.data());
// add our original triangles
originalIndex = 0;
for (int fromTriangle = 0; fromTriangle < numberOfTriangles; fromTriangle++) {
int i0 = triangleIndices[originalIndex + 0];
int i1 = triangleIndices[originalIndex + 1];
int i2 = triangleIndices[originalIndex + 2];
mergedTrianglesIndices[triangulatedIndex + 0] = i0;
mergedTrianglesIndices[triangulatedIndex + 1] = i1;
mergedTrianglesIndices[triangulatedIndex + 2] = i2;
originalIndex += INDICES_PER_ORIGINAL_TRIANGLE;
triangulatedIndex += INDICES_PER_ORIGINAL_TRIANGLE;
}
mergedTrianglesIndicesCount = mergedNumberOfIndices;
mergedTrianglesIndicesBuffer->append(mergedNumberOfIndices * sizeof(quint32), (gpu::Byte*)mergedTrianglesIndices.data());
}
return quadsAsTrianglesIndicesBuffer;
return mergedTrianglesIndicesBuffer;
}
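For reference, the merge above turns each quad (i0, i1, i2, i3) into the two triangles (i0, i1, i3) and (i1, i2, i3) and then appends the original triangle indices unchanged. A self-contained sketch of that index expansion, using plain vectors rather than gpu buffers (helper name is illustrative):

#include <cstddef>
#include <cstdint>
#include <vector>

// Expand quad indices (4 per quad) into triangle indices (6 per quad), then
// append the original triangle indices -- the same merge getMergedTriangles()
// performs into a single index buffer.
std::vector<uint32_t> mergeQuadsAndTriangles(const std::vector<int>& quadIndices,
                                             const std::vector<int>& triangleIndices) {
    std::vector<uint32_t> merged;
    merged.reserve((quadIndices.size() / 4) * 6 + triangleIndices.size());
    for (std::size_t q = 0; q + 3 < quadIndices.size(); q += 4) {
        uint32_t i0 = quadIndices[q], i1 = quadIndices[q + 1];
        uint32_t i2 = quadIndices[q + 2], i3 = quadIndices[q + 3];
        merged.insert(merged.end(), { i0, i1, i3,   i1, i2, i3 });
    }
    merged.insert(merged.end(), triangleIndices.begin(), triangleIndices.end());
    return merged;
}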
FBXBlendshape extractBlendshape(const FBXNode& object) {
@ -479,8 +500,6 @@ FBXLight extractLight(const FBXNode& object) {
return light;
}
QByteArray fileOnUrl(const QByteArray& filenameString, const QString& url) {
QString path = QFileInfo(url).path();
QByteArray filename = filenameString;

View file

@ -121,17 +121,17 @@ public:
/// A single part of a mesh (with the same material).
class FBXMeshPart {
public:
QVector<int> quadIndices; // original indices from the FBX mesh
QVector<int> triangleIndices; // original indices from the FBX mesh
mutable gpu::BufferPointer quadsAsTrianglesIndicesBuffer;
mutable gpu::BufferPointer mergedTrianglesIndicesBuffer; // both the quads and the triangles merged into a single set of triangles
QString materialID;
mutable bool trianglesForQuadsAvailable = false;
mutable int trianglesForQuadsIndicesCount = 0;
mutable bool mergedTrianglesAvailable = false;
mutable int mergedTrianglesIndicesCount = 0;
gpu::BufferPointer getTrianglesForQuads() const;
gpu::BufferPointer getMergedTriangles() const;
};
class FBXMaterial {
@ -153,7 +153,6 @@ public:
FBXTexture emissiveTexture;
bool needTangentSpace() const;
};
/// A single mesh (with optional blendshapes) extracted from an FBX document.

View file

@ -441,12 +441,12 @@ void FBXReader::buildModelMesh(ExtractedMesh& extracted, const QString& url) {
if (clusterIndicesSize) {
mesh.addAttribute(gpu::Stream::SKIN_CLUSTER_INDEX,
model::BufferView(attribBuffer, clusterIndicesOffset, clusterIndicesSize,
gpu::Element(gpu::VEC4, gpu::NFLOAT, gpu::XYZW)));
gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW)));
}
if (clusterWeightsSize) {
mesh.addAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT,
model::BufferView(attribBuffer, clusterWeightsOffset, clusterWeightsSize,
gpu::Element(gpu::VEC4, gpu::NFLOAT, gpu::XYZW)));
gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW)));
}

View file

@ -304,12 +304,16 @@ bool Batch::isSkyboxEnabled() const {
return _enableSkybox;
}
void Batch::setupNamedCalls(const std::string& instanceName, NamedBatchData::Function function) {
void Batch::setupNamedCalls(const std::string& instanceName, size_t count, NamedBatchData::Function function) {
NamedBatchData& instance = _namedData[instanceName];
++instance._count;
instance._count += count;
instance._function = function;
}
void Batch::setupNamedCalls(const std::string& instanceName, NamedBatchData::Function function) {
setupNamedCalls(instanceName, 1, function);
}
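setupNamedCalls folds repeated requests under one name into a single deferred call: each request adds to the per-name count and (re)sets the function, which runs once when the batch is processed. A reduced, standalone model of that accumulation (simplified types, not the actual gpu::Batch internals):

#include <cstddef>
#include <cstdio>
#include <functional>
#include <map>
#include <string>

// Reduced model of NamedBatchData: repeated setup calls with the same name
// bump the count; the (last) function runs once when the batch is processed.
struct NamedData {
    std::size_t count{ 0 };
    std::function<void(std::size_t)> function;
};

int main() {
    std::map<std::string, NamedData> named;
    auto setupNamedCalls = [&](const std::string& name, std::size_t count,
                               std::function<void(std::size_t)> f) {
        NamedData& d = named[name];
        d.count += count;
        d.function = f;
    };

    for (int i = 0; i < 3; ++i) {
        setupNamedCalls("solidSphere", 1,
                        [](std::size_t n) { std::printf("draw %zu sphere instances\n", n); });
    }
    for (auto& entry : named) {
        entry.second.function(entry.second.count); // prints "draw 3 sphere instances"
    }
}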
BufferPointer Batch::getNamedBuffer(const std::string& instanceName, uint8_t index) {
NamedBatchData& instance = _namedData[instanceName];
if (instance._buffers.size() <= index) {

View file

@ -62,8 +62,10 @@ public:
Function _function;
void process(Batch& batch) {
if (_function) {
_function(batch, *this);
}
}
};
using NamedBatchDataMap = std::map<std::string, NamedBatchData>;
@ -96,6 +98,7 @@ public:
void drawIndexedInstanced(uint32 nbInstances, Primitive primitiveType, uint32 nbIndices, uint32 startIndex = 0, uint32 startInstance = 0);
void setupNamedCalls(const std::string& instanceName, size_t count, NamedBatchData::Function function);
void setupNamedCalls(const std::string& instanceName, NamedBatchData::Function function);
BufferPointer getNamedBuffer(const std::string& instanceName, uint8_t index = 0);

View file

@ -0,0 +1,21 @@
//
// Created by Bradley Austin Davis on 2015/09/20
// Copyright 2013-2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "Format.h"
using namespace gpu;
const Element Element::COLOR_RGBA_32{ VEC4, NUINT8, RGBA };
const Element Element::COLOR_RGBA{ VEC4, FLOAT, RGBA };
const Element Element::VEC2F_UV{ VEC2, FLOAT, UV };
const Element Element::VEC2F_XY{ VEC2, FLOAT, XY };
const Element Element::VEC3F_XYZ{ VEC3, FLOAT, XYZ };
const Element Element::VEC4F_XYZW{ VEC4, FLOAT, XYZW };
const Element Element::INDEX_UINT16{ SCALAR, UINT16, INDEX };
const Element Element::PART_DRAWCALL{ VEC4, UINT32, PART };

View file

@ -56,10 +56,8 @@ enum Type {
INT8,
UINT8,
NFLOAT,
NINT32,
NUINT32,
NHALF,
NINT16,
NUINT16,
NINT8,
@ -68,6 +66,7 @@ enum Type {
NUM_TYPES,
BOOL = UINT8,
NORMALIZED_START = NINT32,
};
// Array providing the size in bytes for a given scalar type
static const int TYPE_SIZE[NUM_TYPES] = {
@ -79,10 +78,10 @@ static const int TYPE_SIZE[NUM_TYPES] = {
2,
1,
1,
// normalized values
4,
4,
4,
2,
2,
2,
1,
@ -99,10 +98,9 @@ static const bool TYPE_IS_INTEGER[NUM_TYPES] = {
true,
true,
false,
// Normalized values
true,
true,
false,
true,
true,
true,
@ -151,6 +149,7 @@ enum Semantic {
RGB,
RGBA,
BGRA,
XY,
XYZ,
XYZW,
QUAT,
@ -199,7 +198,7 @@ public:
uint8 getLocationCount() const { return LOCATION_COUNT[(Dimension)_dimension]; }
Type getType() const { return (Type)_type; }
bool isNormalized() const { return (getType() >= NFLOAT); }
bool isNormalized() const { return (getType() >= NORMALIZED_START); }
bool isInteger() const { return TYPE_IS_INTEGER[getType()]; }
uint32 getSize() const { return DIMENSION_COUNT[_dimension] * TYPE_SIZE[_type]; }
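Anchoring isNormalized() to a NORMALIZED_START alias instead of a specific enumerator keeps the test valid when entries such as NFLOAT and NHALF are dropped from the list, as this commit does. A reduced sketch of the sentinel-alias pattern (shortened enum, not the full gpu::Type set):

#include <cassert>

enum Type {
    FLOAT, INT32, UINT32, INT16, UINT16, INT8, UINT8,
    // normalized types start here
    NINT32, NUINT32, NINT16, NUINT16, NINT8, NUINT8,
    NUM_TYPES,
    NORMALIZED_START = NINT32, // sentinel alias; stays correct if enumerators above change
};

bool isNormalized(Type t) { return t >= NORMALIZED_START; }

int main() {
    assert(!isNormalized(FLOAT));
    assert(isNormalized(NUINT8));
}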
@ -215,10 +214,14 @@ public:
}
static const Element COLOR_RGBA_32;
static const Element COLOR_RGBA;
static const Element VEC2F_UV;
static const Element VEC2F_XY;
static const Element VEC3F_XYZ;
static const Element VEC4F_XYZW;
static const Element INDEX_UINT16;
static const Element PART_DRAWCALL;
protected:
uint8 _semantic;
uint8 _dimension : 4;

View file

@ -127,7 +127,12 @@ void GLBackend::renderPassTransfer(Batch& batch) {
const size_t numCommands = batch.getCommands().size();
const Batch::Commands::value_type* command = batch.getCommands().data();
const Batch::CommandOffsets::value_type* offset = batch.getCommandOffsets().data();
for (auto& cached : batch._buffers._items) {
if (cached._data) {
syncGPUObject(*cached._data);
}
}
// Reset the transform buffers
_transform._cameras.resize(0);
_transform._cameraOffsets.clear();
@ -330,7 +335,7 @@ void GLBackend::do_drawIndexedInstanced(Batch& batch, uint32 paramOffset) {
uint32 startInstance = batch._params[paramOffset + 0]._uint;
GLenum glType = _elementTypeToGLType[_input._indexBufferType];
glDrawElementsInstanced(mode, numIndices, glType, nullptr, numInstances);
glDrawElementsInstanced(mode, numIndices, glType, reinterpret_cast<GLvoid*>(startIndex + _input._indexBufferOffset), numInstances);
(void)CHECK_GL_ERROR();
}

View file

@ -34,10 +34,9 @@ static const GLenum _elementTypeToGLType[gpu::NUM_TYPES] = {
GL_UNSIGNED_SHORT,
GL_BYTE,
GL_UNSIGNED_BYTE,
GL_FLOAT,
// Normalized values
GL_INT,
GL_UNSIGNED_INT,
GL_HALF_FLOAT,
GL_SHORT,
GL_UNSIGNED_SHORT,
GL_BYTE,

View file

@ -156,7 +156,6 @@ public:
texel.internalFormat = GL_DEPTH_COMPONENT32;
break;
}
case gpu::NFLOAT:
case gpu::FLOAT: {
texel.internalFormat = GL_DEPTH_COMPONENT32F;
break;
@ -165,8 +164,7 @@ public:
case gpu::INT16:
case gpu::NUINT16:
case gpu::NINT16:
case gpu::HALF:
case gpu::NHALF: {
case gpu::HALF: {
texel.internalFormat = GL_DEPTH_COMPONENT16;
break;
}

View file

@ -14,11 +14,6 @@
using namespace gpu;
const Element Element::COLOR_RGBA_32 = Element(VEC4, NUINT8, RGBA);
const Element Element::VEC3F_XYZ = Element(VEC3, FLOAT, XYZ);
const Element Element::INDEX_UINT16 = Element(SCALAR, UINT16, INDEX);
const Element Element::PART_DRAWCALL = Element(VEC4, UINT32, PART);
Resource::Size Resource::Sysmem::allocateMemory(Byte** dataAllocated, Size size) {
if ( !dataAllocated ) {
qWarning() << "Buffer::Sysmem::allocateMemory() : Must have a valid dataAllocated pointer.";

View file

@ -144,6 +144,11 @@ public:
return append(sizeof(t), reinterpret_cast<const Byte*>(&t));
}
template <typename T>
Size append(const std::vector<T>& t) {
return append(sizeof(T) * t.size(), reinterpret_cast<const Byte*>(&t[0]));
}
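The new overload appends the raw bytes of a contiguous vector in one call. A standalone sketch of the same byte copy, written against a plain byte vector and with an explicit empty-vector guard (the guard is an addition here, not something the overload above does):

#include <cstddef>
#include <cstdint>
#include <vector>

using Byte = uint8_t;

// Append the raw bytes of a vector of trivially copyable elements to a byte
// store, mirroring the new Resource::append(const std::vector<T>&) overload.
template <typename T>
std::size_t appendBytes(std::vector<Byte>& store, const std::vector<T>& t) {
    if (t.empty()) {
        return store.size(); // nothing to copy; also avoids indexing t[0] on an empty vector
    }
    const Byte* src = reinterpret_cast<const Byte*>(t.data());
    store.insert(store.end(), src, src + sizeof(T) * t.size());
    return store.size();
}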
// Access the sysmem object.
const Sysmem& getSysmem() const { assert(_sysmem); return (*_sysmem); }
Sysmem& editSysmem() { assert(_sysmem); return (*_sysmem); }

View file

@ -15,6 +15,37 @@
using namespace gpu;
using ElementArray = std::array<Element, Stream::NUM_INPUT_SLOTS>;
const ElementArray& getDefaultElements() {
static ElementArray defaultElements{
//POSITION = 0,
Element::VEC3F_XYZ,
//NORMAL = 1,
Element::VEC3F_XYZ,
//COLOR = 2,
Element::COLOR_RGBA_32,
//TEXCOORD0 = 3,
Element::VEC2F_UV,
//TANGENT = 4,
Element::VEC3F_XYZ,
//SKIN_CLUSTER_INDEX = 5,
Element::VEC4F_XYZW,
//SKIN_CLUSTER_WEIGHT = 6,
Element::VEC4F_XYZW,
//TEXCOORD1 = 7,
Element::VEC2F_UV,
//INSTANCE_SCALE = 8,
Element::VEC3F_XYZ,
//INSTANCE_TRANSLATE = 9,
Element::VEC3F_XYZ,
//INSTANCE_XFM = 10,
// FIXME make a matrix element
Element::VEC4F_XYZW
};
return defaultElements;
}
void Stream::Format::evaluateCache() {
_channels.clear();
_elementTotalSize = 0;
@ -34,6 +65,19 @@ bool Stream::Format::setAttribute(Slot slot, Slot channel, Element element, Offs
return true;
}
bool Stream::Format::setAttribute(Slot slot, Frequency frequency) {
_attributes[slot] = Attribute((InputSlot)slot, slot, getDefaultElements()[slot], 0, frequency);
evaluateCache();
return true;
}
bool Stream::Format::setAttribute(Slot slot, Slot channel, Frequency frequency) {
_attributes[slot] = Attribute((InputSlot)slot, channel, getDefaultElements()[slot], 0, frequency);
evaluateCache();
return true;
}
BufferStream::BufferStream() :
_buffers(),
_offsets(),

View file

@ -11,12 +11,14 @@
#ifndef hifi_gpu_Stream_h
#define hifi_gpu_Stream_h
#include <vector>
#include <map>
#include <array>
#include <assert.h>
#include "Resource.h"
#include "Format.h"
#include <vector>
#include <map>
namespace gpu {
@ -55,6 +57,8 @@ public:
// Every thing that is needed to detail a stream attribute and how to interpret it
class Attribute {
public:
Attribute() {}
Attribute(Slot slot, Slot channel, Element element, Offset offset = 0, Frequency frequency = PER_VERTEX) :
_slot(slot),
_channel(channel),
@ -62,21 +66,12 @@ public:
_offset(offset),
_frequency(frequency)
{}
Attribute() :
_slot(POSITION),
_channel(0),
_element(),
_offset(0),
_frequency(PER_VERTEX)
{}
Slot _slot; // Logical slot assigned to the attribute
Slot _channel; // index of the channel where to get the data from
Element _element;
Offset _offset;
uint32 _frequency;
Slot _slot{ POSITION }; // Logical slot assigned to the attribute
Slot _channel{ POSITION }; // index of the channel where to get the data from
Element _element{ Element::VEC3F_XYZ };
Offset _offset{ 0 };
uint32 _frequency{ PER_VERTEX };
// Size of the
uint32 getSize() const { return _element.getSize(); }
@ -113,6 +108,9 @@ public:
uint32 getElementTotalSize() const { return _elementTotalSize; }
bool setAttribute(Slot slot, Slot channel, Element element, Offset offset = 0, Frequency frequency = PER_VERTEX);
bool setAttribute(Slot slot, Frequency frequency = PER_VERTEX);
bool setAttribute(Slot slot, Slot channel, Frequency frequency = PER_VERTEX);
protected:
AttributeMap _attributes;

View file

@ -348,8 +348,8 @@ static NetworkMesh* buildNetworkMesh(const FBXMesh& mesh, const QUrl& textureBas
// need lightmap texcoord UV but doesn't have uv#1 so just reuse the same channel
networkMesh->_vertexFormat->setAttribute(gpu::Stream::TEXCOORD1, channelNum - 1, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV));
}
if (mesh.clusterIndices.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_INDEX, channelNum++, gpu::Element(gpu::VEC4, gpu::NFLOAT, gpu::XYZW));
if (mesh.clusterWeights.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT, channelNum++, gpu::Element(gpu::VEC4, gpu::NFLOAT, gpu::XYZW));
if (mesh.clusterIndices.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_INDEX, channelNum++, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW));
if (mesh.clusterWeights.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT, channelNum++, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW));
}
else {
int colorsOffset = mesh.tangents.size() * sizeof(glm::vec3);
@ -381,8 +381,8 @@ static NetworkMesh* buildNetworkMesh(const FBXMesh& mesh, const QUrl& textureBas
if (mesh.tangents.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::TANGENT, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ));
if (mesh.colors.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::COLOR, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::RGB));
if (mesh.texCoords.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::TEXCOORD, channelNum++, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV));
if (mesh.clusterIndices.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_INDEX, channelNum++, gpu::Element(gpu::VEC4, gpu::NFLOAT, gpu::XYZW));
if (mesh.clusterWeights.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT, channelNum++, gpu::Element(gpu::VEC4, gpu::NFLOAT, gpu::XYZW));
if (mesh.clusterIndices.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_INDEX, channelNum++, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW));
if (mesh.clusterWeights.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT, channelNum++, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW));
}
}

View file

@ -187,17 +187,6 @@ gpu::PipelinePointer DeferredLightingEffect::bindSimpleProgram(gpu::Batch& batch
return pipeline;
}
void DeferredLightingEffect::renderSolidSphere(gpu::Batch& batch, float radius, int slices, int stacks, const glm::vec4& color) {
bindSimpleProgram(batch);
DependencyManager::get<GeometryCache>()->renderSphere(batch, radius, slices, stacks, color);
}
void DeferredLightingEffect::renderWireSphere(gpu::Batch& batch, float radius, int slices, int stacks, const glm::vec4& color) {
bindSimpleProgram(batch);
DependencyManager::get<GeometryCache>()->renderSphere(batch, radius, slices, stacks, color, false);
}
uint32_t toCompactColor(const glm::vec4& color) {
uint32_t compactColor = ((int(color.x * 255.0f) & 0xFF)) |
((int(color.y * 255.0f) & 0xFF) << 8) |
@ -206,39 +195,103 @@ uint32_t toCompactColor(const glm::vec4& color) {
return compactColor;
}
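toCompactColor packs the four [0, 1] color channels into one 32-bit word with red in the lowest byte, which is what the instanced color buffer consumes; opaque red (1, 0, 0, 1) becomes 0xFF0000FF. A standalone equivalent without glm (helper name illustrative):

#include <cstdint>
#include <cstdio>

// Pack four [0, 1] floats into 0xAABBGGRR (red in the low byte), matching toCompactColor.
uint32_t packColor(float r, float g, float b, float a) {
    return  (uint32_t(r * 255.0f) & 0xFF)
         | ((uint32_t(g * 255.0f) & 0xFF) << 8)
         | ((uint32_t(b * 255.0f) & 0xFF) << 16)
         | ((uint32_t(a * 255.0f) & 0xFF) << 24);
}

int main() {
    unsigned packed = packColor(1.0f, 0.0f, 0.0f, 1.0f);
    std::printf("0x%08X\n", packed); // prints 0xFF0000FF
}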
void DeferredLightingEffect::renderSolidCubeInstance(gpu::Batch& batch, const Transform& xfm, const glm::vec4& color) {
static const std::string INSTANCE_NAME = __FUNCTION__;
static const size_t TRANSFORM_BUFFER = 0;
static const size_t COLOR_BUFFER = 1;
{
gpu::BufferPointer instanceTransformBuffer = batch.getNamedBuffer(INSTANCE_NAME, TRANSFORM_BUFFER);
glm::mat4 xfmMat4;
instanceTransformBuffer->append(xfm.getMatrix(xfmMat4));
static const size_t INSTANCE_TRANSFORM_BUFFER = 0;
static const size_t INSTANCE_COLOR_BUFFER = 1;
gpu::BufferPointer instanceColorBuffer = batch.getNamedBuffer(INSTANCE_NAME, COLOR_BUFFER);
template <typename F>
void renderInstances(const std::string& name, gpu::Batch& batch, const Transform& transform, const glm::vec4& color, F f) {
{
gpu::BufferPointer instanceTransformBuffer = batch.getNamedBuffer(name, INSTANCE_TRANSFORM_BUFFER);
glm::mat4 glmTransform;
instanceTransformBuffer->append(transform.getMatrix(glmTransform));
gpu::BufferPointer instanceColorBuffer = batch.getNamedBuffer(name, INSTANCE_COLOR_BUFFER);
auto compactColor = toCompactColor(color);
instanceColorBuffer->append(compactColor);
}
batch.setupNamedCalls(INSTANCE_NAME, [=](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
auto pipeline = bindSimpleProgram(batch);
auto that = DependencyManager::get<DeferredLightingEffect>();
batch.setupNamedCalls(name, [=](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
auto pipeline = that->bindSimpleProgram(batch);
auto location = pipeline->getProgram()->getUniforms().findLocation("Instanced");
batch._glUniform1i(location, 1);
DependencyManager::get<GeometryCache>()->renderSolidCubeInstances(batch, data._count,
data._buffers[TRANSFORM_BUFFER], data._buffers[COLOR_BUFFER]);
f(batch, data);
batch._glUniform1i(location, 0);
});
}
void DeferredLightingEffect::renderSolidCube(gpu::Batch& batch, float size, const glm::vec4& color) {
bindSimpleProgram(batch);
DependencyManager::get<GeometryCache>()->renderSolidCube(batch, size, color);
void DeferredLightingEffect::renderSolidSphereInstance(gpu::Batch& batch, const Transform& transform, const glm::vec4& color) {
static const std::string INSTANCE_NAME = __FUNCTION__;
renderInstances(INSTANCE_NAME, batch, transform, color, [](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
DependencyManager::get<GeometryCache>()->renderShapeInstances(batch, GeometryCache::Sphere, data._count,
data._buffers[INSTANCE_TRANSFORM_BUFFER], data._buffers[INSTANCE_COLOR_BUFFER]);
});
}
void DeferredLightingEffect::renderWireCube(gpu::Batch& batch, float size, const glm::vec4& color) {
bindSimpleProgram(batch);
DependencyManager::get<GeometryCache>()->renderWireCube(batch, size, color);
void DeferredLightingEffect::renderWireSphereInstance(gpu::Batch& batch, const Transform& transform, const glm::vec4& color) {
static const std::string INSTANCE_NAME = __FUNCTION__;
renderInstances(INSTANCE_NAME, batch, transform, color, [](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
DependencyManager::get<GeometryCache>()->renderWireShapeInstances(batch, GeometryCache::Sphere, data._count,
data._buffers[INSTANCE_TRANSFORM_BUFFER], data._buffers[INSTANCE_COLOR_BUFFER]);
});
}
// Enable this in a debug build to cause 'box' entities to iterate through all the
// available shape types, both solid and wireframes
//#define DEBUG_SHAPES
void DeferredLightingEffect::renderSolidCubeInstance(gpu::Batch& batch, const Transform& transform, const glm::vec4& color) {
static const std::string INSTANCE_NAME = __FUNCTION__;
#ifdef DEBUG_SHAPES
static auto startTime = usecTimestampNow();
renderInstances(INSTANCE_NAME, batch, transform, color, [](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
auto usecs = usecTimestampNow();
usecs -= startTime;
auto msecs = usecs / USECS_PER_MSEC;
float seconds = msecs;
seconds /= MSECS_PER_SECOND;
float fractionalSeconds = seconds - floor(seconds);
int shapeIndex = (int)seconds;
// Every second we flip to the next shape.
static const int SHAPE_COUNT = 5;
GeometryCache::Shape shapes[SHAPE_COUNT] = {
GeometryCache::Cube,
GeometryCache::Tetrahedron,
GeometryCache::Sphere,
GeometryCache::Icosahedron,
GeometryCache::Line,
};
shapeIndex %= SHAPE_COUNT;
GeometryCache::Shape shape = shapes[shapeIndex];
// For the first half second for a given shape, show the wireframe, for the second half, show the solid.
if (fractionalSeconds > 0.5f) {
DependencyManager::get<GeometryCache>()->renderShapeInstances(batch, shape, data._count,
data._buffers[INSTANCE_TRANSFORM_BUFFER], data._buffers[INSTANCE_COLOR_BUFFER]);
} else {
DependencyManager::get<GeometryCache>()->renderWireShapeInstances(batch, shape, data._count,
data._buffers[INSTANCE_TRANSFORM_BUFFER], data._buffers[INSTANCE_COLOR_BUFFER]);
}
});
#else
renderInstances(INSTANCE_NAME, batch, transform, color, [](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
DependencyManager::get<GeometryCache>()->renderCubeInstances(batch, data._count,
data._buffers[INSTANCE_TRANSFORM_BUFFER], data._buffers[INSTANCE_COLOR_BUFFER]);
});
#endif
}
void DeferredLightingEffect::renderWireCubeInstance(gpu::Batch& batch, const Transform& transform, const glm::vec4& color) {
static const std::string INSTANCE_NAME = __FUNCTION__;
renderInstances(INSTANCE_NAME, batch, transform, color, [](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
DependencyManager::get<GeometryCache>()->renderWireCubeInstances(batch, data._count,
data._buffers[INSTANCE_TRANSFORM_BUFFER], data._buffers[INSTANCE_COLOR_BUFFER]);
});
}
void DeferredLightingEffect::renderQuad(gpu::Batch& batch, const glm::vec3& minCorner, const glm::vec3& maxCorner,
@ -546,8 +599,9 @@ void DeferredLightingEffect::render(RenderArgs* args) {
} else {
Transform model;
model.setTranslation(glm::vec3(light->getPosition().x, light->getPosition().y, light->getPosition().z));
batch.setModelTransform(model);
geometryCache->renderSphere(batch, expandedRadius, 32, 32, glm::vec4(1.0f, 1.0f, 1.0f, 1.0f));
batch.setModelTransform(model.postScale(expandedRadius));
batch._glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
geometryCache->renderSphere(batch);
}
}
}

View file

@ -40,24 +40,26 @@ public:
gpu::PipelinePointer bindSimpleProgram(gpu::Batch& batch, bool textured = false, bool culled = true,
bool emmisive = false, bool depthBias = false);
/// Sets up the state necessary to render static untextured geometry with the simple program.
void bindInstanceProgram(gpu::Batch& batch, bool textured = false, bool culled = true,
bool emmisive = false, bool depthBias = false);
void renderSolidSphereInstance(gpu::Batch& batch, const Transform& xfm, const glm::vec4& color);
void renderSolidSphereInstance(gpu::Batch& batch, const Transform& xfm, const glm::vec3& color) {
renderSolidSphereInstance(batch, xfm, glm::vec4(color, 1.0));
}
//// Renders a solid sphere with the simple program.
void renderSolidSphere(gpu::Batch& batch, float radius, int slices, int stacks, const glm::vec4& color);
void renderWireSphereInstance(gpu::Batch& batch, const Transform& xfm, const glm::vec4& color);
void renderWireSphereInstance(gpu::Batch& batch, const Transform& xfm, const glm::vec3& color) {
renderWireSphereInstance(batch, xfm, glm::vec4(color, 1.0));
}
//// Renders a wireframe sphere with the simple program.
void renderWireSphere(gpu::Batch& batch, float radius, int slices, int stacks, const glm::vec4& color);
//// Renders a solid cube using instancing. Transform should include scaling.
void renderSolidCubeInstance(gpu::Batch& batch, const Transform& xfm, const glm::vec4& color);
void renderSolidCubeInstance(gpu::Batch& batch, const Transform& xfm, const glm::vec3& color) {
renderSolidCubeInstance(batch, xfm, glm::vec4(color, 1.0));
}
//// Renders a solid cube with the simple program.
void renderSolidCube(gpu::Batch& batch, float size, const glm::vec4& color);
void renderWireCubeInstance(gpu::Batch& batch, const Transform& xfm, const glm::vec4& color);
void renderWireCubeInstance(gpu::Batch& batch, const Transform& xfm, const glm::vec3& color) {
renderWireCubeInstance(batch, xfm, glm::vec4(color, 1.0));
}
//// Renders a wireframe cube with the simple program.
void renderWireCube(gpu::Batch& batch, float size, const glm::vec4& color);
//// Renders a quad with the simple program.
void renderQuad(gpu::Batch& batch, const glm::vec3& minCorner, const glm::vec3& maxCorner, const glm::vec4& color);

View file

@ -197,7 +197,6 @@ bool Environment::findCapsulePenetration(const glm::vec3& start, const glm::vec3
}
void Environment::renderAtmosphere(gpu::Batch& batch, ViewFrustum& viewFrustum, const EnvironmentData& data) {
glm::vec3 center = data.getAtmosphereCenter();
// transform the model transform to the center of our atmosphere
@ -252,5 +251,6 @@ void Environment::renderAtmosphere(gpu::Batch& batch, ViewFrustum& viewFrustum,
batch._glUniform1f(locations[G_LOCATION], -0.990f);
batch._glUniform1f(locations[G2_LOCATION], -0.990f * -0.990f);
DependencyManager::get<GeometryCache>()->renderSphere(batch,1.0f, 100, 50, glm::vec4(1.0f, 0.0f, 0.0f, 0.5f)); //Draw a unit sphere
batch._glColor4f(1.0f, 0.0f, 0.0f, 0.5f);
DependencyManager::get<GeometryCache>()->renderSphere(batch); //Draw a unit sphere
}

File diff suppressed because it is too large


View file

@ -14,6 +14,9 @@
#include "model-networking/ModelCache.h"
#include <array>
#include <QMap>
#include <QRunnable>
@ -115,33 +118,55 @@ inline uint qHash(const Vec4PairVec4Pair& v, uint seed) {
seed);
}
using VertexVector = std::vector<glm::vec3>;
using IndexVector = std::vector<uint16_t>;
/// Stores cached geometry.
class GeometryCache : public Dependency {
SINGLETON_DEPENDENCY
public:
enum Shape {
Line,
Triangle,
Quad,
Circle,
Cube,
Sphere,
Tetrahedron,
Octahetron,
Dodecahedron,
Icosahedron,
Torus,
Cone,
Cylinder,
NUM_SHAPES,
};
int allocateID() { return _nextID++; }
static const int UNKNOWN_ID;
gpu::BufferPointer getCubeVertices(float size);
void setupCubeVertices(gpu::Batch& batch, gpu::BufferPointer& verticesBuffer);
void renderShapeInstances(gpu::Batch& batch, Shape shape, size_t count, gpu::BufferPointer& transformBuffer, gpu::BufferPointer& colorBuffer);
void renderWireShapeInstances(gpu::Batch& batch, Shape shape, size_t count, gpu::BufferPointer& transformBuffer, gpu::BufferPointer& colorBuffer);
void renderShape(gpu::Batch& batch, Shape shape);
void renderWireShape(gpu::Batch& batch, Shape shape);
gpu::BufferPointer getSolidCubeIndices();
void renderCubeInstances(gpu::Batch& batch, size_t count, gpu::BufferPointer transformBuffer, gpu::BufferPointer colorBuffer);
void renderWireCubeInstances(gpu::Batch& batch, size_t count, gpu::BufferPointer transformBuffer, gpu::BufferPointer colorBuffer);
void renderCube(gpu::Batch& batch);
void renderWireCube(gpu::Batch& batch);
void renderSphere(gpu::Batch& batch, float radius, int slices, int stacks, const glm::vec3& color, bool solid = true, int id = UNKNOWN_ID)
{ renderSphere(batch, radius, slices, stacks, glm::vec4(color, 1.0f), solid, id); }
void renderSphere(gpu::Batch& batch, float radius, int slices, int stacks, const glm::vec4& color, bool solid = true, int id = UNKNOWN_ID);
void renderSphereInstances(gpu::Batch& batch, size_t count, gpu::BufferPointer transformBuffer, gpu::BufferPointer colorBuffer);
void renderWireSphereInstances(gpu::Batch& batch, size_t count, gpu::BufferPointer transformBuffer, gpu::BufferPointer colorBuffer);
void renderSphere(gpu::Batch& batch);
void renderWireSphere(gpu::Batch& batch);
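// Illustrative non-instanced usage (not from the diff): the canonical unit shapes take their color
// from the current batch color, as the Environment and gpu-test changes in this commit do:
//   batch._glColor4f(0.8f, 0.25f, 0.25f, 1.0f);
//   DependencyManager::get<GeometryCache>()->renderSphere(batch);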
void renderGrid(gpu::Batch& batch, int xDivisions, int yDivisions, const glm::vec4& color);
void renderGrid(gpu::Batch& batch, int x, int y, int width, int height, int rows, int cols, const glm::vec4& color, int id = UNKNOWN_ID);
void renderSolidCubeInstances(gpu::Batch& batch, size_t count, gpu::BufferPointer transformBuffer, gpu::BufferPointer colorBuffer);
void renderSolidCube(gpu::Batch& batch, float size, const glm::vec4& color);
void renderWireCube(gpu::Batch& batch, float size, const glm::vec4& color);
void renderBevelCornersRect(gpu::Batch& batch, int x, int y, int width, int height, int bevelDistance, const glm::vec4& color, int id = UNKNOWN_ID);
void renderUnitCube(gpu::Batch& batch);
void renderUnitQuad(gpu::Batch& batch, const glm::vec4& color = glm::vec4(1), int id = UNKNOWN_ID);
void renderQuad(gpu::Batch& batch, int x, int y, int width, int height, const glm::vec4& color, int id = UNKNOWN_ID)
@@ -204,30 +229,47 @@ public:
void updateVertices(int id, const QVector<glm::vec3>& points, const QVector<glm::vec2>& texCoords, const glm::vec4& color);
void renderVertices(gpu::Batch& batch, gpu::Primitive primitiveType, int id);
/// Loads geometry from the specified URL.
/// \param fallback a fallback URL to load if the desired one is unavailable
/// \param delayLoad if true, don't load the geometry immediately; wait until load is first requested
QSharedPointer<NetworkGeometry> getGeometry(const QUrl& url, const QUrl& fallback = QUrl(), bool delayLoad = false);
/// Set a batch to the simple pipeline, returning the previous pipeline
void useSimpleDrawPipeline(gpu::Batch& batch, bool noBlend = false);
private:
GeometryCache();
virtual ~GeometryCache();
void buildShapes();
typedef QPair<int, int> IntPair;
typedef QPair<unsigned int, unsigned int> VerticesIndices;
struct ShapeData {
size_t _indexOffset{ 0 };
size_t _indexCount{ 0 };
size_t _wireIndexOffset{ 0 };
size_t _wireIndexCount{ 0 };
gpu::BufferView _positionView;
gpu::BufferView _normalView;
gpu::BufferPointer _indices;
void setupVertices(gpu::BufferPointer& vertexBuffer, const VertexVector& vertices);
void setupIndices(gpu::BufferPointer& indexBuffer, const IndexVector& indices, const IndexVector& wireIndices);
void setupBatch(gpu::Batch& batch) const;
void draw(gpu::Batch& batch) const;
void drawWire(gpu::Batch& batch) const;
void drawInstances(gpu::Batch& batch, size_t count) const;
void drawWireInstances(gpu::Batch& batch, size_t count) const;
};
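// Plausible sketch of the solid draw path (illustrative only; the real definitions are in
// GeometryCache.cpp, whose diff is suppressed above). IndexVector is std::vector<uint16_t>, so the
// indices would be bound as gpu::UINT16:
//   void GeometryCache::ShapeData::draw(gpu::Batch& batch) const {
//       if (_indexCount) {
//           batch.setIndexBuffer(gpu::UINT16, _indices, 0);
//           batch.drawIndexed(gpu::TRIANGLES, (uint32_t)_indexCount, (uint32_t)_indexOffset);
//       }
//   }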
using VShape = std::array<ShapeData, NUM_SHAPES>;
VShape _shapes;
gpu::PipelinePointer _standardDrawPipeline;
gpu::PipelinePointer _standardDrawPipelineNoBlend;
QHash<float, gpu::BufferPointer> _cubeVerticies;
QHash<Vec2Pair, gpu::BufferPointer> _cubeColors;
gpu::BufferPointer _wireCubeIndexBuffer;
QHash<float, gpu::BufferPointer> _solidCubeVertices;
QHash<Vec2Pair, gpu::BufferPointer> _solidCubeColors;
gpu::BufferPointer _solidCubeIndexBuffer;
gpu::BufferPointer _shapeVertices{ std::make_shared<gpu::Buffer>() };
gpu::BufferPointer _shapeIndices{ std::make_shared<gpu::Buffer>() };
class BatchItemDetails {
public:
@@ -249,7 +291,7 @@ private:
QHash<IntPair, VerticesIndices> _coneVBOs;
int _nextID;
int _nextID{ 0 };
QHash<int, Vec3PairVec4Pair> _lastRegisteredQuad3DTexture;
QHash<Vec3PairVec4Pair, BatchItemDetails> _quad3DTextures;
@@ -291,15 +333,7 @@ private:
QHash<int, Vec3Pair> _lastRegisteredAlternateGridBuffers;
QHash<Vec3Pair, gpu::BufferPointer> _gridColors;
QHash<Vec2Pair, gpu::BufferPointer> _sphereVertices;
QHash<int, gpu::BufferPointer> _registeredSphereVertices;
QHash<int, Vec2Pair> _lastRegisteredSphereVertices;
QHash<IntPair, gpu::BufferPointer> _sphereIndices;
QHash<int, gpu::BufferPointer> _registeredSphereIndices;
QHash<int, IntPair> _lastRegisteredSphereIndices;
QHash<Vec3Pair, gpu::BufferPointer> _sphereColors;
QHash<int, gpu::BufferPointer> _registeredSphereColors;
QHash<int, Vec3Pair> _lastRegisteredSphereColors;
QHash<QUrl, QWeakPointer<NetworkGeometry> > _networkGeometry;
};
#endif // hifi_GeometryCache_h

View file

@@ -75,7 +75,6 @@ Model::Model(RigPointer rig, QObject* parent) :
_isVisible(true),
_blendNumber(0),
_appliedBlendNumber(0),
_calculatedMeshPartOffsetValid(false),
_calculatedMeshPartBoxesValid(false),
_calculatedMeshBoxesValid(false),
_calculatedMeshTrianglesValid(false),
@@ -606,25 +605,6 @@ bool Model::convexHullContains(glm::vec3 point) {
return false;
}
void Model::recalculateMeshPartOffsets() {
if (!_calculatedMeshPartOffsetValid) {
const FBXGeometry& geometry = _geometry->getFBXGeometry();
int numberOfMeshes = geometry.meshes.size();
_calculatedMeshPartOffset.clear();
for (int i = 0; i < numberOfMeshes; i++) {
const FBXMesh& mesh = geometry.meshes.at(i);
qint64 partOffset = 0;
for (int j = 0; j < mesh.parts.size(); j++) {
const FBXMeshPart& part = mesh.parts.at(j);
_calculatedMeshPartOffset[QPair<int,int>(i, j)] = partOffset;
partOffset += part.quadIndices.size() * sizeof(int);
partOffset += part.triangleIndices.size() * sizeof(int);
}
}
_calculatedMeshPartOffsetValid = true;
}
}
// TODO: we seem to call this too often when things haven't actually changed... look into optimizing this
// Any script might trigger findRayIntersectionAgainstSubMeshes (and maybe convexHullContains), so these
// can occur multiple times. In addition, rendering does its own ray picking in order to decide which
@@ -641,8 +621,6 @@ void Model::recalculateMeshBoxes(bool pickAgainstTriangles) {
_calculatedMeshTriangles.clear();
_calculatedMeshTriangles.resize(numberOfMeshes);
_calculatedMeshPartBoxes.clear();
_calculatedMeshPartOffset.clear();
_calculatedMeshPartOffsetValid = false;
for (int i = 0; i < numberOfMeshes; i++) {
const FBXMesh& mesh = geometry.meshes.at(i);
Extents scaledMeshExtents = calculateScaledOffsetExtents(mesh.meshExtents);
@@ -651,7 +629,6 @@ void Model::recalculateMeshBoxes(bool pickAgainstTriangles) {
if (pickAgainstTriangles) {
QVector<Triangle> thisMeshTriangles;
qint64 partOffset = 0;
for (int j = 0; j < mesh.parts.size(); j++) {
const FBXMeshPart& part = mesh.parts.at(j);
@@ -737,15 +714,9 @@ void Model::recalculateMeshBoxes(bool pickAgainstTriangles) {
}
}
_calculatedMeshPartBoxes[QPair<int,int>(i, j)] = thisPartBounds;
_calculatedMeshPartOffset[QPair<int,int>(i, j)] = partOffset;
partOffset += part.quadIndices.size() * sizeof(int);
partOffset += part.triangleIndices.size() * sizeof(int);
}
_calculatedMeshTriangles[i] = thisMeshTriangles;
_calculatedMeshPartBoxesValid = true;
_calculatedMeshPartOffsetValid = true;
}
}
_calculatedMeshBoxesValid = true;
@@ -1475,12 +1446,6 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, int shape
return; // bail asap
}
// We need to make sure we have valid offsets calculated before we can render
if (!_calculatedMeshPartOffsetValid) {
_mutex.lock();
recalculateMeshPartOffsets();
_mutex.unlock();
}
auto textureCache = DependencyManager::get<TextureCache>();
gpu::Batch& batch = *(args->_batch);
@@ -1729,32 +1694,12 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, int shape
}
}
qint64 offset;
{
// FIXME_STUTTER: We shouldn't have any lock here
_mutex.lock();
offset = _calculatedMeshPartOffset[QPair<int,int>(meshIndex, partIndex)];
_mutex.unlock();
}
if (part.quadIndices.size() > 0) {
batch.setIndexBuffer(gpu::UINT32, part.getTrianglesForQuads(), 0);
batch.drawIndexed(gpu::TRIANGLES, part.trianglesForQuadsIndicesCount, 0);
offset += part.quadIndices.size() * sizeof(int);
batch.setIndexBuffer(gpu::UINT32, (networkMesh._indexBuffer), 0); // restore this in case there are triangles too
}
if (part.triangleIndices.size() > 0) {
batch.drawIndexed(gpu::TRIANGLES, part.triangleIndices.size(), offset);
offset += part.triangleIndices.size() * sizeof(int);
}
batch.setIndexBuffer(gpu::UINT32, part.getMergedTriangles(), 0);
batch.drawIndexed(gpu::TRIANGLES, part.mergedTrianglesIndicesCount, 0);
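// The two lines above replace the removed quad/triangle passes: quads are pre-triangulated into the
// merged index buffer, so a single indexed draw covers the whole part and the per-part byte-offset
// bookkeeping (and the lock that guarded it) is no longer needed.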
if (args) {
const int INDICES_PER_TRIANGLE = 3;
const int INDICES_PER_QUAD = 4;
args->_details._trianglesRendered += part.triangleIndices.size() / INDICES_PER_TRIANGLE;
args->_details._quadsRendered += part.quadIndices.size() / INDICES_PER_QUAD;
args->_details._trianglesRendered += part.mergedTrianglesIndicesCount / INDICES_PER_TRIANGLE;
}
}

View file

@@ -353,9 +353,6 @@ private:
};
QHash<QPair<int,int>, AABox> _calculatedMeshPartBoxes; // world coordinate AABoxes for all sub mesh part boxes
QHash<QPair<int,int>, qint64> _calculatedMeshPartOffset;
bool _calculatedMeshPartOffsetValid;
bool _calculatedMeshPartBoxesValid;
QVector<AABox> _calculatedMeshBoxes; // world coordinate AABoxes for all sub mesh boxes
@@ -366,7 +363,6 @@ private:
QMutex _mutex;
void recalculateMeshBoxes(bool pickAgainstTriangles = false);
void recalculateMeshPartOffsets();
void segregateMeshGroups(); // used to calculate our list of translucent vs opaque meshes

View file

@@ -43,7 +43,6 @@ public:
int _materialSwitches = 0;
int _trianglesRendered = 0;
int _quadsRendered = 0;
Item _opaque;
Item _translucent;

View file

@@ -10,15 +10,22 @@
#include <unordered_map>
#include <memory>
#include <cstdio>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <QApplication>
#include <QDir>
#include <QElapsedTimer>
#include <QFile>
#include <QImage>
#include <QLoggingCategory>
#include <QtCore/QTime>
#include <QtCore/QTimer>
#include <QtCore/QDir>
#include <QtCore/QElapsedTimer>
#include <QtCore/QFile>
#include <QtCore/QLoggingCategory>
#include <QtGui/QResizeEvent>
#include <QtGui/QWindow>
#include <QtGui/QGuiApplication>
#include <QtGui/QImage>
#include <gpu/Context.h>
#include <gpu/Batch.h>
@@ -26,17 +33,17 @@
#include <gpu/StandardShaderLib.h>
#include <gpu/GLBackend.h>
#include <QOpenGLContext>
#include <QResizeEvent>
#include <QTime>
#include <QTimer>
#include <QWindow>
#include <cstdio>
// Must come after GL headers
#include <QtGui/QOpenGLContext>
#include <GLMHelpers.h>
#include <PathUtils.h>
#include <GeometryCache.h>
#include <DeferredLightingEffect.h>
#include <NumericalConstants.h>
#include "simple_frag.h"
#include "simple_vert.h"
#include "unlit_frag.h"
#include "unlit_vert.h"
class RateCounter {
std::vector<float> times;
@@ -74,173 +81,7 @@ public:
}
};
#define MOVE_PARAM(name) decltype(name) && name
struct BasicModel {
gpu::PipelinePointer pipeline;
// gpu::BufferPointer vertexBuffer;
// gpu::BufferPointer indexBuffer;
// gpu::BufferPointer normalBuffer;
gpu::BufferView vertices;
gpu::BufferView normals;
gpu::BufferPointer indices;
gpu::Stream::FormatPointer format;
BasicModel (MOVE_PARAM(pipeline), MOVE_PARAM(vertices), MOVE_PARAM(normals), MOVE_PARAM(indices), MOVE_PARAM(format))
: pipeline(pipeline), vertices(vertices), normals(normals), indices(indices), format(format) {}
// BasicModel (gpu::PipelinePointer && pipeline, gpu::BufferPointer && buffer, gpu::Stream::FormatPointer && format)
// : pipeline(pipeline), buffer(buffer), format(format) {}
};
typedef std::shared_ptr<BasicModel> BasicModelPointer;
#undef MOVE_PARAM
BasicModelPointer makeCube () {
// Axis-aligned cube, facing the user at +z
// coords == binary mapping of each index, with z inverted (front face faces camera,
// instead of away from the camera)
//
// -x,+y,-z ----------- +x,+y,-z
// ___--- | ___--- |
// -x,+y,+z --------- +x,+y,+z |
// | | | |
// | | | |
// | | | |
// | | | |
// | -x,-y,-z ------|---- +x,-y,-z
// | ___--- | ___----
// -x,-y,+z --------- +x,-y,+z
//
float s = 1.0f;
const glm::vec3 raw_verts[8] = {
// x, y, z
{ -s, -s, +s }, // 0b000 0x0
{ +s, -s, +s }, // 0b001 0x1
{ -s, +s, +s }, // 0b010 0x2
{ +s, +s, +s }, // 0b011 0x3
{ -s, -s, -s }, // 0b100 0x4
{ +s, -s, -s }, // 0b101 0x5
{ -s, +s, -s }, // 0b110 0x6
{ +s, +s, -s } // 0b111 0x7
};
const glm::vec3 raw_normals[6] = {
{ 0.0f, 0.0f, +1.0f }, // x > 0: 1, 3, 5, 7 (N 0)
{ 0.0f, 0.0f, -1.0f }, // x < 0: 0, 2, 4, 6 (N 1)
{ 0.0f, +1.0f, 0.0f }, // y > 0: 2, 3, 6, 7 (N 2)
{ 0.0f, -1.0f, 0.0f }, // y < 0: 0, 1, 4, 5 (N 3)
{ +1.0f, 0.0f, 0.0f }, // z > 0: 0, 1, 2, 3 (N 4)
{ -1.0f, 0.0f, 0.0f } // z < 0: 4, 5, 6, 7 (N 5)
};
const glm::vec3 cube_verts[24] = {
raw_verts[1], raw_verts[3], raw_verts[5], raw_verts[7],
raw_verts[0], raw_verts[2], raw_verts[4], raw_verts[6],
raw_verts[2], raw_verts[3], raw_verts[6], raw_verts[7],
raw_verts[0], raw_verts[1], raw_verts[4], raw_verts[5],
raw_verts[0], raw_verts[1], raw_verts[2], raw_verts[3],
raw_verts[4], raw_verts[5], raw_verts[6], raw_verts[7]
};
const glm::vec3 cube_normals[24] = {
raw_normals[0], raw_normals[0], raw_normals[0], raw_normals[0],
raw_normals[1], raw_normals[1], raw_normals[1], raw_normals[1],
raw_normals[2], raw_normals[2], raw_normals[2], raw_normals[2],
raw_normals[3], raw_normals[3], raw_normals[3], raw_normals[3],
raw_normals[4], raw_normals[4], raw_normals[4], raw_normals[4],
raw_normals[5], raw_normals[5], raw_normals[5], raw_normals[5]
};
int16_t cube_indices_tris[36];
for (int i = 0, k = 0; i < 36; k += 4) {
cube_indices_tris[i++] = k + 0;
cube_indices_tris[i++] = k + 3;
cube_indices_tris[i++] = k + 1;
cube_indices_tris[i++] = k + 0;
cube_indices_tris[i++] = k + 2;
cube_indices_tris[i++] = k + 3;
}
// const int16_t cube_indices_tris[36] {
// 0, 3, 1, 0, 2, 3,
// };
// const glm::vec3 cube_normals[] = {
// { 0.0f, 0.0f, 1.0f },
// { 0.0f, 0.0f, 1.0f },
// { 0.0f, 0.0f, 1.0f },
// { 0.0f, 0.0f, 1.0f },
// { -1.0f, 0.0f, 0.0f },
// { -1.0f, 0.0f, 0.0f },
// { -1.0f, 0.0f, 0.0f },
// { -1.0f, 0.0f, 0.0f },
// };
// const int16_t cube_indices[] = {
// 3, 1, 0, 2, 3, 0,
// 6, 2, 0, 4, 6, 0,
// };
gpu::Stream::FormatPointer format = std::make_shared<gpu::Stream::Format>();
assert(gpu::Stream::POSITION == 0 && gpu::Stream::NORMAL == 1);
const int BUFFER_SLOT = 0;
format->setAttribute(gpu::Stream::POSITION, BUFFER_SLOT, gpu::Element::VEC3F_XYZ);
format->setAttribute(gpu::Stream::NORMAL, BUFFER_SLOT, gpu::Element::VEC3F_XYZ);
auto vertexBuffer = std::make_shared<gpu::Buffer>(24 * sizeof(glm::vec3), (gpu::Byte*)cube_verts);
auto normalBuffer = std::make_shared<gpu::Buffer>(24 * sizeof(glm::vec3), (gpu::Byte*)cube_normals);
gpu::BufferPointer indexBuffer = std::make_shared<gpu::Buffer>(36 * sizeof(int16_t), (gpu::Byte*)cube_indices_tris);
auto positionElement = format->getAttributes().at(gpu::Stream::POSITION)._element;
auto normalElement = format->getAttributes().at(gpu::Stream::NORMAL)._element;
gpu::BufferView vertexView { vertexBuffer, positionElement };
gpu::BufferView normalView { normalBuffer, normalElement };
// Create shaders
auto vs = gpu::ShaderPointer(gpu::Shader::createVertex({ simple_vert }));
auto fs = gpu::ShaderPointer(gpu::Shader::createPixel({ simple_frag }));
auto shader = gpu::ShaderPointer(gpu::Shader::createProgram(vs, fs));
gpu::Shader::BindingSet bindings;
bindings.insert({ "lightPosition", 1 });
if (!gpu::Shader::makeProgram(*shader, bindings)) {
printf("Could not compile shader\n");
if (!vs)
printf("bad vertex shader\n");
if (!fs)
printf("bad fragment shader\n");
if (!shader)
printf("bad shader program\n");
exit(-1);
}
auto state = std::make_shared<gpu::State>();
// state->setAntialiasedLineEnable(true);
state->setMultisampleEnable(true);
state->setDepthTest({ true });
auto pipeline = gpu::PipelinePointer(gpu::Pipeline::create(shader, state));
return std::make_shared<BasicModel>(
std::move(pipeline),
std::move(vertexView),
std::move(normalView),
std::move(indexBuffer),
std::move(format)
);
}
void renderCube(gpu::Batch & batch, const BasicModel & cube) {
batch.setPipeline(cube.pipeline);
batch.setInputFormat(cube.format);
batch.setInputBuffer(gpu::Stream::POSITION, cube.vertices);
batch.setInputBuffer(gpu::Stream::NORMAL, cube.normals);
batch.setIndexBuffer(gpu::INT16, cube.indices, 0);
// batch.drawIndexed(gpu::TRIANGLES, 12);
batch.draw(gpu::TRIANGLES, 24);
}
uint32_t toCompactColor(const glm::vec4& color);
gpu::ShaderPointer makeShader(const std::string & vertexShaderSrc, const std::string & fragmentShaderSrc, const gpu::Shader::BindingSet & bindings) {
auto vs = gpu::ShaderPointer(gpu::Shader::createVertex(vertexShaderSrc));
@@ -253,6 +94,14 @@ gpu::ShaderPointer makeShader(const std::string & vertexShaderSrc, const std::st
return shader;
}
float getSeconds(quint64 start = 0) {
auto usecs = usecTimestampNow() - start;
auto msecs = usecs / USECS_PER_MSEC;
float seconds = (float)msecs / MSECS_PER_SECOND;
return seconds;
}
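// Returns elapsed seconds since `start` with millisecond resolution (the value passes through an
// integer millisecond count before the float division).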
// Creates an OpenGL window that renders a simple unlit scene using the gpu library and GeometryCache
// Should eventually get refactored into something that supports multiple gpu backends.
@@ -265,9 +114,9 @@ class QTestWindow : public QWindow {
gpu::ContextPointer _context;
gpu::PipelinePointer _pipeline;
glm::mat4 _projectionMatrix;
// BasicModelPointer _cubeModel;
RateCounter fps;
QTime _time;
int _instanceLocation{ -1 };
protected:
void renderText();
@@ -288,6 +137,7 @@ public:
format.setVersion(4, 1);
format.setProfile(QSurfaceFormat::OpenGLContextProfile::CoreProfile);
format.setOption(QSurfaceFormat::DebugContext);
format.setSwapInterval(0);
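// A swap interval of 0 disables vsync, so the frame rate is not capped by the display refresh.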
setFormat(format);
@@ -301,23 +151,21 @@ public:
gpu::Context::init<gpu::GLBackend>();
_context = std::make_shared<gpu::Context>();
auto shader = makeShader(simple_vert, simple_frag, gpu::Shader::BindingSet {});
auto shader = makeShader(unlit_vert, unlit_frag, gpu::Shader::BindingSet{});
auto state = std::make_shared<gpu::State>();
state->setMultisampleEnable(true);
state->setDepthTest(gpu::State::DepthTest { true });
_pipeline = gpu::PipelinePointer(gpu::Pipeline::create(shader, state));
_instanceLocation = _pipeline->getProgram()->getUniforms().findLocation("Instanced");
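// Cache the location of the vertex shader's "Instanced" uniform so the draw loop can toggle it
// around the instanced grid draw below.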
// Clear screen
gpu::Batch batch;
batch.clearColorFramebuffer(gpu::Framebuffer::BUFFER_COLORS, { 1.0, 0.0, 0.5, 1.0 });
_context->render(batch);
// _cubeModel = makeCube();
DependencyManager::set<GeometryCache>();
DependencyManager::set<DeferredLightingEffect>();
setFramePosition(QPoint(-1000, 0));
resize(QSize(800, 600));
_time.start();
@@ -327,6 +175,8 @@ public:
}
void draw() {
static auto startTime = usecTimestampNow();
if (!isVisible()) {
return;
}
@@ -342,37 +192,81 @@ public:
glm::vec3 unitscale { 1.0f };
glm::vec3 up { 0.0f, 1.0f, 0.0f };
glm::vec3 cam_pos { 1.5f * sinf(t), 0.0f, 2.0f };
// glm::vec3 camera_focus { 5.0f * cosf(t * 0.1f), 0.0f, 0.0f };
glm::vec3 camera_focus { 0.0f, 0.0f, 0.0f };
glm::quat cam_rotation;
// glm::quat cam_rotation = glm::quat_cast(glm::lookAt(cam_pos, camera_focus, up));
// cam_rotation.w = -cam_rotation.w;
// printf("cam rotation: %f %f %f %f\n", cam_rotation.x, cam_rotation.y, cam_rotation.z, cam_rotation.w);
Transform cam_transform { cam_rotation, unitscale, cam_pos };
batch.setViewTransform(cam_transform);
glm::vec3 camera_position { 1.5f * sinf(t), 0.0f, 1.5f * cos(t) };
static const vec3 camera_focus(0);
static const vec3 camera_up(0, 1, 0);
glm::mat4 camera = glm::inverse(glm::lookAt(camera_position, camera_focus, up));
batch.setViewTransform(camera);
batch.setPipeline(_pipeline);
batch.setModelTransform(Transform());
auto geometryCache = DependencyManager::get<GeometryCache>();
// Render a grid on the xz plane (not the optimal way to do this, but fine for a test)
// Note: GeometryCache::renderGrid will *not* work here, as it is apparently unaffected by batch rotations and renders in xy only
batch.setModelTransform(Transform());
static const std::string GRID_INSTANCE = "Grid";
static auto compactColor1 = toCompactColor(vec4{ 0.35f, 0.25f, 0.15f, 1.0f });
static auto compactColor2 = toCompactColor(vec4{ 0.15f, 0.25f, 0.35f, 1.0f });
auto transformBuffer = batch.getNamedBuffer(GRID_INSTANCE, 0);
auto colorBuffer = batch.getNamedBuffer(GRID_INSTANCE, 1);
for (int i = 0; i < 100; ++i) {
geometryCache->renderLine(batch, { -100.0f, -1.0f, -50.0f + float(i) }, { 100.0f, -1.0f, -50.0f + float(i) }, { 0.35f, 0.25f, 0.15f, 1.0f });
{
glm::mat4 transform = glm::translate(mat4(), vec3(0, -1, -50 + i));
transform = glm::scale(transform, vec3(100, 1, 1));
transformBuffer->append(transform);
colorBuffer->append(compactColor1);
}
{
glm::mat4 transform = glm::mat4_cast(quat(vec3(0, PI / 2.0f, 0)));
transform = glm::translate(transform, vec3(0, -1, -50 + i));
transform = glm::scale(transform, vec3(100, 1, 1));
transformBuffer->append(transform);
colorBuffer->append(compactColor2);
}
}
for (int i = 0; i < 100; ++i) {
geometryCache->renderLine(batch, { -50.0f + float(i), -1.0f, -100.0f}, { -50.0f + float(i), -1.0f, 100.0f }, { 0.15f, 0.25f, 0.35f, 1.0f });
}
batch.setupNamedCalls(GRID_INSTANCE, 200, [=](gpu::Batch& batch, gpu::Batch::NamedBatchData& data) {
batch.setViewTransform(camera);
batch.setModelTransform(Transform());
batch.setPipeline(_pipeline);
auto& xfm = data._buffers[0];
auto& color = data._buffers[1];
batch._glUniform1i(_instanceLocation, 1);
geometryCache->renderWireShapeInstances(batch, GeometryCache::Line, data._count, xfm, color);
batch._glUniform1i(_instanceLocation, 0);
});
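// The 200 line segments appended above end up as one instanced draw: the named-call lambda is
// invoked once for the "Grid" batch data, binds the accumulated transform/color buffers, and wraps
// renderWireShapeInstances in the Instanced uniform toggle.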
// Render unlit cube + sphere
geometryCache->renderUnitCube(batch);
geometryCache->renderWireCube(batch, 1.0f, { 0.4f, 0.4f, 0.7f, 1.0f });
batch.setModelTransform(Transform().setTranslation({ 1.5f, -0.5f, -0.5f }));
geometryCache->renderSphere(batch, 0.5f, 50, 50, { 0.8f, 0.25f, 0.25f });
static GeometryCache::Shape SHAPE[] = {
GeometryCache::Cube,
GeometryCache::Sphere,
GeometryCache::Tetrahedron,
GeometryCache::Icosahedron,
};
static auto startUsecs = usecTimestampNow();
float seconds = getSeconds(startUsecs);
seconds /= 4.0;
int shapeIndex = ((int)seconds) % 4;
bool wire = seconds - floor(seconds) > 0.5f;
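// With the division by 4.0 above, each entry of SHAPE is shown for roughly four seconds:
// solid for the first half of that window, wireframe for the second half.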
batch.setModelTransform(Transform());
batch._glColor4f(0.8f, 0.25f, 0.25f, 1.0f);
if (wire) {
geometryCache->renderWireShape(batch, SHAPE[shapeIndex]);
} else {
geometryCache->renderShape(batch, SHAPE[shapeIndex]);
}
batch.setModelTransform(Transform().setScale(1.05f));
batch._glColor4f(1, 1, 1, 1);
geometryCache->renderWireCube(batch);
_context->render(batch);
_qGlContext->swapBuffers(this);

View file

@@ -20,7 +20,6 @@ in vec3 _normal;
in vec3 _color;
void main(void) {
Material material = getMaterial();
packDeferredFragment(
normalize(_normal.xyz),
glowIntensity,

View file

@@ -19,6 +19,7 @@
<$declareStandardTransform()$>
// the interpolated normal
uniform bool Instanced = false;
out vec3 _normal;
out vec3 _color;
@@ -31,6 +32,12 @@ void main(void) {
// standard transform
TransformCamera cam = getTransformCamera();
TransformObject obj = getTransformObject();
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, _normal)$>
if (Instanced) {
<$transformInstancedModelToClipPos(cam, obj, inPosition, gl_Position)$>
<$transformInstancedModelToEyeDir(cam, obj, inNormal.xyz, _normal)$>
} else {
<$transformModelToClipPos(cam, obj, inPosition, gl_Position)$>
<$transformModelToEyeDir(cam, obj, inNormal.xyz, _normal)$>
}
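// When Instanced is true, the instanced variants of the transform macros supply a per-instance
// model transform; otherwise the original single-object path is used unchanged.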
_normal = vec3(0.0, 0.0, 1.0);
}