Mirror of https://github.com/overte-org/overte.git, synced 2025-04-21 09:24:00 +02:00

Commit 630abda35f: Merge branch 'master' of https://github.com/highfidelity/hifi into audio-src

64 changed files with 3817 additions and 2904 deletions
@@ -113,7 +113,7 @@ endif()
 target_link_libraries(${TARGET_NAME} ${BULLET_LIBRARIES})
 
 # link required hifi libraries
-link_hifi_libraries(shared octree environment gpu gpu-networking procedural model render fbx networking entities avatars
+link_hifi_libraries(shared octree environment gpu procedural model render fbx networking model-networking entities avatars
 audio audio-client animation script-engine physics
 render-utils entities-renderer ui auto-updater
 plugins display-plugins input-plugins)
BIN interface/resources/images/NormalsFittingTexture.dds (new file, binary file not shown)
@@ -278,6 +278,7 @@ bool setupEssentials(int& argc, char** argv) {
 auto addressManager = DependencyManager::set<AddressManager>();
 auto nodeList = DependencyManager::set<NodeList>(NodeType::Agent, listenPort);
 auto geometryCache = DependencyManager::set<GeometryCache>();
+auto modelCache = DependencyManager::set<ModelCache>();
 auto scriptCache = DependencyManager::set<ScriptCache>();
 auto soundCache = DependencyManager::set<SoundCache>();
 auto faceshift = DependencyManager::set<Faceshift>();
@@ -418,12 +419,12 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
 // put the NodeList and datagram processing on the node thread
 nodeList->moveToThread(nodeThread);
 
-// geometry background downloads need to happen on the Datagram Processor Thread. The idle loop will
-// emit checkBackgroundDownloads to cause the GeometryCache to check it's queue for requested background
+// Model background downloads need to happen on the Datagram Processor Thread. The idle loop will
+// emit checkBackgroundDownloads to cause the ModelCache to check it's queue for requested background
 // downloads.
-QSharedPointer<GeometryCache> geometryCacheP = DependencyManager::get<GeometryCache>();
-ResourceCache* geometryCache = geometryCacheP.data();
-connect(this, &Application::checkBackgroundDownloads, geometryCache, &ResourceCache::checkAsynchronousGets);
+QSharedPointer<ModelCache> modelCacheP = DependencyManager::get<ModelCache>();
+ResourceCache* modelCache = modelCacheP.data();
+connect(this, &Application::checkBackgroundDownloads, modelCache, &ResourceCache::checkAsynchronousGets);
 
 // put the audio processing on a separate thread
 QThread* audioThread = new QThread();
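The rewiring above keeps the existing pattern: the cache object is reached from another thread, and connect() gives the idle loop's checkBackgroundDownloads signal a queued, thread-safe path to ResourceCache::checkAsynchronousGets. As a rough standalone sketch of that Qt pattern only (placeholder Cache/Driver classes, a single main.cpp built with AUTOMOC, not hifi code), it could look like this:

// main.cpp: minimal Qt 5 sketch of "emit on one thread, run the slot on another".
#include <QCoreApplication>
#include <QDebug>
#include <QObject>
#include <QThread>
#include <QTimer>

class Cache : public QObject {          // stand-in for a ResourceCache-like object
    Q_OBJECT
public slots:
    void checkAsynchronousGets() {
        qDebug() << "checking download queue on" << QThread::currentThread();
    }
};

class Driver : public QObject {         // stand-in for Application
    Q_OBJECT
signals:
    void checkBackgroundDownloads();
};

int main(int argc, char** argv) {
    QCoreApplication app(argc, argv);

    QThread workerThread;
    Cache cache;
    cache.moveToThread(&workerThread);  // like nodeList->moveToThread(nodeThread) above
    workerThread.start();

    Driver driver;
    // Because cache lives on workerThread, this connection is queued:
    // the slot always runs on the worker, never on the emitting thread.
    QObject::connect(&driver, &Driver::checkBackgroundDownloads,
                     &cache, &Cache::checkAsynchronousGets);
    emit driver.checkBackgroundDownloads();

    QTimer::singleShot(100, &app, &QCoreApplication::quit);
    int rc = app.exec();
    workerThread.quit();
    workerThread.wait();
    return rc;
}

#include "main.moc"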
@@ -892,6 +893,7 @@ Application::~Application() {
 DependencyManager::destroy<AnimationCache>();
 DependencyManager::destroy<FramebufferCache>();
 DependencyManager::destroy<TextureCache>();
+DependencyManager::destroy<ModelCache>();
 DependencyManager::destroy<GeometryCache>();
 DependencyManager::destroy<ScriptCache>();
 DependencyManager::destroy<SoundCache>();
@@ -1120,34 +1122,39 @@ void Application::paintGL() {
// The render mode is default or mirror if the camera is in mirror mode, assigned further below
renderArgs._renderMode = RenderArgs::DEFAULT_RENDER_MODE;

// Always use the default eye position, not the actual head eye position.
// Using the latter will cause the camera to wobble with idle animations,
// or with changes from the face tracker
if (_myCamera.getMode() == CAMERA_MODE_FIRST_PERSON) {
// Always use the default eye position, not the actual head eye position.
// Using the latter will cause the camera to wobble with idle animations,
// or with changes from the face tracker
renderArgs._renderMode = RenderArgs::DEFAULT_RENDER_MODE;

if (!getActiveDisplayPlugin()->isHmd()) {
_myCamera.setPosition(_myAvatar->getDefaultEyePosition());
_myCamera.setRotation(_myAvatar->getHead()->getCameraOrientation());
} else {
if (isHMDMode()) {
mat4 camMat = _myAvatar->getSensorToWorldMatrix() * _myAvatar->getHMDSensorMatrix();
_myCamera.setPosition(extractTranslation(camMat));
_myCamera.setRotation(glm::quat_cast(camMat));
} else {
_myCamera.setPosition(_myAvatar->getDefaultEyePosition());
_myCamera.setRotation(_myAvatar->getHead()->getCameraOrientation());
}
} else if (_myCamera.getMode() == CAMERA_MODE_THIRD_PERSON) {
if (isHMDMode()) {
_myCamera.setRotation(_myAvatar->getWorldAlignedOrientation());
glm::quat hmdRotation = extractRotation(_myAvatar->getHMDSensorMatrix());
_myCamera.setRotation(_myAvatar->getWorldAlignedOrientation() * hmdRotation);
// Ignore MenuOption::CenterPlayerInView in HMD view
glm::vec3 hmdOffset = extractTranslation(_myAvatar->getHMDSensorMatrix());
_myCamera.setPosition(_myAvatar->getDefaultEyePosition()
+ _myAvatar->getOrientation()
* (_myAvatar->getScale() * _myAvatar->getBoomLength() * glm::vec3(0.0f, 0.0f, 1.0f) + hmdOffset));
} else {
_myCamera.setRotation(_myAvatar->getHead()->getOrientation());
if (Menu::getInstance()->isOptionChecked(MenuOption::CenterPlayerInView)) {
_myCamera.setPosition(_myAvatar->getDefaultEyePosition()
+ _myCamera.getRotation()
* (_myAvatar->getScale() * _myAvatar->getBoomLength() * glm::vec3(0.0f, 0.0f, 1.0f)));
} else {
_myCamera.setPosition(_myAvatar->getDefaultEyePosition()
+ _myAvatar->getOrientation()
* (_myAvatar->getScale() * _myAvatar->getBoomLength() * glm::vec3(0.0f, 0.0f, 1.0f)));
}
}
if (Menu::getInstance()->isOptionChecked(MenuOption::CenterPlayerInView)) {
_myCamera.setPosition(_myAvatar->getDefaultEyePosition() +
_myCamera.getRotation() * glm::vec3(0.0f, 0.0f, 1.0f) * _myAvatar->getBoomLength() * _myAvatar->getScale());
} else {
_myCamera.setPosition(_myAvatar->getDefaultEyePosition() +
_myAvatar->getOrientation() * glm::vec3(0.0f, 0.0f, 1.0f) * _myAvatar->getBoomLength() * _myAvatar->getScale());
}

} else if (_myCamera.getMode() == CAMERA_MODE_MIRROR) {
if (isHMDMode()) {
glm::quat hmdRotation = extractRotation(_myAvatar->getHMDSensorMatrix());
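The new third-person branch above builds the camera position as the default eye position plus the avatar orientation applied to a boom vector: avatar scale times boom length along the local +Z axis, with the HMD sensor offset added first when a headset is active. A small standalone restatement of just that math, using plain GLM and made-up values rather than the hifi Camera/Avatar API:

// boom.cpp: illustrative only; depends on header-only GLM.
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include <iostream>

glm::vec3 thirdPersonCameraPosition(const glm::vec3& defaultEyePosition,
                                    const glm::quat& orientation,
                                    float avatarScale,
                                    float boomLength,
                                    const glm::vec3& hmdOffset = glm::vec3(0.0f)) {
    // Push the camera back along the avatar's local +Z by the scaled boom length,
    // then rotate that offset into world space, as in the hunk above.
    glm::vec3 boom = avatarScale * boomLength * glm::vec3(0.0f, 0.0f, 1.0f) + hmdOffset;
    return defaultEyePosition + orientation * boom;
}

int main() {
    glm::vec3 eye(0.0f, 1.7f, 0.0f);   // hypothetical eye height
    glm::quat facing = glm::angleAxis(glm::radians(90.0f), glm::vec3(0.0f, 1.0f, 0.0f));
    glm::vec3 cam = thirdPersonCameraPosition(eye, facing, 1.0f, 3.0f);
    std::cout << cam.x << " " << cam.y << " " << cam.z << "\n";   // camera sits 3m off to the avatar's side
}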
@@ -2727,7 +2734,7 @@ void Application::reloadResourceCaches() {
 emptyLocalCache();
 
 DependencyManager::get<AnimationCache>()->refreshAll();
-DependencyManager::get<GeometryCache>()->refreshAll();
+DependencyManager::get<ModelCache>()->refreshAll();
 DependencyManager::get<SoundCache>()->refreshAll();
 DependencyManager::get<TextureCache>()->refreshAll();
 }
@@ -3514,7 +3521,7 @@ namespace render {
 
 skybox = skyStage->getSkybox();
 if (skybox) {
-model::Skybox::render(batch, *(Application::getInstance()->getDisplayViewFrustum()), *skybox);
+skybox->render(batch, *(Application::getInstance()->getDisplayViewFrustum()));
 }
 }
 }
@@ -340,25 +340,23 @@ void ModelPackager::populateBasicMapping(QVariantHash& mapping, QString filename

void ModelPackager::listTextures() {
_textures.clear();
foreach (FBXMesh mesh, _geometry->meshes) {
foreach (FBXMeshPart part, mesh.parts) {
if (!part.diffuseTexture.filename.isEmpty() && part.diffuseTexture.content.isEmpty() &&
!_textures.contains(part.diffuseTexture.filename)) {
_textures << part.diffuseTexture.filename;
}
if (!part.normalTexture.filename.isEmpty() && part.normalTexture.content.isEmpty() &&
!_textures.contains(part.normalTexture.filename)) {
foreach (const FBXMaterial mat, _geometry->materials) {
if (!mat.diffuseTexture.filename.isEmpty() && mat.diffuseTexture.content.isEmpty() &&
!_textures.contains(mat.diffuseTexture.filename)) {
_textures << mat.diffuseTexture.filename;
}
if (!mat.normalTexture.filename.isEmpty() && mat.normalTexture.content.isEmpty() &&
!_textures.contains(mat.normalTexture.filename)) {

_textures << part.normalTexture.filename;
}
if (!part.specularTexture.filename.isEmpty() && part.specularTexture.content.isEmpty() &&
!_textures.contains(part.specularTexture.filename)) {
_textures << part.specularTexture.filename;
}
if (!part.emissiveTexture.filename.isEmpty() && part.emissiveTexture.content.isEmpty() &&
!_textures.contains(part.emissiveTexture.filename)) {
_textures << part.emissiveTexture.filename;
}
_textures << mat.normalTexture.filename;
}
if (!mat.specularTexture.filename.isEmpty() && mat.specularTexture.content.isEmpty() &&
!_textures.contains(mat.specularTexture.filename)) {
_textures << mat.specularTexture.filename;
}
if (!mat.emissiveTexture.filename.isEmpty() && mat.emissiveTexture.content.isEmpty() &&
!_textures.contains(mat.emissiveTexture.filename)) {
_textures << mat.emissiveTexture.filename;
}
}
}
@@ -188,8 +188,10 @@ void Stars::render(RenderArgs* renderArgs, float alpha) {
colorElement = streamFormat->getAttributes().at(gpu::Stream::COLOR)._element;
});

auto geometryCache = DependencyManager::get<GeometryCache>();
auto modelCache = DependencyManager::get<ModelCache>();
auto textureCache = DependencyManager::get<TextureCache>();
auto geometryCache = DependencyManager::get<GeometryCache>();


gpu::Batch& batch = *renderArgs->_batch;
batch.setViewTransform(Transform());
@@ -643,7 +643,7 @@ void Avatar::renderBillboard(RenderArgs* renderArgs) {
 // Using a unique URL ensures we don't get another avatar's texture from TextureCache
 QUrl uniqueUrl = QUrl(QUuid::createUuid().toString());
 _billboardTexture = DependencyManager::get<TextureCache>()->getTexture(
-uniqueUrl, DEFAULT_TEXTURE, false, _billboard);
+uniqueUrl, DEFAULT_TEXTURE, _billboard);
 }
 if (!_billboardTexture || !_billboardTexture->isLoaded()) {
 return;
@@ -56,7 +56,7 @@ CachesSizeDialog::CachesSizeDialog(QWidget* parent) :
 
 void CachesSizeDialog::confirmClicked(bool checked) {
 DependencyManager::get<AnimationCache>()->setUnusedResourceCacheSize(_animations->value() * BYTES_PER_MEGABYTES);
-DependencyManager::get<GeometryCache>()->setUnusedResourceCacheSize(_geometries->value() * BYTES_PER_MEGABYTES);
+DependencyManager::get<ModelCache>()->setUnusedResourceCacheSize(_geometries->value() * BYTES_PER_MEGABYTES);
 DependencyManager::get<SoundCache>()->setUnusedResourceCacheSize(_sounds->value() * BYTES_PER_MEGABYTES);
 DependencyManager::get<TextureCache>()->setUnusedResourceCacheSize(_textures->value() * BYTES_PER_MEGABYTES);
@@ -65,7 +65,7 @@ void CachesSizeDialog::confirmClicked(bool checked) {
 
 void CachesSizeDialog::resetClicked(bool checked) {
 _animations->setValue(DependencyManager::get<AnimationCache>()->getUnusedResourceCacheSize() / BYTES_PER_MEGABYTES);
-_geometries->setValue(DependencyManager::get<GeometryCache>()->getUnusedResourceCacheSize() / BYTES_PER_MEGABYTES);
+_geometries->setValue(DependencyManager::get<ModelCache>()->getUnusedResourceCacheSize() / BYTES_PER_MEGABYTES);
 _sounds->setValue(DependencyManager::get<SoundCache>()->getUnusedResourceCacheSize() / BYTES_PER_MEGABYTES);
 _textures->setValue(DependencyManager::get<TextureCache>()->getUnusedResourceCacheSize() / BYTES_PER_MEGABYTES);
 }
@@ -26,4 +26,4 @@ find_package(PolyVox REQUIRED)
 target_include_directories(${TARGET_NAME} SYSTEM PUBLIC ${POLYVOX_INCLUDE_DIRS})
 target_link_libraries(${TARGET_NAME} ${POLYVOX_LIBRARIES})
 
-link_hifi_libraries(shared gpu gpu-networking procedural script-engine render render-utils)
+link_hifi_libraries(shared gpu procedural model model-networking script-engine render render-utils)
@@ -22,7 +22,7 @@
 #include <PerfStat.h>
 #include <SceneScriptingInterface.h>
 #include <ScriptEngine.h>
-#include <procedural/Procedural.h>
+#include <procedural/ProceduralSkybox.h>
 #include <TextureCache.h>
 
 #include "EntityTreeRenderer.h"
@@ -294,16 +294,16 @@ void EntityTreeRenderer::applyZonePropertiesToScene(std::shared_ptr<ZoneEntityIt
 _viewState->endOverrideEnvironmentData();
 auto stage = scene->getSkyStage();
 if (zone->getBackgroundMode() == BACKGROUND_MODE_SKYBOX) {
-auto skybox = stage->getSkybox();
+auto skybox = std::dynamic_pointer_cast<ProceduralSkybox>(stage->getSkybox());
 skybox->setColor(zone->getSkyboxProperties().getColorVec3());
 static QString userData;
 if (userData != zone->getUserData()) {
 userData = zone->getUserData();
-QSharedPointer<Procedural> procedural(new Procedural(userData));
+ProceduralPointer procedural(new Procedural(userData));
 if (procedural->_enabled) {
 skybox->setProcedural(procedural);
 } else {
-skybox->setProcedural(QSharedPointer<Procedural>());
+skybox->setProcedural(ProceduralPointer());
 }
 }
 if (zone->getSkyboxProperties().getURL().isEmpty()) {
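The skybox hunk above only rebuilds the Procedural when the zone's userData string actually changes, caching the previous value in a function-local static. A stripped-down sketch of that rebuild-on-change guard in plain C++ (Procedural here is a placeholder type, not the hifi class):

#include <iostream>
#include <memory>
#include <string>

struct Procedural {                       // placeholder for the procedural shader object
    explicit Procedural(const std::string& userData) : enabled(!userData.empty()) {}
    bool enabled;
};

std::shared_ptr<Procedural> proceduralForUserData(const std::string& newUserData) {
    static bool initialized = false;
    static std::string cachedUserData;    // like the `static QString userData` above
    static std::shared_ptr<Procedural> cached;
    if (!initialized || newUserData != cachedUserData) {
        initialized = true;
        cachedUserData = newUserData;
        auto fresh = std::make_shared<Procedural>(newUserData);
        // A disabled procedural clears the cached pointer, mirroring setProcedural(ProceduralPointer()).
        cached = fresh->enabled ? fresh : nullptr;
    }
    return cached;
}

int main() {
    auto a = proceduralForUserData("{ \"ProceduralEntity\": {} }");
    auto b = proceduralForUserData("{ \"ProceduralEntity\": {} }");   // unchanged userData: no rebuild
    std::cout << std::boolalpha << (a == b) << "\n";                  // true
}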
File diff suppressed because it is too large
|
@ -112,6 +112,10 @@ public:
|
|||
Transform transform;
|
||||
int texcoordSet;
|
||||
QString texcoordSetName;
|
||||
|
||||
bool isBumpmap{ false };
|
||||
|
||||
bool isNull() const { return name.isEmpty() && filename.isEmpty() && content.isEmpty(); }
|
||||
};
|
||||
|
||||
/// A single part of a mesh (with the same material).
|
||||
|
@ -122,24 +126,33 @@ public:
|
|||
QVector<int> triangleIndices; // original indices from the FBX mesh
|
||||
mutable gpu::BufferPointer mergedTrianglesIndicesBuffer; // both the quads and the triangles merged into a single set of triangles
|
||||
|
||||
QString materialID;
|
||||
|
||||
mutable bool mergedTrianglesAvailable = false;
|
||||
mutable int mergedTrianglesIndicesCount = 0;
|
||||
|
||||
gpu::BufferPointer getMergedTriangles() const;
|
||||
};
|
||||
|
||||
class FBXMaterial {
|
||||
public:
|
||||
glm::vec3 diffuseColor;
|
||||
glm::vec3 specularColor;
|
||||
glm::vec3 emissiveColor;
|
||||
glm::vec2 emissiveParams;
|
||||
float shininess;
|
||||
float opacity;
|
||||
|
||||
|
||||
QString materialID;
|
||||
model::MaterialPointer _material;
|
||||
|
||||
FBXTexture diffuseTexture;
|
||||
FBXTexture opacityTexture;
|
||||
FBXTexture normalTexture;
|
||||
FBXTexture specularTexture;
|
||||
FBXTexture emissiveTexture;
|
||||
|
||||
QString materialID;
|
||||
model::MaterialPointer _material;
|
||||
mutable bool mergedTrianglesAvailable = false;
|
||||
mutable int mergedTrianglesIndicesCount = 0;
|
||||
|
||||
gpu::BufferPointer getMergedTriangles() const;
|
||||
bool needTangentSpace() const;
|
||||
};
|
||||
|
||||
/// A single mesh (with optional blendshapes) extracted from an FBX document.
|
||||
|
@ -165,9 +178,6 @@ public:
|
|||
bool isEye;
|
||||
|
||||
QVector<FBXBlendshape> blendshapes;
|
||||
|
||||
bool hasSpecularTexture() const;
|
||||
bool hasEmissiveTexture() const;
|
||||
|
||||
unsigned int meshIndex; // the order the meshes appeared in the object file
|
||||
# if USE_MODEL_MESH
|
||||
|
@ -175,6 +185,16 @@ public:
|
|||
# endif
|
||||
};
|
||||
|
||||
class ExtractedMesh {
|
||||
public:
|
||||
FBXMesh mesh;
|
||||
QMultiHash<int, int> newIndices;
|
||||
QVector<QHash<int, int> > blendshapeIndexMaps;
|
||||
QVector<QPair<int, int> > partMaterialTextures;
|
||||
QHash<QString, int> texcoordSetMap;
|
||||
std::map<QString, int> texcoordSetMap2;
|
||||
};
|
||||
|
||||
/// A single animation frame extracted from an FBX document.
|
||||
class FBXAnimationFrame {
|
||||
public:
|
||||
|
@ -233,7 +253,9 @@ public:
|
|||
bool hasSkeletonJoints;
|
||||
|
||||
QVector<FBXMesh> meshes;
|
||||
|
||||
|
||||
QHash<QString, FBXMaterial> materials;
|
||||
|
||||
glm::mat4 offset;
|
||||
|
||||
int leftEyeJointIndex = -1;
|
||||
|
@ -291,4 +313,114 @@ FBXGeometry* readFBX(const QByteArray& model, const QVariantHash& mapping, const
|
|||
/// \exception QString if an error occurs in parsing
|
||||
FBXGeometry* readFBX(QIODevice* device, const QVariantHash& mapping, const QString& url = "", bool loadLightmaps = true, float lightmapLevel = 1.0f);
|
||||
|
||||
class TextureParam {
|
||||
public:
|
||||
glm::vec2 UVTranslation;
|
||||
glm::vec2 UVScaling;
|
||||
glm::vec4 cropping;
|
||||
QString UVSet;
|
||||
|
||||
glm::vec3 translation;
|
||||
glm::vec3 rotation;
|
||||
glm::vec3 scaling;
|
||||
uint8_t alphaSource;
|
||||
uint8_t currentTextureBlendMode;
|
||||
bool useMaterial;
|
||||
|
||||
template <typename T>
|
||||
bool assign(T& ref, const T& v) {
|
||||
if (ref == v) {
|
||||
return false;
|
||||
} else {
|
||||
ref = v;
|
||||
isDefault = false;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
bool isDefault;
|
||||
|
||||
TextureParam() :
|
||||
UVTranslation(0.0f),
|
||||
UVScaling(1.0f),
|
||||
cropping(0.0f),
|
||||
UVSet("map1"),
|
||||
translation(0.0f),
|
||||
rotation(0.0f),
|
||||
scaling(1.0f),
|
||||
alphaSource(0),
|
||||
currentTextureBlendMode(0),
|
||||
useMaterial(true),
|
||||
isDefault(true)
|
||||
{}
|
||||
|
||||
TextureParam(const TextureParam& src) :
|
||||
UVTranslation(src.UVTranslation),
|
||||
UVScaling(src.UVScaling),
|
||||
cropping(src.cropping),
|
||||
UVSet(src.UVSet),
|
||||
translation(src.translation),
|
||||
rotation(src.rotation),
|
||||
scaling(src.scaling),
|
||||
alphaSource(src.alphaSource),
|
||||
currentTextureBlendMode(src.currentTextureBlendMode),
|
||||
useMaterial(src.useMaterial),
|
||||
isDefault(src.isDefault)
|
||||
{}
|
||||
|
||||
};
|
||||
|
||||
class ExtractedMesh;
|
||||
|
||||
class FBXReader {
|
||||
public:
|
||||
FBXGeometry* _fbxGeometry;
|
||||
|
||||
FBXNode _fbxNode;
|
||||
static FBXNode parseFBX(QIODevice* device);
|
||||
|
||||
FBXGeometry* extractFBXGeometry(const QVariantHash& mapping, const QString& url);
|
||||
|
||||
ExtractedMesh extractMesh(const FBXNode& object, unsigned int& meshIndex);
|
||||
QHash<QString, ExtractedMesh> meshes;
|
||||
void buildModelMesh(ExtractedMesh& extracted, const QString& url);
|
||||
|
||||
FBXTexture getTexture(const QString& textureID);
|
||||
|
||||
QHash<QString, QString> _textureNames;
|
||||
QHash<QString, QByteArray> _textureFilenames;
|
||||
QHash<QByteArray, QByteArray> _textureContent;
|
||||
QHash<QString, TextureParam> _textureParams;
|
||||
|
||||
|
||||
QHash<QString, QString> diffuseTextures;
|
||||
QHash<QString, QString> bumpTextures;
|
||||
QHash<QString, QString> normalTextures;
|
||||
QHash<QString, QString> specularTextures;
|
||||
QHash<QString, QString> emissiveTextures;
|
||||
QHash<QString, QString> ambientTextures;
|
||||
|
||||
QHash<QString, FBXMaterial> _fbxMaterials;
|
||||
|
||||
void consolidateFBXMaterials();
|
||||
|
||||
bool _loadLightmaps = true;
|
||||
float _lightmapOffset = 0.0f;
|
||||
float _lightmapLevel;
|
||||
|
||||
QMultiHash<QString, QString> _connectionParentMap;
|
||||
QMultiHash<QString, QString> _connectionChildMap;
|
||||
|
||||
static glm::vec3 getVec3(const QVariantList& properties, int index);
|
||||
static QVector<glm::vec4> createVec4Vector(const QVector<double>& doubleVector);
|
||||
static QVector<glm::vec4> createVec4VectorRGBA(const QVector<double>& doubleVector, glm::vec4& average);
|
||||
static QVector<glm::vec3> createVec3Vector(const QVector<double>& doubleVector);
|
||||
static QVector<glm::vec2> createVec2Vector(const QVector<double>& doubleVector);
|
||||
static glm::mat4 createMat4(const QVector<double>& doubleVector);
|
||||
|
||||
static QVector<int> getIntVector(const FBXNode& node);
|
||||
static QVector<float> getFloatVector(const FBXNode& node);
|
||||
static QVector<double> getDoubleVector(const FBXNode& node);
|
||||
};
|
||||
|
||||
#endif // hifi_FBXReader_h
|
||||
|
|
libraries/fbx/src/FBXReader_Material.cpp (new file, 152 lines)
|
@ -0,0 +1,152 @@
|
|||
//
|
||||
// FBXReader_Material.cpp
|
||||
// interface/src/fbx
|
||||
//
|
||||
// Created by Sam Gateau on 8/27/2015.
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#include <iostream>
|
||||
#include <QBuffer>
|
||||
#include <QDataStream>
|
||||
#include <QIODevice>
|
||||
#include <QStringList>
|
||||
#include <QTextStream>
|
||||
#include <QtDebug>
|
||||
#include <QtEndian>
|
||||
#include <QFileInfo>
|
||||
#include "FBXReader.h"
|
||||
|
||||
#include <memory>
|
||||
|
||||
bool FBXMaterial::needTangentSpace() const {
|
||||
return !normalTexture.isNull();
|
||||
}
|
||||
|
||||
FBXTexture FBXReader::getTexture(const QString& textureID) {
|
||||
FBXTexture texture;
|
||||
texture.filename = _textureFilenames.value(textureID);
|
||||
texture.name = _textureNames.value(textureID);
|
||||
texture.content = _textureContent.value(texture.filename);
|
||||
texture.transform.setIdentity();
|
||||
texture.texcoordSet = 0;
|
||||
if (_textureParams.contains(textureID)) {
|
||||
auto p = _textureParams.value(textureID);
|
||||
|
||||
texture.transform.setTranslation(p.translation);
|
||||
texture.transform.setRotation(glm::quat(glm::radians(p.rotation)));
|
||||
|
||||
auto scaling = p.scaling;
|
||||
// Protect from bad scaling which should never happen
|
||||
if (scaling.x == 0.0f) {
|
||||
scaling.x = 1.0f;
|
||||
}
|
||||
if (scaling.y == 0.0f) {
|
||||
scaling.y = 1.0f;
|
||||
}
|
||||
if (scaling.z == 0.0f) {
|
||||
scaling.z = 1.0f;
|
||||
}
|
||||
texture.transform.setScale(scaling);
|
||||
|
||||
if ((p.UVSet != "map1") && (p.UVSet != "UVSet0")) {
|
||||
texture.texcoordSet = 1;
|
||||
}
|
||||
texture.texcoordSetName = p.UVSet;
|
||||
}
|
||||
return texture;
|
||||
}
|
||||
|
||||
void FBXReader::consolidateFBXMaterials() {
|
||||
|
||||
// foreach (const QString& materialID, materials) {
|
||||
for (QHash<QString, FBXMaterial>::iterator it = _fbxMaterials.begin(); it != _fbxMaterials.end(); it++) {
|
||||
FBXMaterial& material = (*it);
|
||||
// the pure material associated with this part
|
||||
bool detectDifferentUVs = false;
|
||||
FBXTexture diffuseTexture;
|
||||
QString diffuseTextureID = diffuseTextures.value(material.materialID);
|
||||
if (!diffuseTextureID.isNull()) {
|
||||
diffuseTexture = getTexture(diffuseTextureID);
|
||||
|
||||
// FBX files generated by 3DSMax have an intermediate texture parent, apparently
|
||||
foreach (const QString& childTextureID, _connectionChildMap.values(diffuseTextureID)) {
|
||||
if (_textureFilenames.contains(childTextureID)) {
|
||||
diffuseTexture = getTexture(diffuseTextureID);
|
||||
}
|
||||
}
|
||||
|
||||
material.diffuseTexture = diffuseTexture;
|
||||
|
||||
detectDifferentUVs = (diffuseTexture.texcoordSet != 0) || (!diffuseTexture.transform.isIdentity());
|
||||
}
|
||||
|
||||
FBXTexture normalTexture;
|
||||
QString bumpTextureID = bumpTextures.value(material.materialID);
|
||||
QString normalTextureID = normalTextures.value(material.materialID);
|
||||
if (!normalTextureID.isNull()) {
|
||||
normalTexture = getTexture(normalTextureID);
|
||||
normalTexture.isBumpmap = false;
|
||||
|
||||
material.normalTexture = normalTexture;
|
||||
detectDifferentUVs |= (normalTexture.texcoordSet != 0) || (!normalTexture.transform.isIdentity());
|
||||
} else if (!bumpTextureID.isNull()) {
|
||||
normalTexture = getTexture(bumpTextureID);
|
||||
normalTexture.isBumpmap = true;
|
||||
|
||||
material.normalTexture = normalTexture;
|
||||
detectDifferentUVs |= (normalTexture.texcoordSet != 0) || (!normalTexture.transform.isIdentity());
|
||||
}
|
||||
|
||||
|
||||
FBXTexture specularTexture;
|
||||
QString specularTextureID = specularTextures.value(material.materialID);
|
||||
if (!specularTextureID.isNull()) {
|
||||
specularTexture = getTexture(specularTextureID);
|
||||
detectDifferentUVs |= (specularTexture.texcoordSet != 0) || (!specularTexture.transform.isIdentity());
|
||||
}
|
||||
|
||||
FBXTexture emissiveTexture;
|
||||
glm::vec2 emissiveParams(0.f, 1.f);
|
||||
emissiveParams.x = _lightmapOffset;
|
||||
emissiveParams.y = _lightmapLevel;
|
||||
|
||||
QString emissiveTextureID = emissiveTextures.value(material.materialID);
|
||||
QString ambientTextureID = ambientTextures.value(material.materialID);
|
||||
if (_loadLightmaps && (!emissiveTextureID.isNull() || !ambientTextureID.isNull())) {
|
||||
|
||||
if (!emissiveTextureID.isNull()) {
|
||||
emissiveTexture = getTexture(emissiveTextureID);
|
||||
emissiveParams.y = 4.0f;
|
||||
} else if (!ambientTextureID.isNull()) {
|
||||
emissiveTexture = getTexture(ambientTextureID);
|
||||
}
|
||||
|
||||
material.emissiveParams = emissiveParams;
|
||||
material.emissiveTexture = emissiveTexture;
|
||||
|
||||
detectDifferentUVs |= (emissiveTexture.texcoordSet != 0) || (!emissiveTexture.transform.isIdentity());
|
||||
}
|
||||
|
||||
// Finally create the true material representation
|
||||
material._material = std::make_shared<model::Material>();
|
||||
material._material->setEmissive(material.emissiveColor);
|
||||
if (glm::all(glm::equal(material.diffuseColor, glm::vec3(0.0f)))) {
|
||||
material._material->setDiffuse(material.diffuseColor);
|
||||
} else {
|
||||
material._material->setDiffuse(material.diffuseColor);
|
||||
}
|
||||
material._material->setMetallic(glm::length(material.specularColor));
|
||||
material._material->setGloss(material.shininess);
|
||||
|
||||
if (material.opacity <= 0.0f) {
|
||||
material._material->setOpacity(1.0f);
|
||||
} else {
|
||||
material._material->setOpacity(material.opacity);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
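FBXReader::getTexture() above normalizes the texture transform before use, replacing any zero scaling component with 1.0 so a degenerate FBX scaling record cannot collapse the UV transform. The guard in isolation, with plain GLM types instead of the hifi Transform:

#include <glm/glm.hpp>
#include <iostream>

glm::vec3 sanitizeTextureScaling(glm::vec3 scaling) {
    // "Protect from bad scaling which should never happen": zero components become identity.
    if (scaling.x == 0.0f) { scaling.x = 1.0f; }
    if (scaling.y == 0.0f) { scaling.y = 1.0f; }
    if (scaling.z == 0.0f) { scaling.z = 1.0f; }
    return scaling;
}

int main() {
    glm::vec3 s = sanitizeTextureScaling(glm::vec3(2.0f, 0.0f, 0.0f));
    std::cout << s.x << " " << s.y << " " << s.z << "\n";   // prints: 2 1 1
}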
libraries/fbx/src/FBXReader_Mesh.cpp (new file, 511 lines)
|
@ -0,0 +1,511 @@
|
|||
//
|
||||
// FBXReader_Mesh.cpp
|
||||
// interface/src/fbx
|
||||
//
|
||||
// Created by Sam Gateau on 8/27/2015.
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#include <iostream>
|
||||
#include <QBuffer>
|
||||
#include <QDataStream>
|
||||
#include <QIODevice>
|
||||
#include <QStringList>
|
||||
#include <QTextStream>
|
||||
#include <QtDebug>
|
||||
#include <QtEndian>
|
||||
#include <QFileInfo>
|
||||
#include <QHash>
|
||||
#include <LogHandler.h>
|
||||
#include "ModelFormatLogging.h"
|
||||
|
||||
#include "FBXReader.h"
|
||||
|
||||
#include <memory>
|
||||
|
||||
|
||||
class Vertex {
|
||||
public:
|
||||
int originalIndex;
|
||||
glm::vec2 texCoord;
|
||||
glm::vec2 texCoord1;
|
||||
};
|
||||
|
||||
uint qHash(const Vertex& vertex, uint seed = 0) {
|
||||
return qHash(vertex.originalIndex, seed);
|
||||
}
|
||||
|
||||
bool operator==(const Vertex& v1, const Vertex& v2) {
|
||||
return v1.originalIndex == v2.originalIndex && v1.texCoord == v2.texCoord && v1.texCoord1 == v2.texCoord1;
|
||||
}
|
||||
|
||||
class AttributeData {
|
||||
public:
|
||||
QVector<glm::vec2> texCoords;
|
||||
QVector<int> texCoordIndices;
|
||||
QString name;
|
||||
int index;
|
||||
};
|
||||
|
||||
class MeshData {
|
||||
public:
|
||||
ExtractedMesh extracted;
|
||||
QVector<glm::vec3> vertices;
|
||||
QVector<int> polygonIndices;
|
||||
bool normalsByVertex;
|
||||
QVector<glm::vec3> normals;
|
||||
QVector<int> normalIndices;
|
||||
|
||||
bool colorsByVertex;
|
||||
glm::vec4 averageColor{1.0f, 1.0f, 1.0f, 1.0f};
|
||||
QVector<glm::vec4> colors;
|
||||
QVector<int> colorIndices;
|
||||
|
||||
QVector<glm::vec2> texCoords;
|
||||
QVector<int> texCoordIndices;
|
||||
|
||||
QHash<Vertex, int> indices;
|
||||
|
||||
std::vector<AttributeData> attributes;
|
||||
};
|
||||
|
||||
|
||||
void appendIndex(MeshData& data, QVector<int>& indices, int index) {
|
||||
if (index >= data.polygonIndices.size()) {
|
||||
return;
|
||||
}
|
||||
int vertexIndex = data.polygonIndices.at(index);
|
||||
if (vertexIndex < 0) {
|
||||
vertexIndex = -vertexIndex - 1;
|
||||
}
|
||||
Vertex vertex;
|
||||
vertex.originalIndex = vertexIndex;
|
||||
|
||||
glm::vec3 position;
|
||||
if (vertexIndex < data.vertices.size()) {
|
||||
position = data.vertices.at(vertexIndex);
|
||||
}
|
||||
|
||||
glm::vec3 normal;
|
||||
int normalIndex = data.normalsByVertex ? vertexIndex : index;
|
||||
if (data.normalIndices.isEmpty()) {
|
||||
if (normalIndex < data.normals.size()) {
|
||||
normal = data.normals.at(normalIndex);
|
||||
}
|
||||
} else if (normalIndex < data.normalIndices.size()) {
|
||||
normalIndex = data.normalIndices.at(normalIndex);
|
||||
if (normalIndex >= 0 && normalIndex < data.normals.size()) {
|
||||
normal = data.normals.at(normalIndex);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
glm::vec4 color;
|
||||
bool hasColors = (data.colors.size() > 1);
|
||||
if (hasColors) {
|
||||
int colorIndex = data.colorsByVertex ? vertexIndex : index;
|
||||
if (data.colorIndices.isEmpty()) {
|
||||
if (colorIndex < data.colors.size()) {
|
||||
color = data.colors.at(colorIndex);
|
||||
}
|
||||
} else if (colorIndex < data.colorIndices.size()) {
|
||||
colorIndex = data.colorIndices.at(colorIndex);
|
||||
if (colorIndex >= 0 && colorIndex < data.colors.size()) {
|
||||
color = data.colors.at(colorIndex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (data.texCoordIndices.isEmpty()) {
|
||||
if (index < data.texCoords.size()) {
|
||||
vertex.texCoord = data.texCoords.at(index);
|
||||
}
|
||||
} else if (index < data.texCoordIndices.size()) {
|
||||
int texCoordIndex = data.texCoordIndices.at(index);
|
||||
if (texCoordIndex >= 0 && texCoordIndex < data.texCoords.size()) {
|
||||
vertex.texCoord = data.texCoords.at(texCoordIndex);
|
||||
}
|
||||
}
|
||||
|
||||
bool hasMoreTexcoords = (data.attributes.size() > 1);
|
||||
if (hasMoreTexcoords) {
|
||||
if (data.attributes[1].texCoordIndices.empty()) {
|
||||
if (index < data.attributes[1].texCoords.size()) {
|
||||
vertex.texCoord1 = data.attributes[1].texCoords.at(index);
|
||||
}
|
||||
} else if (index < data.attributes[1].texCoordIndices.size()) {
|
||||
int texCoordIndex = data.attributes[1].texCoordIndices.at(index);
|
||||
if (texCoordIndex >= 0 && texCoordIndex < data.attributes[1].texCoords.size()) {
|
||||
vertex.texCoord1 = data.attributes[1].texCoords.at(texCoordIndex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
QHash<Vertex, int>::const_iterator it = data.indices.find(vertex);
|
||||
if (it == data.indices.constEnd()) {
|
||||
int newIndex = data.extracted.mesh.vertices.size();
|
||||
indices.append(newIndex);
|
||||
data.indices.insert(vertex, newIndex);
|
||||
data.extracted.newIndices.insert(vertexIndex, newIndex);
|
||||
data.extracted.mesh.vertices.append(position);
|
||||
data.extracted.mesh.normals.append(normal);
|
||||
data.extracted.mesh.texCoords.append(vertex.texCoord);
|
||||
if (hasColors) {
|
||||
data.extracted.mesh.colors.append(glm::vec3(color));
|
||||
}
|
||||
if (hasMoreTexcoords) {
|
||||
data.extracted.mesh.texCoords1.append(vertex.texCoord1);
|
||||
}
|
||||
} else {
|
||||
indices.append(*it);
|
||||
data.extracted.mesh.normals[*it] += normal;
|
||||
}
|
||||
}
|
||||
|
||||
ExtractedMesh FBXReader::extractMesh(const FBXNode& object, unsigned int& meshIndex) {
|
||||
MeshData data;
|
||||
data.extracted.mesh.meshIndex = meshIndex++;
|
||||
QVector<int> materials;
|
||||
QVector<int> textures;
|
||||
bool isMaterialPerPolygon = false;
|
||||
|
||||
foreach (const FBXNode& child, object.children) {
|
||||
if (child.name == "Vertices") {
|
||||
data.vertices = createVec3Vector(getDoubleVector(child));
|
||||
|
||||
} else if (child.name == "PolygonVertexIndex") {
|
||||
data.polygonIndices = getIntVector(child);
|
||||
|
||||
} else if (child.name == "LayerElementNormal") {
|
||||
data.normalsByVertex = false;
|
||||
bool indexToDirect = false;
|
||||
foreach (const FBXNode& subdata, child.children) {
|
||||
if (subdata.name == "Normals") {
|
||||
data.normals = createVec3Vector(getDoubleVector(subdata));
|
||||
|
||||
} else if (subdata.name == "NormalsIndex") {
|
||||
data.normalIndices = getIntVector(subdata);
|
||||
|
||||
} else if (subdata.name == "MappingInformationType" && subdata.properties.at(0) == "ByVertice") {
|
||||
data.normalsByVertex = true;
|
||||
|
||||
} else if (subdata.name == "ReferenceInformationType" && subdata.properties.at(0) == "IndexToDirect") {
|
||||
indexToDirect = true;
|
||||
}
|
||||
}
|
||||
if (indexToDirect && data.normalIndices.isEmpty()) {
|
||||
// hack to work around wacky Makehuman exports
|
||||
data.normalsByVertex = true;
|
||||
}
|
||||
} else if (child.name == "LayerElementColor") {
|
||||
data.colorsByVertex = false;
|
||||
bool indexToDirect = false;
|
||||
foreach (const FBXNode& subdata, child.children) {
|
||||
if (subdata.name == "Colors") {
|
||||
data.colors = createVec4VectorRGBA(getDoubleVector(subdata), data.averageColor);
|
||||
} else if (subdata.name == "ColorsIndex") {
|
||||
data.colorIndices = getIntVector(subdata);
|
||||
|
||||
} else if (subdata.name == "MappingInformationType" && subdata.properties.at(0) == "ByVertice") {
|
||||
data.colorsByVertex = true;
|
||||
|
||||
} else if (subdata.name == "ReferenceInformationType" && subdata.properties.at(0) == "IndexToDirect") {
|
||||
indexToDirect = true;
|
||||
}
|
||||
}
|
||||
if (indexToDirect && data.normalIndices.isEmpty()) {
|
||||
// hack to work around wacky Makehuman exports
|
||||
data.colorsByVertex = true;
|
||||
}
|
||||
|
||||
#if defined(FBXREADER_KILL_BLACK_COLOR_ATTRIBUTE)
|
||||
// Potential feature where we decide to kill the color attribute is to dark?
|
||||
// Tested with the model:
|
||||
// https://hifi-public.s3.amazonaws.com/ryan/gardenLight2.fbx
|
||||
// let's check if we did have true data ?
|
||||
if (glm::all(glm::lessThanEqual(data.averageColor, glm::vec4(0.09f)))) {
|
||||
data.colors.clear();
|
||||
data.colorIndices.clear();
|
||||
data.colorsByVertex = false;
|
||||
qCDebug(modelformat) << "LayerElementColor has an average value of 0.0f... let's forget it.";
|
||||
}
|
||||
#endif
|
||||
|
||||
} else if (child.name == "LayerElementUV") {
|
||||
if (child.properties.at(0).toInt() == 0) {
|
||||
AttributeData attrib;
|
||||
attrib.index = child.properties.at(0).toInt();
|
||||
foreach (const FBXNode& subdata, child.children) {
|
||||
if (subdata.name == "UV") {
|
||||
data.texCoords = createVec2Vector(getDoubleVector(subdata));
|
||||
attrib.texCoords = createVec2Vector(getDoubleVector(subdata));
|
||||
} else if (subdata.name == "UVIndex") {
|
||||
data.texCoordIndices = getIntVector(subdata);
|
||||
attrib.texCoordIndices = getIntVector(subdata);
|
||||
} else if (subdata.name == "Name") {
|
||||
attrib.name = subdata.properties.at(0).toString();
|
||||
}
|
||||
#if defined(DEBUG_FBXREADER)
|
||||
else {
|
||||
int unknown = 0;
|
||||
QString subname = subdata.name.data();
|
||||
if ( (subdata.name == "Version")
|
||||
|| (subdata.name == "MappingInformationType")
|
||||
|| (subdata.name == "ReferenceInformationType") ) {
|
||||
} else {
|
||||
unknown++;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
data.extracted.texcoordSetMap.insert(attrib.name, data.attributes.size());
|
||||
data.attributes.push_back(attrib);
|
||||
} else {
|
||||
AttributeData attrib;
|
||||
attrib.index = child.properties.at(0).toInt();
|
||||
foreach (const FBXNode& subdata, child.children) {
|
||||
if (subdata.name == "UV") {
|
||||
attrib.texCoords = createVec2Vector(getDoubleVector(subdata));
|
||||
} else if (subdata.name == "UVIndex") {
|
||||
attrib.texCoordIndices = getIntVector(subdata);
|
||||
} else if (subdata.name == "Name") {
|
||||
attrib.name = subdata.properties.at(0).toString();
|
||||
}
|
||||
#if defined(DEBUG_FBXREADER)
|
||||
else {
|
||||
int unknown = 0;
|
||||
QString subname = subdata.name.data();
|
||||
if ( (subdata.name == "Version")
|
||||
|| (subdata.name == "MappingInformationType")
|
||||
|| (subdata.name == "ReferenceInformationType") ) {
|
||||
} else {
|
||||
unknown++;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
QHash<QString, int>::iterator it = data.extracted.texcoordSetMap.find(attrib.name);
|
||||
if (it == data.extracted.texcoordSetMap.end()) {
|
||||
data.extracted.texcoordSetMap.insert(attrib.name, data.attributes.size());
|
||||
data.attributes.push_back(attrib);
|
||||
} else {
|
||||
// WTF same names for different UVs?
|
||||
qCDebug(modelformat) << "LayerElementUV #" << attrib.index << " is reusing the same name as #" << (*it) << ". Skip this texcoord attribute.";
|
||||
}
|
||||
}
|
||||
} else if (child.name == "LayerElementMaterial") {
|
||||
foreach (const FBXNode& subdata, child.children) {
|
||||
if (subdata.name == "Materials") {
|
||||
materials = getIntVector(subdata);
|
||||
} else if (subdata.name == "MappingInformationType") {
|
||||
if (subdata.properties.at(0) == "ByPolygon")
|
||||
isMaterialPerPolygon = true;
|
||||
} else {
|
||||
isMaterialPerPolygon = false;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
} else if (child.name == "LayerElementTexture") {
|
||||
foreach (const FBXNode& subdata, child.children) {
|
||||
if (subdata.name == "TextureId") {
|
||||
textures = getIntVector(subdata);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool isMultiMaterial = false;
|
||||
if (isMaterialPerPolygon) {
|
||||
isMultiMaterial = true;
|
||||
}
|
||||
// TODO: make excellent use of isMultiMaterial
|
||||
Q_UNUSED(isMultiMaterial);
|
||||
|
||||
// convert the polygons to quads and triangles
|
||||
int polygonIndex = 0;
|
||||
QHash<QPair<int, int>, int> materialTextureParts;
|
||||
for (int beginIndex = 0; beginIndex < data.polygonIndices.size(); polygonIndex++) {
|
||||
int endIndex = beginIndex;
|
||||
while (endIndex < data.polygonIndices.size() && data.polygonIndices.at(endIndex++) >= 0);
|
||||
|
||||
QPair<int, int> materialTexture((polygonIndex < materials.size()) ? materials.at(polygonIndex) : 0,
|
||||
(polygonIndex < textures.size()) ? textures.at(polygonIndex) : 0);
|
||||
int& partIndex = materialTextureParts[materialTexture];
|
||||
if (partIndex == 0) {
|
||||
data.extracted.partMaterialTextures.append(materialTexture);
|
||||
data.extracted.mesh.parts.resize(data.extracted.mesh.parts.size() + 1);
|
||||
partIndex = data.extracted.mesh.parts.size();
|
||||
}
|
||||
FBXMeshPart& part = data.extracted.mesh.parts[partIndex - 1];
|
||||
|
||||
if (endIndex - beginIndex == 4) {
|
||||
appendIndex(data, part.quadIndices, beginIndex++);
|
||||
appendIndex(data, part.quadIndices, beginIndex++);
|
||||
appendIndex(data, part.quadIndices, beginIndex++);
|
||||
appendIndex(data, part.quadIndices, beginIndex++);
|
||||
} else {
|
||||
for (int nextIndex = beginIndex + 1;; ) {
|
||||
appendIndex(data, part.triangleIndices, beginIndex);
|
||||
appendIndex(data, part.triangleIndices, nextIndex++);
|
||||
appendIndex(data, part.triangleIndices, nextIndex);
|
||||
if (nextIndex >= data.polygonIndices.size() || data.polygonIndices.at(nextIndex) < 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
beginIndex = endIndex;
|
||||
}
|
||||
}
|
||||
|
||||
return data.extracted;
|
||||
}
|
||||
|
||||
|
||||
#if USE_MODEL_MESH
|
||||
void FBXReader::buildModelMesh(ExtractedMesh& extracted, const QString& url) {
|
||||
static QString repeatedMessage = LogHandler::getInstance().addRepeatedMessageRegex("buildModelMesh failed -- .*");
|
||||
|
||||
if (extracted.mesh.vertices.size() == 0) {
|
||||
extracted.mesh._mesh = model::Mesh();
|
||||
qCDebug(modelformat) << "buildModelMesh failed -- no vertices, url = " << url;
|
||||
return;
|
||||
}
|
||||
FBXMesh& fbxMesh = extracted.mesh;
|
||||
model::Mesh mesh;
|
||||
|
||||
// Grab the vertices in a buffer
|
||||
auto vb = std::make_shared<gpu::Buffer>();
|
||||
vb->setData(extracted.mesh.vertices.size() * sizeof(glm::vec3),
|
||||
(const gpu::Byte*) extracted.mesh.vertices.data());
|
||||
gpu::BufferView vbv(vb, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ));
|
||||
mesh.setVertexBuffer(vbv);
|
||||
|
||||
// evaluate all attribute channels sizes
|
||||
int normalsSize = fbxMesh.normals.size() * sizeof(glm::vec3);
|
||||
int tangentsSize = fbxMesh.tangents.size() * sizeof(glm::vec3);
|
||||
int colorsSize = fbxMesh.colors.size() * sizeof(glm::vec3);
|
||||
int texCoordsSize = fbxMesh.texCoords.size() * sizeof(glm::vec2);
|
||||
int texCoords1Size = fbxMesh.texCoords1.size() * sizeof(glm::vec2);
|
||||
int clusterIndicesSize = fbxMesh.clusterIndices.size() * sizeof(glm::vec4);
|
||||
int clusterWeightsSize = fbxMesh.clusterWeights.size() * sizeof(glm::vec4);
|
||||
|
||||
int normalsOffset = 0;
|
||||
int tangentsOffset = normalsOffset + normalsSize;
|
||||
int colorsOffset = tangentsOffset + tangentsSize;
|
||||
int texCoordsOffset = colorsOffset + colorsSize;
|
||||
int texCoords1Offset = texCoordsOffset + texCoordsSize;
|
||||
int clusterIndicesOffset = texCoords1Offset + texCoords1Size;
|
||||
int clusterWeightsOffset = clusterIndicesOffset + clusterIndicesSize;
|
||||
int totalAttributeSize = clusterWeightsOffset + clusterWeightsSize;
|
||||
|
||||
// Copy all attribute data in a single attribute buffer
|
||||
auto attribBuffer = std::make_shared<gpu::Buffer>();
|
||||
attribBuffer->resize(totalAttributeSize);
|
||||
attribBuffer->setSubData(normalsOffset, normalsSize, (gpu::Byte*) fbxMesh.normals.constData());
|
||||
attribBuffer->setSubData(tangentsOffset, tangentsSize, (gpu::Byte*) fbxMesh.tangents.constData());
|
||||
attribBuffer->setSubData(colorsOffset, colorsSize, (gpu::Byte*) fbxMesh.colors.constData());
|
||||
attribBuffer->setSubData(texCoordsOffset, texCoordsSize, (gpu::Byte*) fbxMesh.texCoords.constData());
|
||||
attribBuffer->setSubData(texCoords1Offset, texCoords1Size, (gpu::Byte*) fbxMesh.texCoords1.constData());
|
||||
attribBuffer->setSubData(clusterIndicesOffset, clusterIndicesSize, (gpu::Byte*) fbxMesh.clusterIndices.constData());
|
||||
attribBuffer->setSubData(clusterWeightsOffset, clusterWeightsSize, (gpu::Byte*) fbxMesh.clusterWeights.constData());
|
||||
|
||||
if (normalsSize) {
|
||||
mesh.addAttribute(gpu::Stream::NORMAL,
|
||||
model::BufferView(attribBuffer, normalsOffset, normalsSize,
|
||||
gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ)));
|
||||
}
|
||||
if (tangentsSize) {
|
||||
mesh.addAttribute(gpu::Stream::TANGENT,
|
||||
model::BufferView(attribBuffer, tangentsOffset, tangentsSize,
|
||||
gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ)));
|
||||
}
|
||||
if (colorsSize) {
|
||||
mesh.addAttribute(gpu::Stream::COLOR,
|
||||
model::BufferView(attribBuffer, colorsOffset, colorsSize,
|
||||
gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::RGB)));
|
||||
}
|
||||
if (texCoordsSize) {
|
||||
mesh.addAttribute(gpu::Stream::TEXCOORD,
|
||||
model::BufferView( attribBuffer, texCoordsOffset, texCoordsSize,
|
||||
gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV)));
|
||||
}
|
||||
if (texCoords1Size) {
|
||||
mesh.addAttribute(gpu::Stream::TEXCOORD1,
|
||||
model::BufferView(attribBuffer, texCoords1Offset, texCoords1Size,
|
||||
gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV)));
|
||||
}
|
||||
if (clusterIndicesSize) {
|
||||
mesh.addAttribute(gpu::Stream::SKIN_CLUSTER_INDEX,
|
||||
model::BufferView(attribBuffer, clusterIndicesOffset, clusterIndicesSize,
|
||||
gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW)));
|
||||
}
|
||||
if (clusterWeightsSize) {
|
||||
mesh.addAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT,
|
||||
model::BufferView(attribBuffer, clusterWeightsOffset, clusterWeightsSize,
|
||||
gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW)));
|
||||
}
|
||||
|
||||
|
||||
unsigned int totalIndices = 0;
|
||||
|
||||
foreach(const FBXMeshPart& part, extracted.mesh.parts) {
|
||||
totalIndices += (part.quadIndices.size() + part.triangleIndices.size());
|
||||
}
|
||||
|
||||
if (! totalIndices) {
|
||||
extracted.mesh._mesh = model::Mesh();
|
||||
qCDebug(modelformat) << "buildModelMesh failed -- no indices, url = " << url;
|
||||
return;
|
||||
}
|
||||
|
||||
auto ib = std::make_shared<gpu::Buffer>();
|
||||
ib->resize(totalIndices * sizeof(int));
|
||||
|
||||
int indexNum = 0;
|
||||
int offset = 0;
|
||||
|
||||
std::vector< model::Mesh::Part > parts;
|
||||
|
||||
foreach(const FBXMeshPart& part, extracted.mesh.parts) {
|
||||
model::Mesh::Part quadPart(indexNum, part.quadIndices.size(), 0, model::Mesh::QUADS);
|
||||
if (quadPart._numIndices) {
|
||||
parts.push_back(quadPart);
|
||||
ib->setSubData(offset, part.quadIndices.size() * sizeof(int),
|
||||
(gpu::Byte*) part.quadIndices.constData());
|
||||
offset += part.quadIndices.size() * sizeof(int);
|
||||
indexNum += part.quadIndices.size();
|
||||
}
|
||||
model::Mesh::Part triPart(indexNum, part.triangleIndices.size(), 0, model::Mesh::TRIANGLES);
|
||||
if (triPart._numIndices) {
|
||||
ib->setSubData(offset, part.triangleIndices.size() * sizeof(int),
|
||||
(gpu::Byte*) part.triangleIndices.constData());
|
||||
offset += part.triangleIndices.size() * sizeof(int);
|
||||
indexNum += part.triangleIndices.size();
|
||||
}
|
||||
}
|
||||
|
||||
gpu::BufferView ibv(ib, gpu::Element(gpu::SCALAR, gpu::UINT32, gpu::XYZ));
|
||||
mesh.setIndexBuffer(ibv);
|
||||
|
||||
if (parts.size()) {
|
||||
auto pb = std::make_shared<gpu::Buffer>();
|
||||
pb->setData(parts.size() * sizeof(model::Mesh::Part), (const gpu::Byte*) parts.data());
|
||||
gpu::BufferView pbv(pb, gpu::Element(gpu::VEC4, gpu::UINT32, gpu::XYZW));
|
||||
mesh.setPartBuffer(pbv);
|
||||
} else {
|
||||
extracted.mesh._mesh = model::Mesh();
|
||||
qCDebug(modelformat) << "buildModelMesh failed -- no parts, url = " << url;
|
||||
return;
|
||||
}
|
||||
|
||||
// model::Box box =
|
||||
mesh.evalPartBound(0);
|
||||
|
||||
extracted.mesh._mesh = mesh;
|
||||
}
|
||||
#endif // USE_MODEL_MESH
|
||||
|
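FBXReader::extractMesh() above walks the flat PolygonVertexIndex stream, where the last vertex of each polygon is stored as -(index + 1); four-sided polygons are kept as quads and anything larger is fanned into triangles around the first vertex. A compact standalone sketch of that index walk, detached from the FBX/Qt containers:

#include <iostream>
#include <vector>

// FBX stores the final vertex of a polygon as -(index + 1); undo that encoding.
static int decodeIndex(int raw) { return raw < 0 ? -raw - 1 : raw; }

void splitPolygons(const std::vector<int>& polygonIndices,
                   std::vector<int>& quadIndices,
                   std::vector<int>& triangleIndices) {
    for (std::size_t begin = 0; begin < polygonIndices.size();) {
        std::size_t end = begin;
        while (end < polygonIndices.size() && polygonIndices[end++] >= 0) {}   // run ends after the negative index
        std::size_t count = end - begin;
        if (count == 4) {
            for (std::size_t i = begin; i < end; ++i) {
                quadIndices.push_back(decodeIndex(polygonIndices[i]));
            }
        } else {
            for (std::size_t next = begin + 1; next + 1 < end; ++next) {       // triangle fan around the first vertex
                triangleIndices.push_back(decodeIndex(polygonIndices[begin]));
                triangleIndices.push_back(decodeIndex(polygonIndices[next]));
                triangleIndices.push_back(decodeIndex(polygonIndices[next + 1]));
            }
        }
        begin = end;
    }
}

int main() {
    // A quad (0 1 2 3) followed by a pentagon (4 5 6 7 8); the last index of each polygon is encoded as -(i + 1).
    std::vector<int> polys = { 0, 1, 2, -4, 4, 5, 6, 7, -9 };
    std::vector<int> quads, tris;
    splitPolygons(polys, quads, tris);
    std::cout << quads.size() << " quad indices, " << tris.size() << " triangle indices\n";   // 4 and 9
}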
libraries/fbx/src/FBXReader_Node.cpp (new file, 463 lines)
|
@ -0,0 +1,463 @@
|
|||
//
|
||||
// FBXReader_Node.cpp
|
||||
// interface/src/fbx
|
||||
//
|
||||
// Created by Sam Gateau on 8/27/2015.
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#include <iostream>
|
||||
#include <QBuffer>
|
||||
#include <QDataStream>
|
||||
#include <QIODevice>
|
||||
#include <QStringList>
|
||||
#include <QTextStream>
|
||||
#include <QtDebug>
|
||||
#include <QtEndian>
|
||||
#include <QFileInfo>
|
||||
#include "FBXReader.h"
|
||||
|
||||
template<class T> int streamSize() {
|
||||
return sizeof(T);
|
||||
}
|
||||
|
||||
template<bool> int streamSize() {
|
||||
return 1;
|
||||
}
|
||||
|
||||
template<class T> QVariant readBinaryArray(QDataStream& in, int& position) {
|
||||
quint32 arrayLength;
|
||||
quint32 encoding;
|
||||
quint32 compressedLength;
|
||||
|
||||
in >> arrayLength;
|
||||
in >> encoding;
|
||||
in >> compressedLength;
|
||||
position += sizeof(quint32) * 3;
|
||||
|
||||
QVector<T> values;
|
||||
const unsigned int DEFLATE_ENCODING = 1;
|
||||
if (encoding == DEFLATE_ENCODING) {
|
||||
// preface encoded data with uncompressed length
|
||||
QByteArray compressed(sizeof(quint32) + compressedLength, 0);
|
||||
*((quint32*)compressed.data()) = qToBigEndian<quint32>(arrayLength * sizeof(T));
|
||||
in.readRawData(compressed.data() + sizeof(quint32), compressedLength);
|
||||
position += compressedLength;
|
||||
QByteArray uncompressed = qUncompress(compressed);
|
||||
QDataStream uncompressedIn(uncompressed);
|
||||
uncompressedIn.setByteOrder(QDataStream::LittleEndian);
|
||||
uncompressedIn.setVersion(QDataStream::Qt_4_5); // for single/double precision switch
|
||||
for (quint32 i = 0; i < arrayLength; i++) {
|
||||
T value;
|
||||
uncompressedIn >> value;
|
||||
values.append(value);
|
||||
}
|
||||
} else {
|
||||
for (quint32 i = 0; i < arrayLength; i++) {
|
||||
T value;
|
||||
in >> value;
|
||||
position += streamSize<T>();
|
||||
values.append(value);
|
||||
}
|
||||
}
|
||||
return QVariant::fromValue(values);
|
||||
}
|
||||
|
||||
QVariant parseBinaryFBXProperty(QDataStream& in, int& position) {
|
||||
char ch;
|
||||
in.device()->getChar(&ch);
|
||||
position++;
|
||||
switch (ch) {
|
||||
case 'Y': {
|
||||
qint16 value;
|
||||
in >> value;
|
||||
position += sizeof(qint16);
|
||||
return QVariant::fromValue(value);
|
||||
}
|
||||
case 'C': {
|
||||
bool value;
|
||||
in >> value;
|
||||
position++;
|
||||
return QVariant::fromValue(value);
|
||||
}
|
||||
case 'I': {
|
||||
qint32 value;
|
||||
in >> value;
|
||||
position += sizeof(qint32);
|
||||
return QVariant::fromValue(value);
|
||||
}
|
||||
case 'F': {
|
||||
float value;
|
||||
in >> value;
|
||||
position += sizeof(float);
|
||||
return QVariant::fromValue(value);
|
||||
}
|
||||
case 'D': {
|
||||
double value;
|
||||
in >> value;
|
||||
position += sizeof(double);
|
||||
return QVariant::fromValue(value);
|
||||
}
|
||||
case 'L': {
|
||||
qint64 value;
|
||||
in >> value;
|
||||
position += sizeof(qint64);
|
||||
return QVariant::fromValue(value);
|
||||
}
|
||||
case 'f': {
|
||||
return readBinaryArray<float>(in, position);
|
||||
}
|
||||
case 'd': {
|
||||
return readBinaryArray<double>(in, position);
|
||||
}
|
||||
case 'l': {
|
||||
return readBinaryArray<qint64>(in, position);
|
||||
}
|
||||
case 'i': {
|
||||
return readBinaryArray<qint32>(in, position);
|
||||
}
|
||||
case 'b': {
|
||||
return readBinaryArray<bool>(in, position);
|
||||
}
|
||||
case 'S':
|
||||
case 'R': {
|
||||
quint32 length;
|
||||
in >> length;
|
||||
position += sizeof(quint32) + length;
|
||||
return QVariant::fromValue(in.device()->read(length));
|
||||
}
|
||||
default:
|
||||
throw QString("Unknown property type: ") + ch;
|
||||
}
|
||||
}
|
||||
|
||||
FBXNode parseBinaryFBXNode(QDataStream& in, int& position) {
|
||||
qint32 endOffset;
|
||||
quint32 propertyCount;
|
||||
quint32 propertyListLength;
|
||||
quint8 nameLength;
|
||||
|
||||
in >> endOffset;
|
||||
in >> propertyCount;
|
||||
in >> propertyListLength;
|
||||
in >> nameLength;
|
||||
position += sizeof(quint32) * 3 + sizeof(quint8);
|
||||
|
||||
FBXNode node;
|
||||
const int MIN_VALID_OFFSET = 40;
|
||||
if (endOffset < MIN_VALID_OFFSET || nameLength == 0) {
|
||||
// use a null name to indicate a null node
|
||||
return node;
|
||||
}
|
||||
node.name = in.device()->read(nameLength);
|
||||
position += nameLength;
|
||||
|
||||
for (quint32 i = 0; i < propertyCount; i++) {
|
||||
node.properties.append(parseBinaryFBXProperty(in, position));
|
||||
}
|
||||
|
||||
while (endOffset > position) {
|
||||
FBXNode child = parseBinaryFBXNode(in, position);
|
||||
if (child.name.isNull()) {
|
||||
return node;
|
||||
|
||||
} else {
|
||||
node.children.append(child);
|
||||
}
|
||||
}
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
class Tokenizer {
|
||||
public:
|
||||
|
||||
Tokenizer(QIODevice* device) : _device(device), _pushedBackToken(-1) { }
|
||||
|
||||
enum SpecialToken {
|
||||
NO_TOKEN = -1,
|
||||
NO_PUSHBACKED_TOKEN = -1,
|
||||
DATUM_TOKEN = 0x100
|
||||
};
|
||||
|
||||
int nextToken();
|
||||
const QByteArray& getDatum() const { return _datum; }
|
||||
|
||||
void pushBackToken(int token) { _pushedBackToken = token; }
|
||||
void ungetChar(char ch) { _device->ungetChar(ch); }
|
||||
|
||||
private:
|
||||
|
||||
QIODevice* _device;
|
||||
QByteArray _datum;
|
||||
int _pushedBackToken;
|
||||
};
|
||||
|
||||
int Tokenizer::nextToken() {
|
||||
if (_pushedBackToken != NO_PUSHBACKED_TOKEN) {
|
||||
int token = _pushedBackToken;
|
||||
_pushedBackToken = NO_PUSHBACKED_TOKEN;
|
||||
return token;
|
||||
}
|
||||
|
||||
char ch;
|
||||
while (_device->getChar(&ch)) {
|
||||
if (QChar(ch).isSpace()) {
|
||||
continue; // skip whitespace
|
||||
}
|
||||
switch (ch) {
|
||||
case ';':
|
||||
_device->readLine(); // skip the comment
|
||||
break;
|
||||
|
||||
case ':':
|
||||
case '{':
|
||||
case '}':
|
||||
case ',':
|
||||
return ch; // special punctuation
|
||||
|
||||
case '\"':
|
||||
_datum = "";
|
||||
while (_device->getChar(&ch)) {
|
||||
if (ch == '\"') { // end on closing quote
|
||||
break;
|
||||
}
|
||||
if (ch == '\\') { // handle escaped quotes
|
||||
if (_device->getChar(&ch) && ch != '\"') {
|
||||
_datum.append('\\');
|
||||
}
|
||||
}
|
||||
_datum.append(ch);
|
||||
}
|
||||
return DATUM_TOKEN;
|
||||
|
||||
default:
|
||||
_datum = "";
|
||||
_datum.append(ch);
|
||||
while (_device->getChar(&ch)) {
|
||||
if (QChar(ch).isSpace() || ch == ';' || ch == ':' || ch == '{' || ch == '}' || ch == ',' || ch == '\"') {
|
||||
ungetChar(ch); // read until we encounter a special character, then replace it
|
||||
break;
|
||||
}
|
||||
_datum.append(ch);
|
||||
}
|
||||
return DATUM_TOKEN;
|
||||
}
|
||||
}
|
||||
return NO_TOKEN;
|
||||
}
|
||||
|
||||
FBXNode parseTextFBXNode(Tokenizer& tokenizer) {
|
||||
FBXNode node;
|
||||
|
||||
if (tokenizer.nextToken() != Tokenizer::DATUM_TOKEN) {
|
||||
return node;
|
||||
}
|
||||
node.name = tokenizer.getDatum();
|
||||
|
||||
if (tokenizer.nextToken() != ':') {
|
||||
return node;
|
||||
}
|
||||
|
||||
int token;
|
||||
bool expectingDatum = true;
|
||||
while ((token = tokenizer.nextToken()) != Tokenizer::NO_TOKEN) {
|
||||
if (token == '{') {
|
||||
for (FBXNode child = parseTextFBXNode(tokenizer); !child.name.isNull(); child = parseTextFBXNode(tokenizer)) {
|
||||
node.children.append(child);
|
||||
}
|
||||
return node;
|
||||
}
|
||||
if (token == ',') {
|
||||
expectingDatum = true;
|
||||
|
||||
} else if (token == Tokenizer::DATUM_TOKEN && expectingDatum) {
|
||||
QByteArray datum = tokenizer.getDatum();
|
||||
if ((token = tokenizer.nextToken()) == ':') {
|
||||
tokenizer.ungetChar(':');
|
||||
tokenizer.pushBackToken(Tokenizer::DATUM_TOKEN);
|
||||
return node;
|
||||
|
||||
} else {
|
||||
tokenizer.pushBackToken(token);
|
||||
node.properties.append(datum);
|
||||
expectingDatum = false;
|
||||
}
|
||||
} else {
|
||||
tokenizer.pushBackToken(token);
|
||||
return node;
|
||||
}
|
||||
}
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
FBXNode FBXReader::parseFBX(QIODevice* device) {
|
||||
// verify the prolog
|
||||
const QByteArray BINARY_PROLOG = "Kaydara FBX Binary ";
|
||||
if (device->peek(BINARY_PROLOG.size()) != BINARY_PROLOG) {
|
||||
// parse as a text file
|
||||
FBXNode top;
|
||||
Tokenizer tokenizer(device);
|
||||
while (device->bytesAvailable()) {
|
||||
FBXNode next = parseTextFBXNode(tokenizer);
|
||||
if (next.name.isNull()) {
|
||||
return top;
|
||||
|
||||
} else {
|
||||
top.children.append(next);
|
||||
}
|
||||
}
|
||||
return top;
|
||||
}
|
||||
QDataStream in(device);
|
||||
in.setByteOrder(QDataStream::LittleEndian);
|
||||
in.setVersion(QDataStream::Qt_4_5); // for single/double precision switch
|
||||
|
||||
// see http://code.blender.org/index.php/2013/08/fbx-binary-file-format-specification/ for an explanation
|
||||
// of the FBX binary format
|
||||
|
||||
// skip the rest of the header
|
||||
const int HEADER_SIZE = 27;
|
||||
in.skipRawData(HEADER_SIZE);
|
||||
int position = HEADER_SIZE;
|
||||
|
||||
// parse the top-level node
|
||||
FBXNode top;
|
||||
while (device->bytesAvailable()) {
|
||||
FBXNode next = parseBinaryFBXNode(in, position);
|
||||
if (next.name.isNull()) {
|
||||
return top;
|
||||
|
||||
} else {
|
||||
top.children.append(next);
|
||||
}
|
||||
}
|
||||
|
||||
return top;
|
||||
}
|
||||
|
||||
|
||||
glm::vec3 FBXReader::getVec3(const QVariantList& properties, int index) {
    return glm::vec3(properties.at(index).value<double>(), properties.at(index + 1).value<double>(),
        properties.at(index + 2).value<double>());
}

QVector<glm::vec4> FBXReader::createVec4Vector(const QVector<double>& doubleVector) {
    QVector<glm::vec4> values;
    for (const double* it = doubleVector.constData(), *end = it + ((doubleVector.size() / 4) * 4); it != end; ) {
        float x = *it++;
        float y = *it++;
        float z = *it++;
        float w = *it++;
        values.append(glm::vec4(x, y, z, w));
    }
    return values;
}

QVector<glm::vec4> FBXReader::createVec4VectorRGBA(const QVector<double>& doubleVector, glm::vec4& average) {
    QVector<glm::vec4> values;
    for (const double* it = doubleVector.constData(), *end = it + ((doubleVector.size() / 4) * 4); it != end; ) {
        float x = *it++;
        float y = *it++;
        float z = *it++;
        float w = *it++;
        auto val = glm::vec4(x, y, z, w);
        values.append(val);
        average += val;
    }
    if (!values.isEmpty()) {
        average *= (1.0f / float(values.size()));
    }
    return values;
}

QVector<glm::vec3> FBXReader::createVec3Vector(const QVector<double>& doubleVector) {
    QVector<glm::vec3> values;
    for (const double* it = doubleVector.constData(), *end = it + ((doubleVector.size() / 3) * 3); it != end; ) {
        float x = *it++;
        float y = *it++;
        float z = *it++;
        values.append(glm::vec3(x, y, z));
    }
    return values;
}

QVector<glm::vec2> FBXReader::createVec2Vector(const QVector<double>& doubleVector) {
    QVector<glm::vec2> values;
    for (const double* it = doubleVector.constData(), *end = it + ((doubleVector.size() / 2) * 2); it != end; ) {
        float s = *it++;
        float t = *it++;
        values.append(glm::vec2(s, -t));
    }
    return values;
}

glm::mat4 FBXReader::createMat4(const QVector<double>& doubleVector) {
    return glm::mat4(doubleVector.at(0), doubleVector.at(1), doubleVector.at(2), doubleVector.at(3),
        doubleVector.at(4), doubleVector.at(5), doubleVector.at(6), doubleVector.at(7),
        doubleVector.at(8), doubleVector.at(9), doubleVector.at(10), doubleVector.at(11),
        doubleVector.at(12), doubleVector.at(13), doubleVector.at(14), doubleVector.at(15));
}

QVector<int> FBXReader::getIntVector(const FBXNode& node) {
    foreach (const FBXNode& child, node.children) {
        if (child.name == "a") {
            return getIntVector(child);
        }
    }
    if (node.properties.isEmpty()) {
        return QVector<int>();
    }
    QVector<int> vector = node.properties.at(0).value<QVector<int> >();
    if (!vector.isEmpty()) {
        return vector;
    }
    for (int i = 0; i < node.properties.size(); i++) {
        vector.append(node.properties.at(i).toInt());
    }
    return vector;
}

QVector<float> FBXReader::getFloatVector(const FBXNode& node) {
    foreach (const FBXNode& child, node.children) {
        if (child.name == "a") {
            return getFloatVector(child);
        }
    }
    if (node.properties.isEmpty()) {
        return QVector<float>();
    }
    QVector<float> vector = node.properties.at(0).value<QVector<float> >();
    if (!vector.isEmpty()) {
        return vector;
    }
    for (int i = 0; i < node.properties.size(); i++) {
        vector.append(node.properties.at(i).toFloat());
    }
    return vector;
}

QVector<double> FBXReader::getDoubleVector(const FBXNode& node) {
    foreach (const FBXNode& child, node.children) {
        if (child.name == "a") {
            return getDoubleVector(child);
        }
    }
    if (node.properties.isEmpty()) {
        return QVector<double>();
    }
    QVector<double> vector = node.properties.at(0).value<QVector<double> >();
    if (!vector.isEmpty()) {
        return vector;
    }
    for (int i = 0; i < node.properties.size(); i++) {
        vector.append(node.properties.at(i).toDouble());
    }
    return vector;
}

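Aside (not from this diff): in the FBX 2013+ ASCII format an array node stores its payload under a child named "a", which is why each getter above first recurses into that child. A hedged sketch combining these helpers, written with static-style calls for brevity (in the actual class they may need an FBXReader instance):

// Sketch: recover vec3 positions from a parsed "Vertices" node ("verticesNode" is hypothetical).
QVector<glm::vec3> extractPositions(const FBXNode& verticesNode) {
    QVector<double> raw = FBXReader::getDoubleVector(verticesNode);  // unwraps the "a" child when present
    return FBXReader::createVec3Vector(raw);                         // packs consecutive triples into glm::vec3
}
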
@ -121,21 +121,7 @@ glm::vec2 OBJTokenizer::getVec2() {

void setMeshPartDefaults(FBXMeshPart& meshPart, QString materialID) {
    meshPart.diffuseColor = glm::vec3(1, 1, 1);
    meshPart.specularColor = glm::vec3(1, 1, 1);
    meshPart.emissiveColor = glm::vec3(0, 0, 0);
    meshPart.emissiveParams = glm::vec2(0, 1);
    meshPart.shininess = 40;
    meshPart.opacity = 1;

    meshPart.materialID = materialID;
    meshPart.opacity = 1.0;
    meshPart._material = std::make_shared<model::Material>();
    meshPart._material->setDiffuse(glm::vec3(1.0, 1.0, 1.0));
    meshPart._material->setOpacity(1.0);
    meshPart._material->setMetallic(0.0);
    meshPart._material->setGloss(96.0);
    meshPart._material->setEmissive(glm::vec3(0.0, 0.0, 0.0));
}
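For context, a hedged sketch (not part of this commit) of how these defaults are meant to be applied when the OBJ reader starts a new mesh part; the material ID is made up:

// Sketch: give a freshly created part sane defaults before any MTL values are copied in.
FBXMeshPart part;
setMeshPartDefaults(part, QString("part-0"));   // "part-0" is a hypothetical material ID
// part._material now points at a default model::Material: white diffuse, opacity 1, metallic 0, gloss 96, no emissive.
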
|
||||
|
||||
// OBJFace
|
||||
|
@ -502,7 +488,10 @@ FBXGeometry* OBJReader::readOBJ(QByteArray& model, const QVariantHash& mapping,
        }
        if (!groupMaterialName.isEmpty()) {
            OBJMaterial* material = &materials[groupMaterialName];
            // The code behind this is in transition. Some things are set directly in the FBXMeshPart...

            // TODO Fix this once the transition is understood
            /*// The code behind this is in transition. Some things are set directly in the FBXMeshPart...
            meshPart.materialID = groupMaterialName;
            meshPart.diffuseTexture.filename = material->diffuseTextureFilename;
            meshPart.specularTexture.filename = material->specularTextureFilename;

@ -511,6 +500,7 @@ FBXGeometry* OBJReader::readOBJ(QByteArray& model, const QVariantHash& mapping,
            meshPart._material->setMetallic(glm::length(material->specularColor));
            meshPart._material->setGloss(material->shininess);
            meshPart._material->setOpacity(material->opacity);
            */
        }
        foreach(OBJFace face, faceGroup) {
            glm::vec3 v0 = vertices[face.vertexIndices[0]];
|
@ -591,15 +581,18 @@ void fbxDebugDump(const FBXGeometry& fbxgeo) {
        foreach (FBXMeshPart meshPart, mesh.parts) {
            qCDebug(modelformat) << " quadIndices.count() =" << meshPart.quadIndices.count();
            qCDebug(modelformat) << " triangleIndices.count() =" << meshPart.triangleIndices.count();
            /*
            qCDebug(modelformat) << " diffuseColor =" << meshPart.diffuseColor << "mat =" << meshPart._material->getDiffuse();
            qCDebug(modelformat) << " specularColor =" << meshPart.specularColor << "mat =" << meshPart._material->getMetallic();
            qCDebug(modelformat) << " emissiveColor =" << meshPart.emissiveColor << "mat =" << meshPart._material->getEmissive();
            qCDebug(modelformat) << " emissiveParams =" << meshPart.emissiveParams;
            qCDebug(modelformat) << " gloss =" << meshPart.shininess << "mat =" << meshPart._material->getGloss();
            qCDebug(modelformat) << " opacity =" << meshPart.opacity << "mat =" << meshPart._material->getOpacity();
            */
            qCDebug(modelformat) << " materialID =" << meshPart.materialID;
            qCDebug(modelformat) << " diffuse texture =" << meshPart.diffuseTexture.filename;
            /* qCDebug(modelformat) << " diffuse texture =" << meshPart.diffuseTexture.filename;
            qCDebug(modelformat) << " specular texture =" << meshPart.specularTexture.filename;
            */
        }
        qCDebug(modelformat) << " clusters.count() =" << mesh.clusters.count();
        foreach (FBXCluster cluster, mesh.clusters) {
|
|
|
@ -1,651 +0,0 @@
|
|||
//
|
||||
// TextureCache.cpp
|
||||
// libraries/gpu-networking/src
|
||||
//
|
||||
// Created by Andrzej Kapolka on 8/6/13.
|
||||
// Copyright 2013 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#include "TextureCache.h"
|
||||
|
||||
#include <mutex>
|
||||
|
||||
#include <glm/glm.hpp>
|
||||
#include <glm/gtc/random.hpp>
|
||||
|
||||
#include <QNetworkReply>
|
||||
#include <QPainter>
|
||||
#include <QRunnable>
|
||||
#include <QThreadPool>
|
||||
#include <qimagereader.h>
|
||||
#include <PathUtils.h>
|
||||
|
||||
#include <gpu/Batch.h>
|
||||
|
||||
#include "GpuNetworkingLogging.h"
|
||||
|
||||
TextureCache::TextureCache() {
|
||||
const qint64 TEXTURE_DEFAULT_UNUSED_MAX_SIZE = DEFAULT_UNUSED_MAX_SIZE;
|
||||
setUnusedResourceCacheSize(TEXTURE_DEFAULT_UNUSED_MAX_SIZE);
|
||||
}
|
||||
|
||||
TextureCache::~TextureCache() {
|
||||
}
|
||||
|
||||
// use fixed table of permutations. Could also make ordered list programmatically
|
||||
// and then shuffle algorithm. For testing, this ensures consistent behavior in each run.
|
||||
// this list taken from Ken Perlin's Improved Noise reference implementation (orig. in Java) at
|
||||
// http://mrl.nyu.edu/~perlin/noise/
|
||||
|
||||
const int permutation[256] =
|
||||
{
|
||||
151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225,
|
||||
140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21, 10, 23, 190, 6, 148,
|
||||
247, 120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32,
|
||||
57, 177, 33, 88, 237, 149, 56, 87, 174, 20, 125, 136, 171, 168, 68, 175,
|
||||
74, 165, 71, 134, 139, 48, 27, 166, 77, 146, 158, 231, 83, 111, 229, 122,
|
||||
60, 211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143, 54,
|
||||
65, 25, 63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169,
|
||||
200, 196, 135, 130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64,
|
||||
52, 217, 226, 250, 124, 123, 5, 202, 38, 147, 118, 126, 255, 82, 85, 212,
|
||||
207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28, 42, 223, 183, 170, 213,
|
||||
119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101, 155, 167, 43, 172, 9,
|
||||
129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232, 178, 185, 112, 104,
|
||||
218, 246, 97, 228, 251, 34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241,
|
||||
81, 51, 145, 235, 249, 14, 239, 107, 49, 192, 214, 31, 181, 199, 106, 157,
|
||||
184, 84, 204, 176, 115, 121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93,
|
||||
222, 114, 67, 29, 24, 72, 243, 141, 128, 195, 78, 66, 215, 61, 156, 180
|
||||
};
|
||||
|
||||
#define USE_CHRIS_NOISE 1
|
||||
|
||||
const gpu::TexturePointer& TextureCache::getPermutationNormalTexture() {
|
||||
if (!_permutationNormalTexture) {
|
||||
|
||||
// the first line consists of random permutation offsets
|
||||
unsigned char data[256 * 2 * 3];
|
||||
#if (USE_CHRIS_NOISE==1)
|
||||
for (int i = 0; i < 256; i++) {
|
||||
data[3*i+0] = permutation[i];
|
||||
data[3*i+1] = permutation[i];
|
||||
data[3*i+2] = permutation[i];
|
||||
#else
|
||||
for (int i = 0; i < 256 * 3; i++) {
|
||||
data[i] = rand() % 256;
|
||||
#endif
|
||||
}
|
||||
|
||||
for (int i = 256 * 3; i < 256 * 3 * 2; i += 3) {
|
||||
glm::vec3 randvec = glm::sphericalRand(1.0f);
|
||||
data[i] = ((randvec.x + 1.0f) / 2.0f) * 255.0f;
|
||||
data[i + 1] = ((randvec.y + 1.0f) / 2.0f) * 255.0f;
|
||||
data[i + 2] = ((randvec.z + 1.0f) / 2.0f) * 255.0f;
|
||||
}
|
||||
|
||||
_permutationNormalTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC3, gpu::UINT8, gpu::RGB), 256, 2));
|
||||
_permutationNormalTexture->assignStoredMip(0, _blueTexture->getTexelFormat(), sizeof(data), data);
|
||||
}
|
||||
return _permutationNormalTexture;
|
||||
}
|
||||
|
||||
const unsigned char OPAQUE_WHITE[] = { 0xFF, 0xFF, 0xFF, 0xFF };
|
||||
const unsigned char OPAQUE_GRAY[] = { 0x80, 0x80, 0x80, 0xFF };
|
||||
const unsigned char OPAQUE_BLUE[] = { 0x80, 0x80, 0xFF, 0xFF };
|
||||
const unsigned char OPAQUE_BLACK[] = { 0x00, 0x00, 0x00, 0xFF };
|
||||
|
||||
const gpu::TexturePointer& TextureCache::getWhiteTexture() {
|
||||
if (!_whiteTexture) {
|
||||
_whiteTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA), 1, 1));
|
||||
_whiteTexture->assignStoredMip(0, _whiteTexture->getTexelFormat(), sizeof(OPAQUE_WHITE), OPAQUE_WHITE);
|
||||
}
|
||||
return _whiteTexture;
|
||||
}
|
||||
|
||||
const gpu::TexturePointer& TextureCache::getGrayTexture() {
|
||||
if (!_grayTexture) {
|
||||
_grayTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA), 1, 1));
|
||||
_grayTexture->assignStoredMip(0, _whiteTexture->getTexelFormat(), sizeof(OPAQUE_WHITE), OPAQUE_GRAY);
|
||||
}
|
||||
return _grayTexture;
|
||||
}
|
||||
|
||||
const gpu::TexturePointer& TextureCache::getBlueTexture() {
|
||||
if (!_blueTexture) {
|
||||
_blueTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA), 1, 1));
|
||||
_blueTexture->assignStoredMip(0, _blueTexture->getTexelFormat(), sizeof(OPAQUE_BLUE), OPAQUE_BLUE);
|
||||
}
|
||||
return _blueTexture;
|
||||
}
|
||||
|
||||
const gpu::TexturePointer& TextureCache::getBlackTexture() {
|
||||
if (!_blackTexture) {
|
||||
_blackTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA), 1, 1));
|
||||
_blackTexture->assignStoredMip(0, _whiteTexture->getTexelFormat(), sizeof(OPAQUE_BLACK), OPAQUE_BLACK);
|
||||
}
|
||||
return _blackTexture;
|
||||
}
|
||||
|
||||
|
||||
const gpu::TexturePointer& TextureCache::getNormalFittingTexture() {
|
||||
if (!_normalFittingTexture) {
|
||||
_normalFittingTexture = getImageTexture(PathUtils::resourcesPath() + "images/normalFittingScale.dds");
|
||||
}
|
||||
return _normalFittingTexture;
|
||||
}
|
||||
|
||||
/// Extra data for creating textures.
|
||||
class TextureExtra {
|
||||
public:
|
||||
TextureType type;
|
||||
const QByteArray& content;
|
||||
};
|
||||
|
||||
NetworkTexturePointer TextureCache::getTexture(const QUrl& url, TextureType type, bool dilatable, const QByteArray& content) {
|
||||
if (!dilatable) {
|
||||
TextureExtra extra = { type, content };
|
||||
return ResourceCache::getResource(url, QUrl(), false, &extra).staticCast<NetworkTexture>();
|
||||
}
|
||||
NetworkTexturePointer texture = _dilatableNetworkTextures.value(url);
|
||||
if (texture.isNull()) {
|
||||
texture = NetworkTexturePointer(new DilatableNetworkTexture(url, content), &Resource::allReferencesCleared);
|
||||
texture->setSelf(texture);
|
||||
texture->setCache(this);
|
||||
_dilatableNetworkTextures.insert(url, texture);
|
||||
} else {
|
||||
removeUnusedResource(texture);
|
||||
}
|
||||
return texture;
|
||||
}
|
||||
|
||||
/// Returns a texture version of an image file
|
||||
gpu::TexturePointer TextureCache::getImageTexture(const QString& path) {
|
||||
QImage image = QImage(path).mirrored(false, true);
|
||||
gpu::Element formatGPU = gpu::Element(gpu::VEC3, gpu::UINT8, gpu::RGB);
|
||||
gpu::Element formatMip = gpu::Element(gpu::VEC3, gpu::UINT8, gpu::RGB);
|
||||
if (image.hasAlphaChannel()) {
|
||||
formatGPU = gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA);
|
||||
formatMip = gpu::Element(gpu::VEC4, gpu::UINT8, gpu::BGRA);
|
||||
}
|
||||
gpu::TexturePointer texture = gpu::TexturePointer(
|
||||
gpu::Texture::create2D(formatGPU, image.width(), image.height(),
|
||||
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
|
||||
texture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
|
||||
texture->autoGenerateMips(-1);
|
||||
return texture;
|
||||
}
|
||||
|
||||
|
||||
QSharedPointer<Resource> TextureCache::createResource(const QUrl& url,
|
||||
const QSharedPointer<Resource>& fallback, bool delayLoad, const void* extra) {
|
||||
const TextureExtra* textureExtra = static_cast<const TextureExtra*>(extra);
|
||||
return QSharedPointer<Resource>(new NetworkTexture(url, textureExtra->type, textureExtra->content),
|
||||
&Resource::allReferencesCleared);
|
||||
}
|
||||
|
||||
Texture::Texture() {
|
||||
}
|
||||
|
||||
Texture::~Texture() {
|
||||
}
|
||||
|
||||
NetworkTexture::NetworkTexture(const QUrl& url, TextureType type, const QByteArray& content) :
|
||||
Resource(url, !content.isEmpty()),
|
||||
_type(type),
|
||||
_translucent(false),
|
||||
_width(0),
|
||||
_height(0) {
|
||||
|
||||
if (!url.isValid()) {
|
||||
_loaded = true;
|
||||
}
|
||||
|
||||
std::string theName = url.toString().toStdString();
|
||||
// if we have content, load it after we have our self pointer
|
||||
if (!content.isEmpty()) {
|
||||
_startedLoading = true;
|
||||
QMetaObject::invokeMethod(this, "loadContent", Qt::QueuedConnection, Q_ARG(const QByteArray&, content));
|
||||
}
|
||||
}
|
||||
|
||||
class ImageReader : public QRunnable {
|
||||
public:
|
||||
|
||||
ImageReader(const QWeakPointer<Resource>& texture, TextureType type, const QByteArray& data, const QUrl& url = QUrl());
|
||||
|
||||
virtual void run();
|
||||
|
||||
private:
|
||||
|
||||
QWeakPointer<Resource> _texture;
|
||||
TextureType _type;
|
||||
QUrl _url;
|
||||
QByteArray _content;
|
||||
};
|
||||
|
||||
void NetworkTexture::downloadFinished(const QByteArray& data) {
|
||||
// send the reader off to the thread pool
|
||||
QThreadPool::globalInstance()->start(new ImageReader(_self, _type, data, _url));
|
||||
}
|
||||
|
||||
void NetworkTexture::loadContent(const QByteArray& content) {
|
||||
QThreadPool::globalInstance()->start(new ImageReader(_self, _type, content, _url));
|
||||
}
|
||||
|
||||
ImageReader::ImageReader(const QWeakPointer<Resource>& texture, TextureType type, const QByteArray& data,
|
||||
const QUrl& url) :
|
||||
_texture(texture),
|
||||
_type(type),
|
||||
_url(url),
|
||||
_content(data)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
std::once_flag onceListSupportedFormatsflag;
|
||||
void listSupportedImageFormats() {
|
||||
std::call_once(onceListSupportedFormatsflag, [](){
|
||||
auto supportedFormats = QImageReader::supportedImageFormats();
|
||||
QString formats;
|
||||
foreach(const QByteArray& f, supportedFormats) {
|
||||
formats += QString(f) + ",";
|
||||
}
|
||||
qCDebug(gpunetwork) << "List of supported Image formats:" << formats;
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
class CubeLayout {
|
||||
public:
|
||||
int _widthRatio = 1;
|
||||
int _heightRatio = 1;
|
||||
|
||||
class Face {
|
||||
public:
|
||||
int _x = 0;
|
||||
int _y = 0;
|
||||
bool _horizontalMirror = false;
|
||||
bool _verticalMirror = false;
|
||||
|
||||
Face() {}
|
||||
Face(int x, int y, bool horizontalMirror, bool verticalMirror) : _x(x), _y(y), _horizontalMirror(horizontalMirror), _verticalMirror(verticalMirror) {}
|
||||
};
|
||||
|
||||
Face _faceXPos;
|
||||
Face _faceXNeg;
|
||||
Face _faceYPos;
|
||||
Face _faceYNeg;
|
||||
Face _faceZPos;
|
||||
Face _faceZNeg;
|
||||
|
||||
CubeLayout(int wr, int hr, Face fXP, Face fXN, Face fYP, Face fYN, Face fZP, Face fZN) :
|
||||
_widthRatio(wr),
|
||||
_heightRatio(hr),
|
||||
_faceXPos(fXP),
|
||||
_faceXNeg(fXN),
|
||||
_faceYPos(fYP),
|
||||
_faceYNeg(fYN),
|
||||
_faceZPos(fZP),
|
||||
_faceZNeg(fZN) {}
|
||||
};
|
||||
|
||||
void ImageReader::run() {
|
||||
QSharedPointer<Resource> texture = _texture.toStrongRef();
|
||||
if (texture.isNull()) {
|
||||
return;
|
||||
}
|
||||
|
||||
listSupportedImageFormats();
|
||||
|
||||
// try to help the QImage loader by extracting the image file format from the url filename ext
|
||||
// Some tga are not created properly for example without it
|
||||
auto filename = _url.fileName().toStdString();
|
||||
auto filenameExtension = filename.substr(filename.find_last_of('.') + 1);
|
||||
QImage image = QImage::fromData(_content, filenameExtension.c_str());
|
||||
|
||||
// Note that QImage.format is the pixel format which is different from the "format" of the image file...
|
||||
auto imageFormat = image.format();
|
||||
int originalWidth = image.width();
|
||||
int originalHeight = image.height();
|
||||
|
||||
if (originalWidth == 0 || originalHeight == 0 || imageFormat == QImage::Format_Invalid) {
|
||||
if (filenameExtension.empty()) {
|
||||
qCDebug(gpunetwork) << "QImage failed to create from content, no file extension:" << _url;
|
||||
} else {
|
||||
qCDebug(gpunetwork) << "QImage failed to create from content" << _url;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
int imageArea = image.width() * image.height();
|
||||
auto ntex = dynamic_cast<NetworkTexture*>(&*texture);
|
||||
if (ntex && (ntex->getType() == CUBE_TEXTURE)) {
|
||||
qCDebug(gpunetwork) << "Cube map size:" << _url << image.width() << image.height();
|
||||
}
|
||||
|
||||
int opaquePixels = 0;
|
||||
int translucentPixels = 0;
|
||||
bool isTransparent = false;
|
||||
int redTotal = 0, greenTotal = 0, blueTotal = 0, alphaTotal = 0;
|
||||
const int EIGHT_BIT_MAXIMUM = 255;
|
||||
QColor averageColor(EIGHT_BIT_MAXIMUM, EIGHT_BIT_MAXIMUM, EIGHT_BIT_MAXIMUM);
|
||||
|
||||
if (!image.hasAlphaChannel()) {
|
||||
if (image.format() != QImage::Format_RGB888) {
|
||||
image = image.convertToFormat(QImage::Format_RGB888);
|
||||
}
|
||||
// int redTotal = 0, greenTotal = 0, blueTotal = 0;
|
||||
for (int y = 0; y < image.height(); y++) {
|
||||
for (int x = 0; x < image.width(); x++) {
|
||||
QRgb rgb = image.pixel(x, y);
|
||||
redTotal += qRed(rgb);
|
||||
greenTotal += qGreen(rgb);
|
||||
blueTotal += qBlue(rgb);
|
||||
}
|
||||
}
|
||||
if (imageArea > 0) {
|
||||
averageColor.setRgb(redTotal / imageArea, greenTotal / imageArea, blueTotal / imageArea);
|
||||
}
|
||||
} else {
|
||||
if (image.format() != QImage::Format_ARGB32) {
|
||||
image = image.convertToFormat(QImage::Format_ARGB32);
|
||||
}
|
||||
|
||||
// check for translucency/false transparency
|
||||
// int opaquePixels = 0;
|
||||
// int translucentPixels = 0;
|
||||
// int redTotal = 0, greenTotal = 0, blueTotal = 0, alphaTotal = 0;
|
||||
for (int y = 0; y < image.height(); y++) {
|
||||
for (int x = 0; x < image.width(); x++) {
|
||||
QRgb rgb = image.pixel(x, y);
|
||||
redTotal += qRed(rgb);
|
||||
greenTotal += qGreen(rgb);
|
||||
blueTotal += qBlue(rgb);
|
||||
int alpha = qAlpha(rgb);
|
||||
alphaTotal += alpha;
|
||||
if (alpha == EIGHT_BIT_MAXIMUM) {
|
||||
opaquePixels++;
|
||||
} else if (alpha != 0) {
|
||||
translucentPixels++;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (opaquePixels == imageArea) {
|
||||
qCDebug(gpunetwork) << "Image with alpha channel is completely opaque:" << _url;
|
||||
image = image.convertToFormat(QImage::Format_RGB888);
|
||||
}
|
||||
|
||||
averageColor = QColor(redTotal / imageArea,
|
||||
greenTotal / imageArea, blueTotal / imageArea, alphaTotal / imageArea);
|
||||
|
||||
isTransparent = (translucentPixels >= imageArea / 2);
|
||||
}
|
||||
|
||||
gpu::Texture* theTexture = nullptr;
|
||||
if ((image.width() > 0) && (image.height() > 0)) {
|
||||
|
||||
// bool isLinearRGB = true; //(_type == NORMAL_TEXTURE) || (_type == EMISSIVE_TEXTURE);
|
||||
bool isLinearRGB = !(_type == CUBE_TEXTURE); //(_type == NORMAL_TEXTURE) || (_type == EMISSIVE_TEXTURE);
|
||||
|
||||
gpu::Element formatGPU = gpu::Element(gpu::VEC3, gpu::UINT8, (isLinearRGB ? gpu::RGB : gpu::SRGB));
|
||||
gpu::Element formatMip = gpu::Element(gpu::VEC3, gpu::UINT8, (isLinearRGB ? gpu::RGB : gpu::SRGB));
|
||||
if (image.hasAlphaChannel()) {
|
||||
formatGPU = gpu::Element(gpu::VEC4, gpu::UINT8, (isLinearRGB ? gpu::RGBA : gpu::SRGBA));
|
||||
formatMip = gpu::Element(gpu::VEC4, gpu::UINT8, (isLinearRGB ? gpu::BGRA : gpu::SBGRA));
|
||||
}
|
||||
|
||||
if (_type == CUBE_TEXTURE) {
|
||||
|
||||
const CubeLayout CUBEMAP_LAYOUTS[] = {
|
||||
// Here is the expected layout for the faces in an image with the 1/6 aspect ratio:
|
||||
//
|
||||
// WIDTH
|
||||
// <------>
|
||||
// ^ +------+
|
||||
// | | |
|
||||
// | | +X |
|
||||
// | | |
|
||||
// H +------+
|
||||
// E | |
|
||||
// I | -X |
|
||||
// G | |
|
||||
// H +------+
|
||||
// T | |
|
||||
// | | +Y |
|
||||
// | | |
|
||||
// | +------+
|
||||
// | | |
|
||||
// | | -Y |
|
||||
// | | |
|
||||
// H +------+
|
||||
// E | |
|
||||
// I | +Z |
|
||||
// G | |
|
||||
// H +------+
|
||||
// T | |
|
||||
// | | -Z |
|
||||
// | | |
|
||||
// V +------+
|
||||
//
|
||||
// FaceWidth = width = height / 6
|
||||
{ 1, 6,
|
||||
{0, 0, true, false},
|
||||
{0, 1, true, false},
|
||||
{0, 2, false, true},
|
||||
{0, 3, false, true},
|
||||
{0, 4, true, false},
|
||||
{0, 5, true, false}
|
||||
},
|
||||
|
||||
// Here is the expected layout for the faces in an image with the 3/4 aspect ratio:
|
||||
//
|
||||
// <-----------WIDTH----------->
|
||||
// ^ +------+------+------+------+
|
||||
// | | | | | |
|
||||
// | | | +Y | | |
|
||||
// | | | | | |
|
||||
// H +------+------+------+------+
|
||||
// E | | | | |
|
||||
// I | -X | -Z | +X | +Z |
|
||||
// G | | | | |
|
||||
// H +------+------+------+------+
|
||||
// T | | | | |
|
||||
// | | | -Y | | |
|
||||
// | | | | | |
|
||||
// V +------+------+------+------+
|
||||
//
|
||||
// FaceWidth = width / 4 = height / 3
|
||||
{ 4, 3,
|
||||
{2, 1, true, false},
|
||||
{0, 1, true, false},
|
||||
{1, 0, false, true},
|
||||
{1, 2, false, true},
|
||||
{3, 1, true, false},
|
||||
{1, 1, true, false}
|
||||
},
|
||||
|
||||
// Here is the expected layout for the faces in an image with the 4/3 aspect ratio:
|
||||
//
|
||||
// <-------WIDTH-------->
|
||||
// ^ +------+------+------+
|
||||
// | | | | |
|
||||
// | | | +Y | |
|
||||
// | | | | |
|
||||
// H +------+------+------+
|
||||
// E | | | |
|
||||
// I | -X | -Z | +X |
|
||||
// G | | | |
|
||||
// H +------+------+------+
|
||||
// T | | | |
|
||||
// | | | -Y | |
|
||||
// | | | | |
|
||||
// | +------+------+------+
|
||||
// | | | | |
|
||||
// | | | +Z! | | <+Z is upside down!
|
||||
// | | | | |
|
||||
// V +------+------+------+
|
||||
//
|
||||
// FaceWidth = width / 3 = height / 4
|
||||
{ 3, 4,
|
||||
{2, 1, true, false},
|
||||
{0, 1, true, false},
|
||||
{1, 0, false, true},
|
||||
{1, 2, false, true},
|
||||
{1, 3, false, true},
|
||||
{1, 1, true, false}
|
||||
}
|
||||
};
|
||||
const int NUM_CUBEMAP_LAYOUTS = sizeof(CUBEMAP_LAYOUTS) / sizeof(CubeLayout);
|
||||
|
||||
// Find the layout of the cubemap in the 2D image
|
||||
int foundLayout = -1;
|
||||
for (int i = 0; i < NUM_CUBEMAP_LAYOUTS; i++) {
|
||||
if ((image.height() * CUBEMAP_LAYOUTS[i]._widthRatio) == (image.width() * CUBEMAP_LAYOUTS[i]._heightRatio)) {
|
||||
foundLayout = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<QImage> faces;
|
||||
// If found, go extract the faces as separate images
|
||||
if (foundLayout >= 0) {
|
||||
auto& layout = CUBEMAP_LAYOUTS[foundLayout];
|
||||
int faceWidth = image.width() / layout._widthRatio;
|
||||
|
||||
faces.push_back(image.copy(QRect(layout._faceXPos._x * faceWidth, layout._faceXPos._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceXPos._horizontalMirror, layout._faceXPos._verticalMirror));
|
||||
faces.push_back(image.copy(QRect(layout._faceXNeg._x * faceWidth, layout._faceXNeg._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceXNeg._horizontalMirror, layout._faceXNeg._verticalMirror));
|
||||
faces.push_back(image.copy(QRect(layout._faceYPos._x * faceWidth, layout._faceYPos._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceYPos._horizontalMirror, layout._faceYPos._verticalMirror));
|
||||
faces.push_back(image.copy(QRect(layout._faceYNeg._x * faceWidth, layout._faceYNeg._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceYNeg._horizontalMirror, layout._faceYNeg._verticalMirror));
|
||||
faces.push_back(image.copy(QRect(layout._faceZPos._x * faceWidth, layout._faceZPos._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceZPos._horizontalMirror, layout._faceZPos._verticalMirror));
|
||||
faces.push_back(image.copy(QRect(layout._faceZNeg._x * faceWidth, layout._faceZNeg._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceZNeg._horizontalMirror, layout._faceZNeg._verticalMirror));
|
||||
} else {
|
||||
qCDebug(gpunetwork) << "Failed to find a known cube map layout from this image:" << _url;
|
||||
return;
|
||||
}
|
||||
|
||||
// If the 6 faces have been created go on and define the true Texture
|
||||
if (faces.size() == gpu::Texture::NUM_FACES_PER_TYPE[gpu::Texture::TEX_CUBE]) {
|
||||
theTexture = gpu::Texture::createCube(formatGPU, faces[0].width(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP));
|
||||
theTexture->autoGenerateMips(-1);
|
||||
int f = 0;
|
||||
for (auto& face : faces) {
|
||||
theTexture->assignStoredMipFace(0, formatMip, face.byteCount(), face.constBits(), f);
|
||||
f++;
|
||||
}
|
||||
|
||||
// GEnerate irradiance while we are at it
|
||||
theTexture->generateIrradiance();
|
||||
}
|
||||
|
||||
} else {
|
||||
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
|
||||
theTexture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
|
||||
theTexture->autoGenerateMips(-1);
|
||||
}
|
||||
}
|
||||
|
||||
QMetaObject::invokeMethod(texture.data(), "setImage",
|
||||
Q_ARG(const QImage&, image),
|
||||
Q_ARG(void*, theTexture),
|
||||
Q_ARG(bool, isTransparent),
|
||||
Q_ARG(const QColor&, averageColor),
|
||||
Q_ARG(int, originalWidth), Q_ARG(int, originalHeight));
|
||||
|
||||
|
||||
}
|
||||
|
||||
void NetworkTexture::setImage(const QImage& image, void* voidTexture, bool translucent, const QColor& averageColor, int originalWidth,
|
||||
int originalHeight) {
|
||||
_translucent = translucent;
|
||||
_averageColor = averageColor;
|
||||
_originalWidth = originalWidth;
|
||||
_originalHeight = originalHeight;
|
||||
|
||||
gpu::Texture* texture = static_cast<gpu::Texture*>(voidTexture);
|
||||
// Passing ownership
|
||||
_gpuTexture.reset(texture);
|
||||
|
||||
if (_gpuTexture) {
|
||||
_width = _gpuTexture->getWidth();
|
||||
_height = _gpuTexture->getHeight();
|
||||
} else {
|
||||
_width = _height = 0;
|
||||
}
|
||||
|
||||
finishedLoading(true);
|
||||
|
||||
imageLoaded(image);
|
||||
}
|
||||
|
||||
void NetworkTexture::imageLoaded(const QImage& image) {
|
||||
// nothing by default
|
||||
}
|
||||
|
||||
DilatableNetworkTexture::DilatableNetworkTexture(const QUrl& url, const QByteArray& content) :
|
||||
NetworkTexture(url, DEFAULT_TEXTURE, content),
|
||||
_innerRadius(0),
|
||||
_outerRadius(0)
|
||||
{
|
||||
}
|
||||
|
||||
QSharedPointer<Texture> DilatableNetworkTexture::getDilatedTexture(float dilation) {
|
||||
QSharedPointer<Texture> texture = _dilatedTextures.value(dilation);
|
||||
if (texture.isNull()) {
|
||||
texture = QSharedPointer<Texture>::create();
|
||||
|
||||
if (!_image.isNull()) {
|
||||
QImage dilatedImage = _image;
|
||||
QPainter painter;
|
||||
painter.begin(&dilatedImage);
|
||||
QPainterPath path;
|
||||
qreal radius = glm::mix((float) _innerRadius, (float) _outerRadius, dilation);
|
||||
path.addEllipse(QPointF(_image.width() / 2.0, _image.height() / 2.0), radius, radius);
|
||||
painter.fillPath(path, Qt::black);
|
||||
painter.end();
|
||||
|
||||
bool isLinearRGB = true;// (_type == NORMAL_TEXTURE) || (_type == EMISSIVE_TEXTURE);
|
||||
gpu::Element formatGPU = gpu::Element(gpu::VEC3, gpu::UINT8, (isLinearRGB ? gpu::RGB : gpu::SRGB));
|
||||
gpu::Element formatMip = gpu::Element(gpu::VEC3, gpu::UINT8, (isLinearRGB ? gpu::RGB : gpu::SRGB));
|
||||
if (dilatedImage.hasAlphaChannel()) {
|
||||
formatGPU = gpu::Element(gpu::VEC4, gpu::UINT8, (isLinearRGB ? gpu::RGBA : gpu::SRGBA));
|
||||
// FIXME either remove the ?: operator or provide different arguments depending on linear
|
||||
formatMip = gpu::Element(gpu::VEC4, gpu::UINT8, (isLinearRGB ? gpu::BGRA : gpu::BGRA));
|
||||
}
|
||||
texture->_gpuTexture = gpu::TexturePointer(gpu::Texture::create2D(formatGPU, dilatedImage.width(), dilatedImage.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
|
||||
texture->_gpuTexture->assignStoredMip(0, formatMip, dilatedImage.byteCount(), dilatedImage.constBits());
|
||||
texture->_gpuTexture->autoGenerateMips(-1);
|
||||
|
||||
}
|
||||
|
||||
_dilatedTextures.insert(dilation, texture);
|
||||
}
|
||||
return texture;
|
||||
}
|
||||
|
||||
void DilatableNetworkTexture::imageLoaded(const QImage& image) {
|
||||
_image = image;
|
||||
|
||||
// scan out from the center to find inner and outer radii
|
||||
int halfWidth = image.width() / 2;
|
||||
int halfHeight = image.height() / 2;
|
||||
const int BLACK_THRESHOLD = 32;
|
||||
while (_innerRadius < halfWidth && qGray(image.pixel(halfWidth + _innerRadius, halfHeight)) < BLACK_THRESHOLD) {
|
||||
_innerRadius++;
|
||||
}
|
||||
_outerRadius = _innerRadius;
|
||||
const int TRANSPARENT_THRESHOLD = 32;
|
||||
while (_outerRadius < halfWidth && qAlpha(image.pixel(halfWidth + _outerRadius, halfHeight)) > TRANSPARENT_THRESHOLD) {
|
||||
_outerRadius++;
|
||||
}
|
||||
|
||||
// clear out any textures we generated before loading
|
||||
_dilatedTextures.clear();
|
||||
}
|
||||
|
||||
void DilatableNetworkTexture::reinsert() {
|
||||
static_cast<TextureCache*>(_cache.data())->_dilatableNetworkTextures.insert(_url,
|
||||
qWeakPointerCast<NetworkTexture, Resource>(_self));
|
||||
}
|
||||
|
|
@ -1,40 +1,40 @@
|
|||
//
|
||||
// Shader.cpp
|
||||
// libraries/gpu/src/gpu
|
||||
//
|
||||
// Created by Sam Gateau on 2/27/2015.
|
||||
// Copyright 2014 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#include "Shader.h"
|
||||
#include <math.h>
|
||||
#include <QDebug>
|
||||
|
||||
#include "Context.h"
|
||||
|
||||
using namespace gpu;
|
||||
|
||||
Shader::Shader(Type type, const Source& source):
|
||||
_source(source),
|
||||
_type(type)
|
||||
{
|
||||
}
|
||||
|
||||
Shader::Shader(Type type, Pointer& vertex, Pointer& pixel):
|
||||
_type(type)
|
||||
{
|
||||
_shaders.resize(2);
|
||||
_shaders[VERTEX] = vertex;
|
||||
_shaders[PIXEL] = pixel;
|
||||
}
|
||||
|
||||
|
||||
Shader::~Shader()
|
||||
{
|
||||
}
|
||||
Shader* Shader::createVertex(const Source& source) {
|
||||
Shader* shader = new Shader(VERTEX, source);
|
||||
|
|
|
@ -15,6 +15,8 @@
|
|||
#include <string>
|
||||
#include <memory>
|
||||
#include <set>
|
||||
|
||||
#include <QUrl>
|
||||
|
||||
namespace gpu {
|
||||
|
||||
|
|
|
@ -768,3 +768,28 @@ void SphericalHarmonics::evalFromTexture(const Texture& texture) {
|
|||
L22 = coefs[8];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// TextureSource
|
||||
TextureSource::TextureSource() {
|
||||
}
|
||||
|
||||
TextureSource::~TextureSource() {
|
||||
}
|
||||
|
||||
void TextureSource::reset(const QUrl& url) {
|
||||
_imageUrl = url;
|
||||
}
|
||||
|
||||
void TextureSource::resetTexture(gpu::Texture* texture) {
|
||||
_gpuTexture.reset(texture);
|
||||
}
|
||||
|
||||
bool TextureSource::isDefined() const {
|
||||
if (_gpuTexture) {
|
||||
return _gpuTexture->isDefined();
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -15,6 +15,8 @@
|
|||
|
||||
#include <algorithm> //min max and more
|
||||
|
||||
#include <QUrl>
|
||||
|
||||
namespace gpu {
|
||||
|
||||
// The spherical harmonics are a nice tool for cubemaps, so if required, the irradiance SH can be automatically generated
|
||||
|
@ -356,7 +358,7 @@ public:
|
|||
|
||||
protected:
|
||||
std::unique_ptr< Storage > _storage;
|
||||
|
||||
|
||||
Stamp _stamp = 0;
|
||||
|
||||
Sampler _sampler;
|
||||
|
@ -380,7 +382,6 @@ protected:
|
|||
bool _autoGenerateMips = false;
|
||||
bool _isIrradianceValid = false;
|
||||
bool _defined = false;
|
||||
|
||||
|
||||
static Texture* create(Type type, const Element& texelFormat, uint16 width, uint16 height, uint16 depth, uint16 numSamples, uint16 numSlices, const Sampler& sampler);
|
||||
|
||||
|
@ -442,6 +443,28 @@ public:
};
typedef std::vector<TextureView> TextureViews;

// TextureSource is the bridge between a URL (or another way to produce an image) and the final gpu::Texture that will be used to render it.
// It provides the mechanism to create a texture using a customizable TextureLoader.
class TextureSource {
public:
    TextureSource();
    ~TextureSource();

    const QUrl& getUrl() const { return _imageUrl; }
    const gpu::TexturePointer getGPUTexture() const { return _gpuTexture; }

    void reset(const QUrl& url);

    void resetTexture(gpu::Texture* texture);

    bool isDefined() const;

protected:
    gpu::TexturePointer _gpuTexture;
    QUrl _imageUrl;
};
typedef std::shared_ptr< TextureSource > TextureSourcePointer;

};
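A small, hedged usage sketch for the TextureSource bridge declared above; the URL and texture dimensions are made up, and resetTexture is used as shown in this commit (it hands the raw gpu::Texture* to an internal TexturePointer):

// Sketch: bind a URL to a TextureSource, then hand it the decoded gpu texture.
auto source = std::make_shared<gpu::TextureSource>();
source->reset(QUrl("http://example.com/diffuse.png"));   // hypothetical URL
// ... after the image has been decoded and uploaded ...
source->resetTexture(gpu::Texture::create2D(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA), 512, 512));
bool ready = source->isDefined();   // true once the wrapped gpu::Texture reports itself defined
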
|
||||
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
set(TARGET_NAME gpu-networking)
set(TARGET_NAME model-networking)

# use setup_hifi_library macro to setup our project and link appropriate Qt modules
setup_hifi_library()

@ -7,5 +7,5 @@ add_dependency_external_projects(glm)
find_package(GLM REQUIRED)
target_include_directories(${TARGET_NAME} PUBLIC ${GLM_INCLUDE_DIRS})

link_hifi_libraries(shared networking gpu)
link_hifi_libraries(shared networking gpu model)
|
506
libraries/model-networking/src/model-networking/ModelCache.cpp
Normal file
|
@ -0,0 +1,506 @@
|
|||
//
|
||||
// ModelCache.cpp
|
||||
// libraries/model-networking/src/model-networking
|
||||
//
|
||||
// Created by Andrzej Kapolka on 6/21/13.
|
||||
// Copyright 2013 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#include "ModelCache.h"
|
||||
|
||||
#include <cmath>
|
||||
|
||||
#include <QNetworkReply>
|
||||
#include <QThreadPool>
|
||||
|
||||
#include <FSTReader.h>
|
||||
#include <NumericalConstants.h>
|
||||
|
||||
#include "TextureCache.h"
|
||||
#include "ModelNetworkingLogging.h"
|
||||
|
||||
#include "gpu/StandardShaderLib.h"
|
||||
|
||||
#include "model/TextureMap.h"
|
||||
|
||||
//#define WANT_DEBUG
|
||||
|
||||
ModelCache::ModelCache()
|
||||
{
|
||||
const qint64 GEOMETRY_DEFAULT_UNUSED_MAX_SIZE = DEFAULT_UNUSED_MAX_SIZE;
|
||||
setUnusedResourceCacheSize(GEOMETRY_DEFAULT_UNUSED_MAX_SIZE);
|
||||
}
|
||||
|
||||
ModelCache::~ModelCache() {
|
||||
}
|
||||
|
||||
QSharedPointer<Resource> ModelCache::createResource(const QUrl& url, const QSharedPointer<Resource>& fallback,
|
||||
bool delayLoad, const void* extra) {
|
||||
// NetworkGeometry is no longer a subclass of Resource, but requires this method because it is pure virtual.
|
||||
assert(false);
|
||||
return QSharedPointer<Resource>();
|
||||
}
|
||||
|
||||
|
||||
GeometryReader::GeometryReader(const QUrl& url, const QByteArray& data, const QVariantHash& mapping) :
|
||||
_url(url),
|
||||
_data(data),
|
||||
_mapping(mapping) {
|
||||
}
|
||||
|
||||
void GeometryReader::run() {
|
||||
try {
|
||||
if (_data.isEmpty()) {
|
||||
throw QString("Reply is NULL ?!");
|
||||
}
|
||||
QString urlname = _url.path().toLower();
|
||||
bool urlValid = true;
|
||||
urlValid &= !urlname.isEmpty();
|
||||
urlValid &= !_url.path().isEmpty();
|
||||
urlValid &= _url.path().toLower().endsWith(".fbx") || _url.path().toLower().endsWith(".obj");
|
||||
|
||||
if (urlValid) {
|
||||
// Let's read the binaries from the network
|
||||
FBXGeometry* fbxgeo = nullptr;
|
||||
if (_url.path().toLower().endsWith(".fbx")) {
|
||||
const bool grabLightmaps = true;
|
||||
const float lightmapLevel = 1.0f;
|
||||
fbxgeo = readFBX(_data, _mapping, _url.path(), grabLightmaps, lightmapLevel);
|
||||
} else if (_url.path().toLower().endsWith(".obj")) {
|
||||
fbxgeo = OBJReader().readOBJ(_data, _mapping, _url);
|
||||
} else {
|
||||
QString errorStr("usupported format");
|
||||
emit onError(NetworkGeometry::ModelParseError, errorStr);
|
||||
}
|
||||
emit onSuccess(fbxgeo);
|
||||
} else {
|
||||
throw QString("url is invalid");
|
||||
}
|
||||
|
||||
} catch (const QString& error) {
|
||||
qCDebug(modelnetworking) << "Error reading " << _url << ": " << error;
|
||||
emit onError(NetworkGeometry::ModelParseError, error);
|
||||
}
|
||||
}
|
||||
|
||||
NetworkGeometry::NetworkGeometry(const QUrl& url, bool delayLoad, const QVariantHash& mapping, const QUrl& textureBaseUrl) :
|
||||
_url(url),
|
||||
_mapping(mapping),
|
||||
_textureBaseUrl(textureBaseUrl.isValid() ? textureBaseUrl : url) {
|
||||
|
||||
if (delayLoad) {
|
||||
_state = DelayState;
|
||||
} else {
|
||||
attemptRequestInternal();
|
||||
}
|
||||
}
|
||||
|
||||
NetworkGeometry::~NetworkGeometry() {
|
||||
if (_resource) {
|
||||
_resource->deleteLater();
|
||||
}
|
||||
}
|
||||
|
||||
void NetworkGeometry::attemptRequest() {
|
||||
if (_state == DelayState) {
|
||||
attemptRequestInternal();
|
||||
}
|
||||
}
|
||||
|
||||
void NetworkGeometry::attemptRequestInternal() {
|
||||
if (_url.path().toLower().endsWith(".fst")) {
|
||||
_mappingUrl = _url;
|
||||
requestMapping(_url);
|
||||
} else {
|
||||
_modelUrl = _url;
|
||||
requestModel(_url);
|
||||
}
|
||||
}
|
||||
|
||||
bool NetworkGeometry::isLoaded() const {
|
||||
return _state == SuccessState;
|
||||
}
|
||||
|
||||
bool NetworkGeometry::isLoadedWithTextures() const {
|
||||
if (!isLoaded()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!_isLoadedWithTextures) {
|
||||
for (auto&& material : _materials) {
|
||||
if ((material->diffuseTexture && !material->diffuseTexture->isLoaded()) ||
|
||||
(material->normalTexture && !material->normalTexture->isLoaded()) ||
|
||||
(material->specularTexture && !material->specularTexture->isLoaded()) ||
|
||||
(material->emissiveTexture && !material->emissiveTexture->isLoaded())) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
_isLoadedWithTextures = true;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void NetworkGeometry::setTextureWithNameToURL(const QString& name, const QUrl& url) {
|
||||
|
||||
|
||||
if (_meshes.size() > 0) {
|
||||
auto textureCache = DependencyManager::get<TextureCache>();
|
||||
for (auto&& material : _materials) {
|
||||
QSharedPointer<NetworkTexture> matchingTexture = QSharedPointer<NetworkTexture>();
|
||||
if (material->diffuseTextureName == name) {
|
||||
material->diffuseTexture = textureCache->getTexture(url, DEFAULT_TEXTURE);
|
||||
} else if (material->normalTextureName == name) {
|
||||
material->normalTexture = textureCache->getTexture(url);
|
||||
} else if (material->specularTextureName == name) {
|
||||
material->specularTexture = textureCache->getTexture(url);
|
||||
} else if (material->emissiveTextureName == name) {
|
||||
material->emissiveTexture = textureCache->getTexture(url);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
qCWarning(modelnetworking) << "Ignoring setTextureWithNameToURL(), geometry not ready." << name << url;
|
||||
}
|
||||
_isLoadedWithTextures = false;
|
||||
}
|
||||
|
||||
QStringList NetworkGeometry::getTextureNames() const {
|
||||
QStringList result;
|
||||
for (auto&& material : _materials) {
|
||||
if (!material->diffuseTextureName.isEmpty() && material->diffuseTexture) {
|
||||
QString textureURL = material->diffuseTexture->getURL().toString();
|
||||
result << material->diffuseTextureName + ":" + textureURL;
|
||||
}
|
||||
|
||||
if (!material->normalTextureName.isEmpty() && material->normalTexture) {
|
||||
QString textureURL = material->normalTexture->getURL().toString();
|
||||
result << material->normalTextureName + ":" + textureURL;
|
||||
}
|
||||
|
||||
if (!material->specularTextureName.isEmpty() && material->specularTexture) {
|
||||
QString textureURL = material->specularTexture->getURL().toString();
|
||||
result << material->specularTextureName + ":" + textureURL;
|
||||
}
|
||||
|
||||
if (!material->emissiveTextureName.isEmpty() && material->emissiveTexture) {
|
||||
QString textureURL = material->emissiveTexture->getURL().toString();
|
||||
result << material->emissiveTextureName + ":" + textureURL;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
void NetworkGeometry::requestMapping(const QUrl& url) {
|
||||
_state = RequestMappingState;
|
||||
if (_resource) {
|
||||
_resource->deleteLater();
|
||||
}
|
||||
_resource = new Resource(url, false);
|
||||
connect(_resource, &Resource::loaded, this, &NetworkGeometry::mappingRequestDone);
|
||||
connect(_resource, &Resource::failed, this, &NetworkGeometry::mappingRequestError);
|
||||
}
|
||||
|
||||
void NetworkGeometry::requestModel(const QUrl& url) {
|
||||
_state = RequestModelState;
|
||||
if (_resource) {
|
||||
_resource->deleteLater();
|
||||
}
|
||||
_modelUrl = url;
|
||||
_resource = new Resource(url, false);
|
||||
connect(_resource, &Resource::loaded, this, &NetworkGeometry::modelRequestDone);
|
||||
connect(_resource, &Resource::failed, this, &NetworkGeometry::modelRequestError);
|
||||
}
|
||||
|
||||
void NetworkGeometry::mappingRequestDone(const QByteArray& data) {
|
||||
assert(_state == RequestMappingState);
|
||||
|
||||
// parse the mapping file
|
||||
_mapping = FSTReader::readMapping(data);
|
||||
|
||||
QUrl replyUrl = _mappingUrl;
|
||||
QString modelUrlStr = _mapping.value("filename").toString();
|
||||
if (modelUrlStr.isNull()) {
|
||||
qCDebug(modelnetworking) << "Mapping file " << _url << "has no \"filename\" entry";
|
||||
emit onFailure(*this, MissingFilenameInMapping);
|
||||
} else {
|
||||
// read _textureBase from mapping file, if present
|
||||
QString texdir = _mapping.value("texdir").toString();
|
||||
if (!texdir.isNull()) {
|
||||
if (!texdir.endsWith('/')) {
|
||||
texdir += '/';
|
||||
}
|
||||
_textureBaseUrl = replyUrl.resolved(texdir);
|
||||
}
|
||||
|
||||
_modelUrl = replyUrl.resolved(modelUrlStr);
|
||||
requestModel(_modelUrl);
|
||||
}
|
||||
}
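The texdir handling above relies on QUrl::resolved(); a hedged example with made-up URLs showing why the trailing slash is appended first:

// Sketch: deriving the texture base and model URLs from an .fst mapping URL.
QUrl mappingUrl("http://example.com/models/avatar.fst");    // hypothetical
QString texdir = "textures";                                // as read from the mapping
if (!texdir.endsWith('/')) {
    texdir += '/';   // ensures later texture filenames resolve inside the directory
}
QUrl textureBase = mappingUrl.resolved(QUrl(texdir));       // http://example.com/models/textures/
QUrl modelUrl = mappingUrl.resolved(QUrl("avatar.fbx"));    // http://example.com/models/avatar.fbx
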
|
||||
|
||||
void NetworkGeometry::mappingRequestError(QNetworkReply::NetworkError error) {
|
||||
assert(_state == RequestMappingState);
|
||||
_state = ErrorState;
|
||||
emit onFailure(*this, MappingRequestError);
|
||||
}
|
||||
|
||||
void NetworkGeometry::modelRequestDone(const QByteArray& data) {
|
||||
assert(_state == RequestModelState);
|
||||
|
||||
_state = ParsingModelState;
|
||||
|
||||
// asynchronously parse the model file.
|
||||
GeometryReader* geometryReader = new GeometryReader(_modelUrl, data, _mapping);
|
||||
connect(geometryReader, SIGNAL(onSuccess(FBXGeometry*)), SLOT(modelParseSuccess(FBXGeometry*)));
|
||||
connect(geometryReader, SIGNAL(onError(int, QString)), SLOT(modelParseError(int, QString)));
|
||||
|
||||
QThreadPool::globalInstance()->start(geometryReader);
|
||||
}
|
||||
|
||||
void NetworkGeometry::modelRequestError(QNetworkReply::NetworkError error) {
|
||||
assert(_state == RequestModelState);
|
||||
_state = ErrorState;
|
||||
emit onFailure(*this, ModelRequestError);
|
||||
}
|
||||
|
||||
static NetworkMesh* buildNetworkMesh(const FBXMesh& mesh, const QUrl& textureBaseUrl) {
|
||||
auto textureCache = DependencyManager::get<TextureCache>();
|
||||
NetworkMesh* networkMesh = new NetworkMesh();
|
||||
|
||||
int totalIndices = 0;
|
||||
bool checkForTexcoordLightmap = false;
|
||||
|
||||
|
||||
|
||||
// process network parts
|
||||
foreach (const FBXMeshPart& part, mesh.parts) {
|
||||
totalIndices += (part.quadIndices.size() + part.triangleIndices.size());
|
||||
}
|
||||
|
||||
// initialize index buffer
|
||||
{
|
||||
networkMesh->_indexBuffer = std::make_shared<gpu::Buffer>();
|
||||
networkMesh->_indexBuffer->resize(totalIndices * sizeof(int));
|
||||
int offset = 0;
|
||||
foreach(const FBXMeshPart& part, mesh.parts) {
|
||||
networkMesh->_indexBuffer->setSubData(offset, part.quadIndices.size() * sizeof(int),
|
||||
(gpu::Byte*) part.quadIndices.constData());
|
||||
offset += part.quadIndices.size() * sizeof(int);
|
||||
networkMesh->_indexBuffer->setSubData(offset, part.triangleIndices.size() * sizeof(int),
|
||||
(gpu::Byte*) part.triangleIndices.constData());
|
||||
offset += part.triangleIndices.size() * sizeof(int);
|
||||
}
|
||||
}
|
||||
|
||||
// initialize vertex buffer
|
||||
{
|
||||
networkMesh->_vertexBuffer = std::make_shared<gpu::Buffer>();
|
||||
// if we don't need to do any blending, the positions/normals can be static
|
||||
if (mesh.blendshapes.isEmpty()) {
|
||||
int normalsOffset = mesh.vertices.size() * sizeof(glm::vec3);
|
||||
int tangentsOffset = normalsOffset + mesh.normals.size() * sizeof(glm::vec3);
|
||||
int colorsOffset = tangentsOffset + mesh.tangents.size() * sizeof(glm::vec3);
|
||||
int texCoordsOffset = colorsOffset + mesh.colors.size() * sizeof(glm::vec3);
|
||||
int texCoords1Offset = texCoordsOffset + mesh.texCoords.size() * sizeof(glm::vec2);
|
||||
int clusterIndicesOffset = texCoords1Offset + mesh.texCoords1.size() * sizeof(glm::vec2);
|
||||
int clusterWeightsOffset = clusterIndicesOffset + mesh.clusterIndices.size() * sizeof(glm::vec4);
|
||||
|
||||
networkMesh->_vertexBuffer->resize(clusterWeightsOffset + mesh.clusterWeights.size() * sizeof(glm::vec4));
|
||||
|
||||
networkMesh->_vertexBuffer->setSubData(0, mesh.vertices.size() * sizeof(glm::vec3), (gpu::Byte*) mesh.vertices.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(normalsOffset, mesh.normals.size() * sizeof(glm::vec3), (gpu::Byte*) mesh.normals.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(tangentsOffset,
|
||||
mesh.tangents.size() * sizeof(glm::vec3), (gpu::Byte*) mesh.tangents.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(colorsOffset, mesh.colors.size() * sizeof(glm::vec3), (gpu::Byte*) mesh.colors.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(texCoordsOffset,
|
||||
mesh.texCoords.size() * sizeof(glm::vec2), (gpu::Byte*) mesh.texCoords.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(texCoords1Offset,
|
||||
mesh.texCoords1.size() * sizeof(glm::vec2), (gpu::Byte*) mesh.texCoords1.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(clusterIndicesOffset,
|
||||
mesh.clusterIndices.size() * sizeof(glm::vec4), (gpu::Byte*) mesh.clusterIndices.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(clusterWeightsOffset,
|
||||
mesh.clusterWeights.size() * sizeof(glm::vec4), (gpu::Byte*) mesh.clusterWeights.constData());
|
||||
|
||||
// otherwise, at least the cluster indices/weights can be static
|
||||
networkMesh->_vertexStream = std::make_shared<gpu::BufferStream>();
|
||||
networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, 0, sizeof(glm::vec3));
|
||||
if (mesh.normals.size()) networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, normalsOffset, sizeof(glm::vec3));
|
||||
if (mesh.tangents.size()) networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, tangentsOffset, sizeof(glm::vec3));
|
||||
if (mesh.colors.size()) networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, colorsOffset, sizeof(glm::vec3));
|
||||
if (mesh.texCoords.size()) networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, texCoordsOffset, sizeof(glm::vec2));
|
||||
if (mesh.texCoords1.size()) networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, texCoords1Offset, sizeof(glm::vec2));
|
||||
if (mesh.clusterIndices.size()) networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, clusterIndicesOffset, sizeof(glm::vec4));
|
||||
if (mesh.clusterWeights.size()) networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, clusterWeightsOffset, sizeof(glm::vec4));
|
||||
|
||||
int channelNum = 0;
|
||||
networkMesh->_vertexFormat = std::make_shared<gpu::Stream::Format>();
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::POSITION, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
|
||||
if (mesh.normals.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::NORMAL, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ));
|
||||
if (mesh.tangents.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::TANGENT, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ));
|
||||
if (mesh.colors.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::COLOR, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::RGB));
|
||||
if (mesh.texCoords.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::TEXCOORD, channelNum++, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV));
|
||||
if (mesh.texCoords1.size()) {
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::TEXCOORD1, channelNum++, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV));
|
||||
// } else if (checkForTexcoordLightmap && mesh.texCoords.size()) {
|
||||
} else if (mesh.texCoords.size()) {
|
||||
// need lightmap texcoord UV but doesn't have uv#1 so just reuse the same channel
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::TEXCOORD1, channelNum - 1, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV));
|
||||
}
|
||||
if (mesh.clusterIndices.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_INDEX, channelNum++, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW));
|
||||
if (mesh.clusterWeights.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT, channelNum++, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW));
|
||||
}
|
||||
else {
|
||||
int colorsOffset = mesh.tangents.size() * sizeof(glm::vec3);
|
||||
int texCoordsOffset = colorsOffset + mesh.colors.size() * sizeof(glm::vec3);
|
||||
int clusterIndicesOffset = texCoordsOffset + mesh.texCoords.size() * sizeof(glm::vec2);
|
||||
int clusterWeightsOffset = clusterIndicesOffset + mesh.clusterIndices.size() * sizeof(glm::vec4);
|
||||
|
||||
networkMesh->_vertexBuffer->resize(clusterWeightsOffset + mesh.clusterWeights.size() * sizeof(glm::vec4));
|
||||
networkMesh->_vertexBuffer->setSubData(0, mesh.tangents.size() * sizeof(glm::vec3), (gpu::Byte*) mesh.tangents.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(colorsOffset, mesh.colors.size() * sizeof(glm::vec3), (gpu::Byte*) mesh.colors.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(texCoordsOffset,
|
||||
mesh.texCoords.size() * sizeof(glm::vec2), (gpu::Byte*) mesh.texCoords.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(clusterIndicesOffset,
|
||||
mesh.clusterIndices.size() * sizeof(glm::vec4), (gpu::Byte*) mesh.clusterIndices.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(clusterWeightsOffset,
|
||||
mesh.clusterWeights.size() * sizeof(glm::vec4), (gpu::Byte*) mesh.clusterWeights.constData());
|
||||
|
||||
networkMesh->_vertexStream = std::make_shared<gpu::BufferStream>();
|
||||
if (mesh.tangents.size()) networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, 0, sizeof(glm::vec3));
|
||||
if (mesh.colors.size()) networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, colorsOffset, sizeof(glm::vec3));
|
||||
if (mesh.texCoords.size()) networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, texCoordsOffset, sizeof(glm::vec2));
|
||||
if (mesh.clusterIndices.size()) networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, clusterIndicesOffset, sizeof(glm::vec4));
|
||||
if (mesh.clusterWeights.size()) networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, clusterWeightsOffset, sizeof(glm::vec4));
|
||||
|
||||
int channelNum = 0;
|
||||
networkMesh->_vertexFormat = std::make_shared<gpu::Stream::Format>();
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::POSITION, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ));
|
||||
if (mesh.normals.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::NORMAL, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ));
|
||||
if (mesh.tangents.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::TANGENT, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ));
|
||||
if (mesh.colors.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::COLOR, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::RGB));
|
||||
if (mesh.texCoords.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::TEXCOORD, channelNum++, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV));
|
||||
if (mesh.clusterIndices.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_INDEX, channelNum++, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW));
|
||||
if (mesh.clusterWeights.size()) networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT, channelNum++, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW));
|
||||
}
|
||||
}
|
||||
|
||||
return networkMesh;
|
||||
}
|
||||
|
||||
static NetworkMaterial* buildNetworkMaterial(const FBXMaterial& material, const QUrl& textureBaseUrl) {
|
||||
auto textureCache = DependencyManager::get<TextureCache>();
|
||||
NetworkMaterial* networkMaterial = new NetworkMaterial();
|
||||
|
||||
int totalIndices = 0;
|
||||
bool checkForTexcoordLightmap = false;
|
||||
|
||||
networkMaterial->_material = material._material;
|
||||
|
||||
if (!material.diffuseTexture.filename.isEmpty()) {
|
||||
networkMaterial->diffuseTexture = textureCache->getTexture(textureBaseUrl.resolved(QUrl(material.diffuseTexture.filename)), DEFAULT_TEXTURE, material.diffuseTexture.content);
|
||||
networkMaterial->diffuseTextureName = material.diffuseTexture.name;
|
||||
|
||||
auto diffuseMap = model::TextureMapPointer(new model::TextureMap());
|
||||
diffuseMap->setTextureSource(networkMaterial->diffuseTexture->_textureSource);
|
||||
diffuseMap->setTextureTransform(material.diffuseTexture.transform);
|
||||
|
||||
material._material->setTextureMap(model::MaterialKey::DIFFUSE_MAP, diffuseMap);
|
||||
}
|
||||
if (!material.normalTexture.filename.isEmpty()) {
|
||||
networkMaterial->normalTexture = textureCache->getTexture(textureBaseUrl.resolved(QUrl(material.normalTexture.filename)), (material.normalTexture.isBumpmap ? BUMP_TEXTURE : NORMAL_TEXTURE), material.normalTexture.content);
|
||||
networkMaterial->normalTextureName = material.normalTexture.name;
|
||||
|
||||
auto normalMap = model::TextureMapPointer(new model::TextureMap());
|
||||
normalMap->setTextureSource(networkMaterial->normalTexture->_textureSource);
|
||||
|
||||
material._material->setTextureMap(model::MaterialKey::NORMAL_MAP, normalMap);
|
||||
}
|
||||
if (!material.specularTexture.filename.isEmpty()) {
|
||||
networkMaterial->specularTexture = textureCache->getTexture(textureBaseUrl.resolved(QUrl(material.specularTexture.filename)), SPECULAR_TEXTURE, material.specularTexture.content);
|
||||
networkMaterial->specularTextureName = material.specularTexture.name;
|
||||
|
||||
auto glossMap = model::TextureMapPointer(new model::TextureMap());
|
||||
glossMap->setTextureSource(networkMaterial->specularTexture->_textureSource);
|
||||
|
||||
material._material->setTextureMap(model::MaterialKey::GLOSS_MAP, glossMap);
|
||||
}
|
||||
if (!material.emissiveTexture.filename.isEmpty()) {
|
||||
networkMaterial->emissiveTexture = textureCache->getTexture(textureBaseUrl.resolved(QUrl(material.emissiveTexture.filename)), EMISSIVE_TEXTURE, material.emissiveTexture.content);
|
||||
networkMaterial->emissiveTextureName = material.emissiveTexture.name;
|
||||
|
||||
checkForTexcoordLightmap = true;
|
||||
|
||||
auto lightmapMap = model::TextureMapPointer(new model::TextureMap());
|
||||
lightmapMap->setTextureSource(networkMaterial->emissiveTexture->_textureSource);
|
||||
lightmapMap->setTextureTransform(material.emissiveTexture.transform);
|
||||
lightmapMap->setLightmapOffsetScale(material.emissiveParams.x, material.emissiveParams.y);
|
||||
|
||||
material._material->setTextureMap(model::MaterialKey::LIGHTMAP_MAP, lightmapMap);
|
||||
}
|
||||
|
||||
return networkMaterial;
|
||||
}
|
||||
|
||||
|
||||
void NetworkGeometry::modelParseSuccess(FBXGeometry* geometry) {
|
||||
// assume ownership of the geometry pointer
|
||||
_geometry.reset(geometry);
|
||||
|
||||
|
||||
|
||||
foreach(const FBXMesh& mesh, _geometry->meshes) {
|
||||
_meshes.emplace_back(buildNetworkMesh(mesh, _textureBaseUrl));
|
||||
}
|
||||
|
||||
QHash<QString, int> fbxMatIDToMatID;
|
||||
foreach(const FBXMaterial& material, _geometry->materials) {
|
||||
fbxMatIDToMatID[material.materialID] = _materials.size();
|
||||
_materials.emplace_back(buildNetworkMaterial(material, _textureBaseUrl));
|
||||
}
|
||||
|
||||
|
||||
int meshID = 0;
|
||||
foreach(const FBXMesh& mesh, _geometry->meshes) {
|
||||
int partID = 0;
|
||||
foreach (const FBXMeshPart& part, mesh.parts) {
|
||||
NetworkShape* networkShape = new NetworkShape();
|
||||
networkShape->_meshID = meshID;
|
||||
networkShape->_partID = partID;
|
||||
networkShape->_materialID = fbxMatIDToMatID[part.materialID];
|
||||
_shapes.emplace_back(networkShape);
|
||||
partID++;
|
||||
}
|
||||
meshID++;
|
||||
}
|
||||
|
||||
_state = SuccessState;
|
||||
emit onSuccess(*this, *_geometry.get());
|
||||
|
||||
delete _resource;
|
||||
_resource = nullptr;
|
||||
}
|
||||
|
||||
void NetworkGeometry::modelParseError(int error, QString str) {
|
||||
_state = ErrorState;
|
||||
emit onFailure(*this, (NetworkGeometry::Error)error);
|
||||
|
||||
delete _resource;
|
||||
_resource = nullptr;
|
||||
}
|
||||
|
||||
|
||||
const NetworkMaterial* NetworkGeometry::getShapeMaterial(int shapeID) {
|
||||
if ((shapeID >= 0) && (shapeID < _shapes.size())) {
|
||||
int materialID = _shapes[shapeID]->_materialID;
|
||||
if ((materialID >= 0) && (materialID < _materials.size())) {
|
||||
return _materials[materialID].get();
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
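A minimal consumer-side sketch (not part of this commit) of the shape-to-material indirection built in modelParseSuccess(); "geometry" and "shapeID" are placeholders for a loaded NetworkGeometry and a per-mesh-part shape index.

const NetworkMaterial* networkMaterial = geometry->getShapeMaterial(shapeID);
if (networkMaterial && networkMaterial->_material) {
    // the model::Material carries the per-channel texture maps set up by buildNetworkMaterial()
    const auto& textureMaps = networkMaterial->_material->getTextureMaps();
}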
|
||||
|
205
libraries/model-networking/src/model-networking/ModelCache.h
Normal file
|
@ -0,0 +1,205 @@
|
|||
//
|
||||
// ModelCache.h
|
||||
// libraries/model-networking/src/model-networking
|
||||
//
|
||||
// Created by Sam Gateau on 9/21/15.
|
||||
// Copyright 2013 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#ifndef hifi_ModelCache_h
|
||||
#define hifi_ModelCache_h
|
||||
|
||||
#include <QMap>
|
||||
#include <QRunnable>
|
||||
|
||||
#include <DependencyManager.h>
|
||||
#include <ResourceCache.h>
|
||||
|
||||
#include "FBXReader.h"
|
||||
#include "OBJReader.h"
|
||||
|
||||
#include <gpu/Batch.h>
|
||||
#include <gpu/Stream.h>
|
||||
|
||||
|
||||
#include <model/Material.h>
|
||||
#include <model/Asset.h>
|
||||
|
||||
class NetworkGeometry;
|
||||
class NetworkMesh;
|
||||
class NetworkTexture;
|
||||
class NetworkMaterial;
|
||||
class NetworkShape;
|
||||
|
||||
/// Stores cached geometry.
|
||||
class ModelCache : public ResourceCache, public Dependency {
|
||||
Q_OBJECT
|
||||
SINGLETON_DEPENDENCY
|
||||
|
||||
public:
|
||||
virtual QSharedPointer<Resource> createResource(const QUrl& url, const QSharedPointer<Resource>& fallback,
|
||||
bool delayLoad, const void* extra);
|
||||
|
||||
/// Loads geometry from the specified URL.
|
||||
/// \param fallback a fallback URL to load if the desired one is unavailable
|
||||
/// \param delayLoad if true, don't load the geometry immediately; wait until load is first requested
|
||||
QSharedPointer<NetworkGeometry> getGeometry(const QUrl& url, const QUrl& fallback = QUrl(), bool delayLoad = false);
|
||||
|
||||
/// Set a batch to the simple pipeline, returning the previous pipeline
|
||||
void useSimpleDrawPipeline(gpu::Batch& batch, bool noBlend = false);
|
||||
|
||||
private:
|
||||
ModelCache();
|
||||
virtual ~ModelCache();
|
||||
|
||||
QHash<QUrl, QWeakPointer<NetworkGeometry> > _networkGeometry;
|
||||
};
|
||||
|
||||
class NetworkGeometry : public QObject {
|
||||
Q_OBJECT
|
||||
|
||||
public:
|
||||
// mapping is only used if url is a .fbx or .obj file, it is essentially the content of an fst file.
|
||||
// if delayLoad is true, the url will not be immediately downloaded.
|
||||
// use the attemptRequest method to initiate the download.
|
||||
NetworkGeometry(const QUrl& url, bool delayLoad, const QVariantHash& mapping, const QUrl& textureBaseUrl = QUrl());
|
||||
~NetworkGeometry();
|
||||
|
||||
const QUrl& getURL() const { return _url; }
|
||||
|
||||
void attemptRequest();
|
||||
|
||||
// true when the geometry is loaded (but maybe not its associated textures)
|
||||
bool isLoaded() const;
|
||||
|
||||
// true when the requested geometry and its textures are loaded.
|
||||
bool isLoadedWithTextures() const;
|
||||
|
||||
// WARNING: only valid when isLoaded returns true.
|
||||
const FBXGeometry& getFBXGeometry() const { return *_geometry; }
|
||||
const std::vector<std::unique_ptr<NetworkMesh>>& getMeshes() const { return _meshes; }
|
||||
// const model::AssetPointer getAsset() const { return _asset; }
|
||||
|
||||
// model::MeshPointer getShapeMesh(int shapeID);
|
||||
// int getShapePart(int shapeID);
|
||||
|
||||
// This would be the final version
|
||||
// model::MaterialPointer getShapeMaterial(int shapeID);
|
||||
const NetworkMaterial* getShapeMaterial(int shapeID);
|
||||
|
||||
|
||||
void setTextureWithNameToURL(const QString& name, const QUrl& url);
|
||||
QStringList getTextureNames() const;
|
||||
|
||||
enum Error {
|
||||
MissingFilenameInMapping = 0,
|
||||
MappingRequestError,
|
||||
ModelRequestError,
|
||||
ModelParseError
|
||||
};
|
||||
|
||||
signals:
|
||||
// Fired when everything has downloaded and parsed successfully.
|
||||
void onSuccess(NetworkGeometry& networkGeometry, FBXGeometry& fbxGeometry);
|
||||
|
||||
// Fired when something went wrong.
|
||||
void onFailure(NetworkGeometry& networkGeometry, Error error);
|
||||
|
||||
protected slots:
|
||||
void mappingRequestDone(const QByteArray& data);
|
||||
void mappingRequestError(QNetworkReply::NetworkError error);
|
||||
|
||||
void modelRequestDone(const QByteArray& data);
|
||||
void modelRequestError(QNetworkReply::NetworkError error);
|
||||
|
||||
void modelParseSuccess(FBXGeometry* geometry);
|
||||
void modelParseError(int error, QString str);
|
||||
|
||||
protected:
|
||||
void attemptRequestInternal();
|
||||
void requestMapping(const QUrl& url);
|
||||
void requestModel(const QUrl& url);
|
||||
|
||||
enum State { DelayState,
|
||||
RequestMappingState,
|
||||
RequestModelState,
|
||||
ParsingModelState,
|
||||
SuccessState,
|
||||
ErrorState };
|
||||
State _state;
|
||||
|
||||
QUrl _url;
|
||||
QUrl _mappingUrl;
|
||||
QUrl _modelUrl;
|
||||
QVariantHash _mapping;
|
||||
QUrl _textureBaseUrl;
|
||||
|
||||
Resource* _resource = nullptr;
|
||||
std::unique_ptr<FBXGeometry> _geometry; // This should go away eventually once we can put everything we need in the model::AssetPointer
|
||||
std::vector<std::unique_ptr<NetworkMesh>> _meshes;
|
||||
std::vector<std::unique_ptr<NetworkMaterial>> _materials;
|
||||
std::vector<std::unique_ptr<NetworkShape>> _shapes;
|
||||
|
||||
|
||||
// The model asset created from this NetworkGeometry
|
||||
// model::AssetPointer _asset;
|
||||
|
||||
// cache for isLoadedWithTextures()
|
||||
mutable bool _isLoadedWithTextures = false;
|
||||
};
|
||||
|
||||
/// Reads geometry in a worker thread.
|
||||
class GeometryReader : public QObject, public QRunnable {
|
||||
Q_OBJECT
|
||||
public:
|
||||
GeometryReader(const QUrl& url, const QByteArray& data, const QVariantHash& mapping);
|
||||
virtual void run();
|
||||
signals:
|
||||
void onSuccess(FBXGeometry* geometry);
|
||||
void onError(int error, QString str);
|
||||
private:
|
||||
QUrl _url;
|
||||
QByteArray _data;
|
||||
QVariantHash _mapping;
|
||||
};
|
||||
|
||||
|
||||
class NetworkShape {
|
||||
public:
|
||||
int _meshID{ -1 };
|
||||
int _partID{ -1 };
|
||||
int _materialID{ -1 };
|
||||
};
|
||||
|
||||
class NetworkMaterial {
|
||||
public:
|
||||
model::MaterialPointer _material;
|
||||
QString diffuseTextureName;
|
||||
QSharedPointer<NetworkTexture> diffuseTexture;
|
||||
QString normalTextureName;
|
||||
QSharedPointer<NetworkTexture> normalTexture;
|
||||
QString specularTextureName;
|
||||
QSharedPointer<NetworkTexture> specularTexture;
|
||||
QString emissiveTextureName;
|
||||
QSharedPointer<NetworkTexture> emissiveTexture;
|
||||
};
|
||||
|
||||
|
||||
/// The state associated with a single mesh.
|
||||
class NetworkMesh {
|
||||
public:
|
||||
gpu::BufferPointer _indexBuffer;
|
||||
gpu::BufferPointer _vertexBuffer;
|
||||
|
||||
gpu::BufferStreamPointer _vertexStream;
|
||||
|
||||
gpu::Stream::FormatPointer _vertexFormat;
|
||||
|
||||
int getTranslucentPartCount(const FBXMesh& fbxMesh) const;
|
||||
bool isPartTranslucent(const FBXMesh& fbxMesh, int partIndex) const;
|
||||
};
|
||||
|
||||
#endif // hifi_ModelCache_h
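A minimal usage sketch (not part of this commit) of the cache and load signals declared above; "modelUrl" is a placeholder and the ModelCache dependency is assumed to be registered already.

auto modelCache = DependencyManager::get<ModelCache>();
QSharedPointer<NetworkGeometry> geometry = modelCache->getGeometry(modelUrl);
QObject::connect(geometry.data(), &NetworkGeometry::onSuccess,
    [](NetworkGeometry&, FBXGeometry& fbx) { qDebug() << "parsed meshes:" << fbx.meshes.size(); });
QObject::connect(geometry.data(), &NetworkGeometry::onFailure,
    [](NetworkGeometry& net, NetworkGeometry::Error) { qDebug() << "failed to load" << net.getURL(); });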
|
|
@ -6,6 +6,6 @@
|
|||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#include "GpuNetworkingLogging.h"
|
||||
#include "ModelNetworkingLogging.h"
|
||||
|
||||
Q_LOGGING_CATEGORY(gpunetwork, "hifi.gpu-network")
|
||||
Q_LOGGING_CATEGORY(modelnetworking, "hifi.model-networking")
|
|
@ -8,4 +8,4 @@
|
|||
|
||||
#include <QLoggingCategory>
|
||||
|
||||
Q_DECLARE_LOGGING_CATEGORY(gpunetwork)
|
||||
Q_DECLARE_LOGGING_CATEGORY(modelnetworking)
|
363
libraries/model-networking/src/model-networking/TextureCache.cpp
Normal file
|
@ -0,0 +1,363 @@
|
|||
//
|
||||
// TextureCache.cpp
|
||||
// libraries/model-networking/src
|
||||
//
|
||||
// Created by Andrzej Kapolka on 8/6/13.
|
||||
// Copyright 2013 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#include "TextureCache.h"
|
||||
|
||||
#include <mutex>
|
||||
|
||||
#include <glm/glm.hpp>
|
||||
#include <glm/gtc/random.hpp>
|
||||
|
||||
#include <QNetworkReply>
|
||||
#include <QPainter>
|
||||
#include <QRunnable>
|
||||
#include <QThreadPool>
|
||||
#include <qimagereader.h>
|
||||
#include <PathUtils.h>
|
||||
|
||||
#include <gpu/Batch.h>
|
||||
|
||||
#include "ModelNetworkingLogging.h"
|
||||
|
||||
TextureCache::TextureCache() {
|
||||
const qint64 TEXTURE_DEFAULT_UNUSED_MAX_SIZE = DEFAULT_UNUSED_MAX_SIZE;
|
||||
setUnusedResourceCacheSize(TEXTURE_DEFAULT_UNUSED_MAX_SIZE);
|
||||
}
|
||||
|
||||
TextureCache::~TextureCache() {
|
||||
}
|
||||
|
||||
// use fixed table of permutations. Could also make ordered list programmatically
|
||||
// and then shuffle it. For testing, a fixed table ensures consistent behavior in each run.
|
||||
// this list taken from Ken Perlin's Improved Noise reference implementation (orig. in Java) at
|
||||
// http://mrl.nyu.edu/~perlin/noise/
|
||||
|
||||
const int permutation[256] =
|
||||
{
|
||||
151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225,
|
||||
140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21, 10, 23, 190, 6, 148,
|
||||
247, 120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32,
|
||||
57, 177, 33, 88, 237, 149, 56, 87, 174, 20, 125, 136, 171, 168, 68, 175,
|
||||
74, 165, 71, 134, 139, 48, 27, 166, 77, 146, 158, 231, 83, 111, 229, 122,
|
||||
60, 211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143, 54,
|
||||
65, 25, 63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169,
|
||||
200, 196, 135, 130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64,
|
||||
52, 217, 226, 250, 124, 123, 5, 202, 38, 147, 118, 126, 255, 82, 85, 212,
|
||||
207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28, 42, 223, 183, 170, 213,
|
||||
119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101, 155, 167, 43, 172, 9,
|
||||
129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232, 178, 185, 112, 104,
|
||||
218, 246, 97, 228, 251, 34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241,
|
||||
81, 51, 145, 235, 249, 14, 239, 107, 49, 192, 214, 31, 181, 199, 106, 157,
|
||||
184, 84, 204, 176, 115, 121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93,
|
||||
222, 114, 67, 29, 24, 72, 243, 141, 128, 195, 78, 66, 215, 61, 156, 180
|
||||
};
|
||||
|
||||
#define USE_CHRIS_NOISE 1
|
||||
|
||||
const gpu::TexturePointer& TextureCache::getPermutationNormalTexture() {
|
||||
if (!_permutationNormalTexture) {
|
||||
|
||||
// the first line consists of random permutation offsets
|
||||
unsigned char data[256 * 2 * 3];
|
||||
#if (USE_CHRIS_NOISE==1)
|
||||
for (int i = 0; i < 256; i++) {
|
||||
data[3*i+0] = permutation[i];
|
||||
data[3*i+1] = permutation[i];
|
||||
data[3*i+2] = permutation[i];
|
||||
#else
|
||||
for (int i = 0; i < 256 * 3; i++) {
|
||||
data[i] = rand() % 256;
|
||||
#endif
|
||||
}
|
||||
|
||||
for (int i = 256 * 3; i < 256 * 3 * 2; i += 3) {
|
||||
glm::vec3 randvec = glm::sphericalRand(1.0f);
|
||||
data[i] = ((randvec.x + 1.0f) / 2.0f) * 255.0f;
|
||||
data[i + 1] = ((randvec.y + 1.0f) / 2.0f) * 255.0f;
|
||||
data[i + 2] = ((randvec.z + 1.0f) / 2.0f) * 255.0f;
|
||||
}
|
||||
|
||||
_permutationNormalTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC3, gpu::UINT8, gpu::RGB), 256, 2));
|
||||
_permutationNormalTexture->assignStoredMip(0, _blueTexture->getTexelFormat(), sizeof(data), data);
|
||||
}
|
||||
return _permutationNormalTexture;
|
||||
}
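For reference, the layout built above packs two rows into one 256x2 RGB texture: row 0 repeats the Perlin permutation value in all three channels, and row 1 remaps each component of a random unit vector from [-1, 1] to [0, 255] via (x + 1) / 2 * 255, so x = 0 lands at 127.5 and x = 1 at 255.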
|
||||
|
||||
const unsigned char OPAQUE_WHITE[] = { 0xFF, 0xFF, 0xFF, 0xFF };
|
||||
const unsigned char OPAQUE_GRAY[] = { 0x80, 0x80, 0x80, 0xFF };
|
||||
const unsigned char OPAQUE_BLUE[] = { 0x80, 0x80, 0xFF, 0xFF };
|
||||
const unsigned char OPAQUE_BLACK[] = { 0x00, 0x00, 0x00, 0xFF };
|
||||
|
||||
const gpu::TexturePointer& TextureCache::getWhiteTexture() {
|
||||
if (!_whiteTexture) {
|
||||
_whiteTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA), 1, 1));
|
||||
_whiteTexture->assignStoredMip(0, _whiteTexture->getTexelFormat(), sizeof(OPAQUE_WHITE), OPAQUE_WHITE);
|
||||
}
|
||||
return _whiteTexture;
|
||||
}
|
||||
|
||||
const gpu::TexturePointer& TextureCache::getGrayTexture() {
|
||||
if (!_grayTexture) {
|
||||
_grayTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA), 1, 1));
|
||||
_grayTexture->assignStoredMip(0, _whiteTexture->getTexelFormat(), sizeof(OPAQUE_WHITE), OPAQUE_GRAY);
|
||||
}
|
||||
return _grayTexture;
|
||||
}
|
||||
|
||||
const gpu::TexturePointer& TextureCache::getBlueTexture() {
|
||||
if (!_blueTexture) {
|
||||
_blueTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA), 1, 1));
|
||||
_blueTexture->assignStoredMip(0, _blueTexture->getTexelFormat(), sizeof(OPAQUE_BLUE), OPAQUE_BLUE);
|
||||
}
|
||||
return _blueTexture;
|
||||
}
|
||||
|
||||
const gpu::TexturePointer& TextureCache::getBlackTexture() {
|
||||
if (!_blackTexture) {
|
||||
_blackTexture = gpu::TexturePointer(gpu::Texture::create2D(gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA), 1, 1));
|
||||
_blackTexture->assignStoredMip(0, _whiteTexture->getTexelFormat(), sizeof(OPAQUE_BLACK), OPAQUE_BLACK);
|
||||
}
|
||||
return _blackTexture;
|
||||
}
|
||||
|
||||
|
||||
const gpu::TexturePointer& TextureCache::getNormalFittingTexture() {
|
||||
if (!_normalFittingTexture) {
|
||||
_normalFittingTexture = getImageTexture(PathUtils::resourcesPath() + "images/normalFittingScale.dds");
|
||||
}
|
||||
return _normalFittingTexture;
|
||||
}
|
||||
|
||||
/// Extra data for creating textures.
|
||||
class TextureExtra {
|
||||
public:
|
||||
TextureType type;
|
||||
const QByteArray& content;
|
||||
};
|
||||
|
||||
NetworkTexturePointer TextureCache::getTexture(const QUrl& url, TextureType type, const QByteArray& content) {
|
||||
TextureExtra extra = { type, content };
|
||||
return ResourceCache::getResource(url, QUrl(), false, &extra).staticCast<NetworkTexture>();
|
||||
}
|
||||
|
||||
/// Returns a texture version of an image file
|
||||
gpu::TexturePointer TextureCache::getImageTexture(const QString& path) {
|
||||
QImage image = QImage(path).mirrored(false, true);
|
||||
gpu::Element formatGPU = gpu::Element(gpu::VEC3, gpu::UINT8, gpu::RGB);
|
||||
gpu::Element formatMip = gpu::Element(gpu::VEC3, gpu::UINT8, gpu::RGB);
|
||||
if (image.hasAlphaChannel()) {
|
||||
formatGPU = gpu::Element(gpu::VEC4, gpu::UINT8, gpu::RGBA);
|
||||
formatMip = gpu::Element(gpu::VEC4, gpu::UINT8, gpu::BGRA);
|
||||
}
|
||||
gpu::TexturePointer texture = gpu::TexturePointer(
|
||||
gpu::Texture::create2D(formatGPU, image.width(), image.height(),
|
||||
gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
|
||||
texture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
|
||||
texture->autoGenerateMips(-1);
|
||||
return texture;
|
||||
}
|
||||
|
||||
|
||||
QSharedPointer<Resource> TextureCache::createResource(const QUrl& url,
|
||||
const QSharedPointer<Resource>& fallback, bool delayLoad, const void* extra) {
|
||||
const TextureExtra* textureExtra = static_cast<const TextureExtra*>(extra);
|
||||
return QSharedPointer<Resource>(new NetworkTexture(url, textureExtra->type, textureExtra->content),
|
||||
&Resource::allReferencesCleared);
|
||||
}
|
||||
|
||||
Texture::Texture() {
|
||||
}
|
||||
|
||||
Texture::~Texture() {
|
||||
}
|
||||
|
||||
NetworkTexture::NetworkTexture(const QUrl& url, TextureType type, const QByteArray& content) :
|
||||
Resource(url, !content.isEmpty()),
|
||||
_type(type),
|
||||
_width(0),
|
||||
_height(0) {
|
||||
|
||||
_textureSource.reset(new gpu::TextureSource());
|
||||
|
||||
if (!url.isValid()) {
|
||||
_loaded = true;
|
||||
}
|
||||
|
||||
std::string theName = url.toString().toStdString();
|
||||
// if we have content, load it after we have our self pointer
|
||||
if (!content.isEmpty()) {
|
||||
_startedLoading = true;
|
||||
QMetaObject::invokeMethod(this, "loadContent", Qt::QueuedConnection, Q_ARG(const QByteArray&, content));
|
||||
}
|
||||
}
|
||||
|
||||
NetworkTexture::NetworkTexture(const QUrl& url, const TextureLoaderFunc& textureLoader, const QByteArray& content) :
|
||||
Resource(url, !content.isEmpty()),
|
||||
_type(CUSTOM_TEXTURE),
|
||||
_textureLoader(textureLoader),
|
||||
_width(0),
|
||||
_height(0) {
|
||||
|
||||
_textureSource.reset(new gpu::TextureSource());
|
||||
|
||||
if (!url.isValid()) {
|
||||
_loaded = true;
|
||||
}
|
||||
|
||||
std::string theName = url.toString().toStdString();
|
||||
// if we have content, load it after we have our self pointer
|
||||
if (!content.isEmpty()) {
|
||||
_startedLoading = true;
|
||||
QMetaObject::invokeMethod(this, "loadContent", Qt::QueuedConnection, Q_ARG(const QByteArray&, content));
|
||||
}
|
||||
}
|
||||
|
||||
NetworkTexture::TextureLoaderFunc NetworkTexture::getTextureLoader() const {
|
||||
switch (_type) {
|
||||
case CUBE_TEXTURE: {
|
||||
return TextureLoaderFunc(model::TextureUsage::createCubeTextureFromImage);
|
||||
break;
|
||||
}
|
||||
case BUMP_TEXTURE: {
|
||||
return TextureLoaderFunc(model::TextureUsage::createNormalTextureFromBumpImage);
|
||||
break;
|
||||
}
|
||||
case CUSTOM_TEXTURE: {
|
||||
return _textureLoader;
|
||||
break;
|
||||
}
|
||||
case DEFAULT_TEXTURE:
|
||||
case NORMAL_TEXTURE:
|
||||
case SPECULAR_TEXTURE:
|
||||
case EMISSIVE_TEXTURE:
|
||||
default: {
|
||||
return TextureLoaderFunc(model::TextureUsage::create2DTextureFromImage);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
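A minimal sketch (not part of this commit) of the CUSTOM_TEXTURE path above: the caller supplies its own TextureLoaderFunc, which getTextureLoader() hands back unchanged. "skyboxUrl" is a placeholder URL.

auto textureCache = DependencyManager::get<TextureCache>();
NetworkTexturePointer custom = textureCache->getTexture(skyboxUrl,
    NetworkTexture::TextureLoaderFunc([](const QImage& image, const std::string& srcName) {
        // delegate to one of the stock loaders; any factory returning a gpu::Texture* works here
        return model::TextureUsage::createCubeTextureFromImage(image, srcName);
    }));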
|
||||
|
||||
|
||||
class ImageReader : public QRunnable {
|
||||
public:
|
||||
|
||||
ImageReader(const QWeakPointer<Resource>& texture, const NetworkTexture::TextureLoaderFunc& textureLoader, const QByteArray& data, const QUrl& url = QUrl());
|
||||
|
||||
virtual void run();
|
||||
|
||||
private:
|
||||
|
||||
QWeakPointer<Resource> _texture;
|
||||
NetworkTexture::TextureLoaderFunc _textureLoader;
|
||||
QUrl _url;
|
||||
QByteArray _content;
|
||||
};
|
||||
|
||||
void NetworkTexture::downloadFinished(const QByteArray& data) {
|
||||
// send the reader off to the thread pool
|
||||
QThreadPool::globalInstance()->start(new ImageReader(_self, getTextureLoader(), data, _url));
|
||||
}
|
||||
|
||||
void NetworkTexture::loadContent(const QByteArray& content) {
|
||||
QThreadPool::globalInstance()->start(new ImageReader(_self, getTextureLoader(), content, _url));
|
||||
}
|
||||
|
||||
ImageReader::ImageReader(const QWeakPointer<Resource>& texture, const NetworkTexture::TextureLoaderFunc& textureLoader, const QByteArray& data,
|
||||
const QUrl& url) :
|
||||
_texture(texture),
|
||||
_textureLoader(textureLoader),
|
||||
_url(url),
|
||||
_content(data)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
std::once_flag onceListSupportedFormatsflag;
|
||||
void listSupportedImageFormats() {
|
||||
std::call_once(onceListSupportedFormatsflag, [](){
|
||||
auto supportedFormats = QImageReader::supportedImageFormats();
|
||||
QString formats;
|
||||
foreach(const QByteArray& f, supportedFormats) {
|
||||
formats += QString(f) + ",";
|
||||
}
|
||||
qCDebug(modelnetworking) << "List of supported Image formats:" << formats;
|
||||
});
|
||||
}
|
||||
|
||||
void ImageReader::run() {
|
||||
QSharedPointer<Resource> texture = _texture.toStrongRef();
|
||||
if (texture.isNull()) {
|
||||
return;
|
||||
}
|
||||
|
||||
listSupportedImageFormats();
|
||||
|
||||
// try to help the QImage loader by extracting the image file format from the URL's filename extension
|
||||
// Some TGA files, for example, are not detected properly without it
|
||||
auto filename = _url.fileName().toStdString();
|
||||
auto filenameExtension = filename.substr(filename.find_last_of('.') + 1);
|
||||
QImage image = QImage::fromData(_content, filenameExtension.c_str());
|
||||
|
||||
// Note that QImage.format is the pixel format which is different from the "format" of the image file...
|
||||
auto imageFormat = image.format();
|
||||
int originalWidth = image.width();
|
||||
int originalHeight = image.height();
|
||||
|
||||
if (originalWidth == 0 || originalHeight == 0 || imageFormat == QImage::Format_Invalid) {
|
||||
if (filenameExtension.empty()) {
|
||||
qCDebug(modelnetworking) << "QImage failed to create from content, no file extension:" << _url;
|
||||
} else {
|
||||
qCDebug(modelnetworking) << "QImage failed to create from content" << _url;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
gpu::Texture* theTexture = nullptr;
|
||||
auto ntex = dynamic_cast<NetworkTexture*>(&*texture);
|
||||
if (ntex) {
|
||||
theTexture = ntex->getTextureLoader()(image, _url.toString().toStdString());
|
||||
}
|
||||
|
||||
QMetaObject::invokeMethod(texture.data(), "setImage",
|
||||
Q_ARG(const QImage&, image),
|
||||
Q_ARG(void*, theTexture),
|
||||
Q_ARG(int, originalWidth), Q_ARG(int, originalHeight));
|
||||
|
||||
|
||||
}
|
||||
|
||||
void NetworkTexture::setImage(const QImage& image, void* voidTexture, int originalWidth,
|
||||
int originalHeight) {
|
||||
_originalWidth = originalWidth;
|
||||
_originalHeight = originalHeight;
|
||||
|
||||
gpu::Texture* texture = static_cast<gpu::Texture*>(voidTexture);
|
||||
|
||||
// Passing ownership
|
||||
_textureSource->resetTexture(texture);
|
||||
auto gpuTexture = _textureSource->getGPUTexture();
|
||||
|
||||
if (gpuTexture) {
|
||||
_width = gpuTexture->getWidth();
|
||||
_height = gpuTexture->getHeight();
|
||||
} else {
|
||||
_width = _height = 0;
|
||||
}
|
||||
|
||||
finishedLoading(true);
|
||||
|
||||
imageLoaded(image);
|
||||
}
|
||||
|
||||
void NetworkTexture::imageLoaded(const QImage& image) {
|
||||
// nothing by default
|
||||
}
|
||||
|
|
@ -1,6 +1,6 @@
|
|||
//
|
||||
// TextureCache.h
|
||||
// libraries/gpu-networking/src
|
||||
// libraries/model-networking/src
|
||||
//
|
||||
// Created by Andrzej Kapolka on 8/6/13.
|
||||
// Copyright 2013 High Fidelity, Inc.
|
||||
|
@ -20,6 +20,7 @@
|
|||
|
||||
#include <DependencyManager.h>
|
||||
#include <ResourceCache.h>
|
||||
#include <model/TextureMap.h>
|
||||
|
||||
namespace gpu {
|
||||
class Batch;
|
||||
|
@ -28,7 +29,7 @@ class NetworkTexture;
|
|||
|
||||
typedef QSharedPointer<NetworkTexture> NetworkTexturePointer;
|
||||
|
||||
enum TextureType { DEFAULT_TEXTURE, NORMAL_TEXTURE, SPECULAR_TEXTURE, EMISSIVE_TEXTURE, SPLAT_TEXTURE, CUBE_TEXTURE };
|
||||
enum TextureType { DEFAULT_TEXTURE, NORMAL_TEXTURE, BUMP_TEXTURE, SPECULAR_TEXTURE, EMISSIVE_TEXTURE, CUBE_TEXTURE, CUSTOM_TEXTURE };
|
||||
|
||||
/// Stores cached textures, including render-to-texture targets.
|
||||
class TextureCache : public ResourceCache, public Dependency {
|
||||
|
@ -60,9 +61,15 @@ public:
|
|||
static gpu::TexturePointer getImageTexture(const QString& path);
|
||||
|
||||
/// Loads a texture from the specified URL.
|
||||
NetworkTexturePointer getTexture(const QUrl& url, TextureType type = DEFAULT_TEXTURE, bool dilatable = false,
|
||||
NetworkTexturePointer getTexture(const QUrl& url, TextureType type = DEFAULT_TEXTURE,
|
||||
const QByteArray& content = QByteArray());
|
||||
|
||||
|
||||
typedef gpu::Texture* TextureLoader(const QImage& image, const std::string& srcImageName);
|
||||
|
||||
typedef std::function<TextureLoader> TextureLoaderFunc;
|
||||
|
||||
NetworkTexturePointer getTexture(const QUrl& url, const TextureLoaderFunc& textureLoader,
|
||||
const QByteArray& content = QByteArray());
|
||||
protected:
|
||||
|
||||
virtual QSharedPointer<Resource> createResource(const QUrl& url,
|
||||
|
@ -87,14 +94,13 @@ private:
|
|||
class Texture {
|
||||
public:
|
||||
friend class TextureCache;
|
||||
friend class DilatableNetworkTexture;
|
||||
Texture();
|
||||
~Texture();
|
||||
|
||||
const gpu::TexturePointer& getGPUTexture() const { return _gpuTexture; }
|
||||
const gpu::TexturePointer getGPUTexture() const { return _textureSource->getGPUTexture(); }
|
||||
gpu::TextureSourcePointer _textureSource;
|
||||
|
||||
protected:
|
||||
gpu::TexturePointer _gpuTexture;
|
||||
|
||||
private:
|
||||
};
|
||||
|
@ -106,65 +112,36 @@ class NetworkTexture : public Resource, public Texture {
|
|||
|
||||
public:
|
||||
|
||||
typedef TextureCache::TextureLoaderFunc TextureLoaderFunc;
|
||||
|
||||
NetworkTexture(const QUrl& url, TextureType type, const QByteArray& content);
|
||||
|
||||
/// Checks whether it "looks like" this texture is translucent
|
||||
/// (majority of pixels neither fully opaque or fully transparent).
|
||||
bool isTranslucent() const { return _translucent; }
|
||||
|
||||
/// Returns the lazily-computed average texture color.
|
||||
const QColor& getAverageColor() const { return _averageColor; }
|
||||
NetworkTexture(const QUrl& url, const TextureLoaderFunc& textureLoader, const QByteArray& content);
|
||||
|
||||
int getOriginalWidth() const { return _originalWidth; }
|
||||
int getOriginalHeight() const { return _originalHeight; }
|
||||
int getWidth() const { return _width; }
|
||||
int getHeight() const { return _height; }
|
||||
TextureType getType() const { return _type; }
|
||||
|
||||
TextureLoaderFunc getTextureLoader() const;
|
||||
|
||||
protected:
|
||||
|
||||
virtual void downloadFinished(const QByteArray& data) override;
|
||||
|
||||
Q_INVOKABLE void loadContent(const QByteArray& content);
|
||||
// FIXME: This void* should be a gpu::Texture* but I cannot get it to work for now, moving on...
|
||||
Q_INVOKABLE void setImage(const QImage& image, void* texture, bool translucent, const QColor& averageColor, int originalWidth,
|
||||
int originalHeight);
|
||||
Q_INVOKABLE void setImage(const QImage& image, void* texture, int originalWidth, int originalHeight);
|
||||
|
||||
virtual void imageLoaded(const QImage& image);
|
||||
|
||||
TextureType _type;
|
||||
|
||||
private:
|
||||
bool _translucent;
|
||||
QColor _averageColor;
|
||||
TextureLoaderFunc _textureLoader;
|
||||
int _originalWidth;
|
||||
int _originalHeight;
|
||||
int _width;
|
||||
int _height;
|
||||
};
|
||||
|
||||
/// Caches derived, dilated textures.
|
||||
class DilatableNetworkTexture : public NetworkTexture {
|
||||
Q_OBJECT
|
||||
|
||||
public:
|
||||
|
||||
DilatableNetworkTexture(const QUrl& url, const QByteArray& content);
|
||||
|
||||
/// Returns a pointer to a texture with the requested amount of dilation.
|
||||
QSharedPointer<Texture> getDilatedTexture(float dilation);
|
||||
|
||||
protected:
|
||||
|
||||
virtual void imageLoaded(const QImage& image);
|
||||
virtual void reinsert();
|
||||
|
||||
private:
|
||||
|
||||
QImage _image;
|
||||
int _innerRadius;
|
||||
int _outerRadius;
|
||||
|
||||
QMap<float, QWeakPointer<Texture> > _dilatedTextures;
|
||||
};
|
||||
|
||||
#endif // hifi_TextureCache_h
|
|
@ -9,4 +9,4 @@ add_dependency_external_projects(glm)
|
|||
find_package(GLM REQUIRED)
|
||||
target_include_directories(${TARGET_NAME} PUBLIC ${GLM_INCLUDE_DIRS})
|
||||
|
||||
link_hifi_libraries(shared networking gpu gpu-networking procedural octree)
|
||||
link_hifi_libraries(shared gpu)
|
||||
|
|
20
libraries/model/src/model/Asset.cpp
Normal file
|
@ -0,0 +1,20 @@
|
|||
//
|
||||
// Asset.cpp
|
||||
// libraries/model/src/model
|
||||
//
|
||||
// Created by Sam Gateau on 08/21/2015.
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#include "Asset.h"
|
||||
|
||||
using namespace model;
|
||||
|
||||
Asset::Asset() {
|
||||
}
|
||||
|
||||
Asset::~Asset() {
|
||||
}
|
||||
|
132
libraries/model/src/model/Asset.h
Normal file
|
@ -0,0 +1,132 @@
|
|||
//
|
||||
// Asset.h
|
||||
// libraries/model/src/model
|
||||
//
|
||||
// Created by Sam Gateau on 08/21/2015.
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#ifndef hifi_model_Asset_h
|
||||
#define hifi_model_Asset_h
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "Material.h"
|
||||
#include "Geometry.h"
|
||||
|
||||
namespace model {
|
||||
|
||||
template <class T>
|
||||
class Table {
|
||||
public:
|
||||
typedef std::vector< T > Vector;
|
||||
typedef int ID;
|
||||
|
||||
static const ID INVALID_ID = 0;
|
||||
|
||||
typedef size_t Index;
|
||||
enum Version {
|
||||
DRAFT = 0,
|
||||
FINAL,
|
||||
NUM_VERSIONS,
|
||||
};
|
||||
|
||||
static Version evalVersionFromID(ID id) {
|
||||
if (id <= 0) {
|
||||
return DRAFT;
|
||||
} else {
|
||||
return FINAL;
|
||||
}
|
||||
}
|
||||
static Index evalIndexFromID(ID id) {
|
||||
return Index(id < 0 ? -id : id) - 1;
|
||||
}
|
||||
static ID evalID(Index index, Version version) {
|
||||
return (version == DRAFT ? -int(index + 1) : int(index + 1));
|
||||
}
|
||||
|
||||
Table() {
|
||||
for (auto e : _elements) {
|
||||
e.resize(0);
|
||||
}
|
||||
}
|
||||
~Table() {}
|
||||
|
||||
Index getNumElements() const {
|
||||
return _elements[DRAFT].size();
|
||||
}
|
||||
|
||||
ID add(const T& element) {
|
||||
for (auto e : _elements) {
|
||||
e.push_back(element);
|
||||
}
|
||||
return evalID(_elements[DRAFT].size(), DRAFT);
|
||||
}
|
||||
|
||||
void set(ID id, const T& element) {
|
||||
Index index = evalIndexFromID(id);
|
||||
if (index < getNumElements()) {
|
||||
_elements[DRAFT][index] = element;
|
||||
}
|
||||
}
|
||||
|
||||
const T& get(ID id, const T& element) const {
|
||||
Index index = evalIndexFromID(id);
|
||||
if (index < getNumElements()) {
|
||||
return _elements[DRAFT][index];
|
||||
}
|
||||
return _default;
|
||||
}
|
||||
|
||||
protected:
|
||||
Vector _elements[NUM_VERSIONS];
|
||||
T _default;
|
||||
};
|
||||
|
||||
typedef Table< MaterialPointer > MaterialTable;
|
||||
|
||||
typedef Table< MeshPointer > MeshTable;
|
||||
|
||||
|
||||
class Shape {
|
||||
public:
|
||||
|
||||
MeshTable::ID _meshID{ MeshTable::INVALID_ID };
|
||||
int _partID = 0;
|
||||
|
||||
MaterialTable::ID _materialID{ MaterialTable::INVALID_ID };
|
||||
};
|
||||
|
||||
typedef Table< Shape > ShapeTable;
|
||||
|
||||
class Asset {
|
||||
public:
|
||||
|
||||
|
||||
Asset();
|
||||
~Asset();
|
||||
|
||||
MeshTable& editMeshes() { return _meshes; }
|
||||
const MeshTable& getMeshes() const { return _meshes; }
|
||||
|
||||
MaterialTable& editMaterials() { return _materials; }
|
||||
const MaterialTable& getMaterials() const { return _materials; }
|
||||
|
||||
ShapeTable& editShapes() { return _shapes; }
|
||||
const ShapeTable& getShapes() const { return _shapes; }
|
||||
|
||||
protected:
|
||||
|
||||
MeshTable _meshes;
|
||||
MaterialTable _materials;
|
||||
ShapeTable _shapes;
|
||||
|
||||
};
|
||||
|
||||
typedef std::shared_ptr< Asset > AssetPointer;
|
||||
|
||||
};
|
||||
#endif
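A minimal sketch (not in this commit) of the Table/Asset API above; "someMaterial" stands for any model::MaterialPointer.

model::Asset asset;
auto materialID = asset.editMaterials().add(someMaterial);
// newly added entries live in the DRAFT version, encoded as a negative ID; FINAL IDs are positive
bool stillDraft = (model::MaterialTable::evalVersionFromID(materialID) == model::MaterialTable::DRAFT);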
|
|
@ -10,13 +10,27 @@
|
|||
//
|
||||
#include "Material.h"
|
||||
|
||||
#include "TextureMap.h"
|
||||
|
||||
using namespace model;
|
||||
using namespace gpu;
|
||||
|
||||
float componentSRGBToLinear(float cs) {
|
||||
if (cs > 0.04045) {
|
||||
return pow(((cs + 0.055)/1.055), 2.4);
|
||||
} else {
|
||||
return cs / 12.92;
|
||||
}
|
||||
}
|
||||
|
||||
glm::vec3 convertSRGBToLinear(const glm::vec3& srgb) {
|
||||
return glm::vec3(componentSRGBToLinear(srgb.x), componentSRGBToLinear(srgb.y), componentSRGBToLinear(srgb.z));
|
||||
}
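As a quick check of the conversion above: an sRGB component of 0.5 maps to ((0.5 + 0.055) / 1.055)^2.4 ≈ 0.214, while a component below the 0.04045 threshold, say 0.02, is simply divided by 12.92, giving ≈ 0.00155.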
|
||||
|
||||
Material::Material() :
|
||||
_key(0),
|
||||
_schemaBuffer(),
|
||||
_textureMap() {
|
||||
_textureMaps() {
|
||||
|
||||
// only when constructed from scratch do we create the Buffer that stores the properties
|
||||
Schema schema;
|
||||
|
@ -28,13 +42,13 @@ Material::Material() :
|
|||
Material::Material(const Material& material) :
|
||||
_key(material._key),
|
||||
_schemaBuffer(material._schemaBuffer),
|
||||
_textureMap(material._textureMap) {
|
||||
_textureMaps(material._textureMaps) {
|
||||
}
|
||||
|
||||
Material& Material::operator= (const Material& material) {
|
||||
_key = (material._key);
|
||||
_schemaBuffer = (material._schemaBuffer);
|
||||
_textureMap = (material._textureMap);
|
||||
_textureMaps = (material._textureMaps);
|
||||
|
||||
return (*this);
|
||||
}
|
||||
|
@ -67,8 +81,15 @@ void Material::setOpacity(float opacity) {
|
|||
_schemaBuffer.edit<Schema>()._opacity = opacity;
|
||||
}
|
||||
|
||||
void Material::setTextureView(MapChannel channel, const gpu::TextureView& view) {
|
||||
_key.setMapChannel(channel, (view.isValid()));
|
||||
_textureMap[channel] = view;
|
||||
void Material::setTextureMap(MapChannel channel, const TextureMapPointer& textureMap) {
|
||||
if (textureMap) {
|
||||
_key.setMapChannel(channel, (true));
|
||||
_textureMaps[channel] = textureMap;
|
||||
} else {
|
||||
_key.setMapChannel(channel, (false));
|
||||
_textureMaps.erase(channel);
|
||||
}
|
||||
}
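A minimal sketch (not in this commit) of wiring a texture onto a material with the new API; "material" is any model::MaterialPointer and "textureSource" any gpu::TextureSourcePointer (for example, the one owned by a NetworkTexture).

auto diffuseMap = model::TextureMapPointer(new model::TextureMap());
diffuseMap->setTextureSource(textureSource);
material->setTextureMap(model::MaterialKey::DIFFUSE_MAP, diffuseMap);  // sets the matching map-channel bit in the key
material->setTextureMap(model::MaterialKey::DIFFUSE_MAP, nullptr);     // clears the bit and erases the map again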
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -13,16 +13,18 @@
|
|||
|
||||
#include <bitset>
|
||||
#include <map>
|
||||
#include <qurl.h>
|
||||
|
||||
#include <glm/glm.hpp>
|
||||
|
||||
#include "gpu/Resource.h"
|
||||
#include "gpu/Texture.h"
|
||||
|
||||
#include <gpu/Resource.h>
|
||||
|
||||
namespace model {
|
||||
|
||||
static glm::vec3 convertSRGBToLinear(const glm::vec3& srgb);
|
||||
|
||||
class TextureMap;
|
||||
typedef std::shared_ptr< TextureMap > TextureMapPointer;
|
||||
|
||||
// Material Key is a coarse trait description of a material used to classify the materials
|
||||
class MaterialKey {
|
||||
public:
|
||||
|
@ -39,6 +41,7 @@ public:
|
|||
GLOSS_MAP_BIT,
|
||||
TRANSPARENT_MAP_BIT,
|
||||
NORMAL_MAP_BIT,
|
||||
LIGHTMAP_MAP_BIT,
|
||||
|
||||
NUM_FLAGS,
|
||||
};
|
||||
|
@ -51,6 +54,7 @@ public:
|
|||
GLOSS_MAP,
|
||||
TRANSPARENT_MAP,
|
||||
NORMAL_MAP,
|
||||
LIGHTMAP_MAP,
|
||||
|
||||
NUM_MAP_CHANNELS,
|
||||
};
|
||||
|
@ -81,6 +85,7 @@ public:
|
|||
Builder& withTransparentMap() { _flags.set(TRANSPARENT_MAP_BIT); return (*this); }
|
||||
|
||||
Builder& withNormalMap() { _flags.set(NORMAL_MAP_BIT); return (*this); }
|
||||
Builder& withLightmapMap() { _flags.set(LIGHTMAP_MAP_BIT); return (*this); }
|
||||
|
||||
// Convenient standard keys that we will keep on using all over the place
|
||||
static MaterialKey opaqueDiffuse() { return Builder().withDiffuse().build(); }
|
||||
|
@ -120,6 +125,9 @@ public:
|
|||
void setNormalMap(bool value) { _flags.set(NORMAL_MAP_BIT, value); }
|
||||
bool isNormalMap() const { return _flags[NORMAL_MAP_BIT]; }
|
||||
|
||||
void setLightmapMap(bool value) { _flags.set(LIGHTMAP_MAP_BIT, value); }
|
||||
bool isLightmapMap() const { return _flags[LIGHTMAP_MAP_BIT]; }
|
||||
|
||||
void setMapChannel(MapChannel channel, bool value) { _flags.set(EMISSIVE_MAP_BIT + channel, value); }
|
||||
bool isMapChannel(MapChannel channel) const { return _flags[EMISSIVE_MAP_BIT + channel]; }
|
||||
|
||||
|
@ -175,6 +183,9 @@ public:
|
|||
Builder& withoutNormalMap() { _value.reset(MaterialKey::NORMAL_MAP_BIT); _mask.set(MaterialKey::NORMAL_MAP_BIT); return (*this); }
|
||||
Builder& withNormalMap() { _value.set(MaterialKey::NORMAL_MAP_BIT); _mask.set(MaterialKey::NORMAL_MAP_BIT); return (*this); }
|
||||
|
||||
Builder& withoutLightmapMap() { _value.reset(MaterialKey::LIGHTMAP_MAP_BIT); _mask.set(MaterialKey::LIGHTMAP_MAP_BIT); return (*this); }
|
||||
Builder& withLightmapMap() { _value.set(MaterialKey::LIGHTMAP_MAP_BIT); _mask.set(MaterialKey::LIGHTMAP_MAP_BIT); return (*this); }
|
||||
|
||||
// Convenient standard keys that we will keep on using all over the place
|
||||
static MaterialFilter opaqueDiffuse() { return Builder().withDiffuse().withoutTransparent().build(); }
|
||||
};
|
||||
|
@ -197,12 +208,11 @@ public:
|
|||
class Material {
|
||||
public:
|
||||
typedef gpu::BufferView UniformBufferView;
|
||||
typedef gpu::TextureView TextureView;
|
||||
|
||||
typedef glm::vec3 Color;
|
||||
|
||||
typedef MaterialKey::MapChannel MapChannel;
|
||||
typedef std::map<MapChannel, TextureView> TextureMap;
|
||||
typedef std::map<MapChannel, TextureMapPointer> TextureMaps;
|
||||
typedef std::bitset<MaterialKey::NUM_MAP_CHANNELS> MapFlags;
|
||||
|
||||
Material();
|
||||
|
@ -241,14 +251,15 @@ public:
|
|||
|
||||
const UniformBufferView& getSchemaBuffer() const { return _schemaBuffer; }
|
||||
|
||||
void setTextureView(MapChannel channel, const TextureView& texture);
|
||||
const TextureMap& getTextureMap() const { return _textureMap; }
|
||||
// The texture map to channel association
|
||||
void setTextureMap(MapChannel channel, const TextureMapPointer& textureMap);
|
||||
const TextureMaps& getTextureMaps() const { return _textureMaps; }
|
||||
|
||||
protected:
|
||||
|
||||
MaterialKey _key;
|
||||
UniformBufferView _schemaBuffer;
|
||||
TextureMap _textureMap;
|
||||
TextureMaps _textureMaps;
|
||||
|
||||
};
|
||||
typedef std::shared_ptr< Material > MaterialPointer;
|
||||
|
|
|
@ -26,6 +26,38 @@ Material getMaterial() {
|
|||
return _mat;
|
||||
}
|
||||
|
||||
<! // TODO: use this code for correct gamma correction
|
||||
/*
|
||||
float componentSRGBToLinear(float cs) {
|
||||
// sRGB to linear conversion
|
||||
// { cs / 12.92, cs <= 0.04045
|
||||
// cl = {
|
||||
// { ((cs + 0.055)/1.055)^2.4, cs > 0.04045
|
||||
// constants:
|
||||
// T = 0.04045
|
||||
// A = 1 / 1.055 = 0.94786729857
|
||||
// B = 0.055 * A = 0.05213270142
|
||||
// C = 1 / 12.92 = 0.0773993808
|
||||
// G = 2.4
|
||||
const float T = 0.04045;
|
||||
const float A = 0.947867;
|
||||
const float B = 0.052132;
|
||||
const float C = 0.077399;
|
||||
const float G = 2.4;
|
||||
|
||||
if (cs > T) {
|
||||
return pow((cs * A + B), G);
|
||||
} else {
|
||||
return cs * C;
|
||||
}
|
||||
}
|
||||
|
||||
vec3 SRGBToLinear(vec3 srgb) {
|
||||
return vec3(componentSRGBToLinear(srgb.x),componentSRGBToLinear(srgb.y),componentSRGBToLinear(srgb.z));
|
||||
}
|
||||
vec3 getMaterialDiffuse(Material m) { return (gl_FragCoord.x < 800 ? SRGBToLinear(m._diffuse.rgb) : m._diffuse.rgb); }
|
||||
*/!>
|
||||
|
||||
float getMaterialOpacity(Material m) { return m._diffuse.a; }
|
||||
vec3 getMaterialDiffuse(Material m) { return m._diffuse.rgb; }
|
||||
vec3 getMaterialSpecular(Material m) { return m._specular.rgb; }
|
||||
|
|
11
libraries/model/src/model/ModelLogging.cpp
Normal file
|
@ -0,0 +1,11 @@
|
|||
//
|
||||
// Created by Sam Gateau on 2015/09/21
|
||||
// Copyright 2013-2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#include "ModelLogging.h"
|
||||
|
||||
Q_LOGGING_CATEGORY(modelLog, "hifi.model")
|
14
libraries/model/src/model/ModelLogging.h
Normal file
|
@ -0,0 +1,14 @@
|
|||
//
|
||||
// ModelLogging.h
|
||||
// hifi
|
||||
//
|
||||
// Created by Sam Gateau on 9/20/15.
|
||||
// Copyright 2013-2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#include <QLoggingCategory>
|
||||
|
||||
Q_DECLARE_LOGGING_CATEGORY(modelLog)
|
|
@ -13,7 +13,6 @@
|
|||
|
||||
#include <gpu/Batch.h>
|
||||
#include <gpu/Context.h>
|
||||
#include <procedural/Procedural.h>
|
||||
#include <ViewFrustum.h>
|
||||
|
||||
#include "Skybox_vert.h"
|
||||
|
@ -40,15 +39,6 @@ void Skybox::setColor(const Color& color) {
|
|||
_color = color;
|
||||
}
|
||||
|
||||
void Skybox::setProcedural(QSharedPointer<Procedural> procedural) {
|
||||
_procedural = procedural;
|
||||
if (_procedural) {
|
||||
_procedural->_vertexSource = Skybox_vert;
|
||||
_procedural->_fragmentSource = Skybox_frag;
|
||||
// No pipeline state customization
|
||||
}
|
||||
}
|
||||
|
||||
void Skybox::setCubemap(const gpu::TexturePointer& cubemap) {
|
||||
_cubemap = cubemap;
|
||||
}
|
||||
|
@ -58,7 +48,7 @@ void Skybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const Sky
|
|||
static gpu::BufferPointer theBuffer;
|
||||
static gpu::Stream::FormatPointer theFormat;
|
||||
|
||||
if (skybox._procedural || skybox.getCubemap()) {
|
||||
if (skybox.getCubemap()) {
|
||||
if (!theBuffer) {
|
||||
const float CLIP = 1.0f;
|
||||
const glm::vec2 vertices[4] = { { -CLIP, -CLIP }, { CLIP, -CLIP }, { -CLIP, CLIP }, { CLIP, CLIP } };
|
||||
|
@ -78,14 +68,7 @@ void Skybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const Sky
|
|||
batch.setInputBuffer(gpu::Stream::POSITION, theBuffer, 0, 8);
|
||||
batch.setInputFormat(theFormat);
|
||||
|
||||
if (skybox._procedural && skybox._procedural->_enabled && skybox._procedural->ready()) {
|
||||
if (skybox.getCubemap() && skybox.getCubemap()->isDefined()) {
|
||||
batch.setResourceTexture(0, skybox.getCubemap());
|
||||
}
|
||||
|
||||
skybox._procedural->prepare(batch, glm::vec3(1));
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
} else if (skybox.getCubemap() && skybox.getCubemap()->isDefined()) {
|
||||
if (skybox.getCubemap() && skybox.getCubemap()->isDefined()) {
|
||||
static gpu::BufferPointer theConstants;
|
||||
static gpu::PipelinePointer thePipeline;
|
||||
static int SKYBOX_CONSTANTS_SLOT = 0; // need to be defined by the compilation of the shader
|
||||
|
|
|
@ -11,13 +11,12 @@
|
|||
#ifndef hifi_model_Skybox_h
|
||||
#define hifi_model_Skybox_h
|
||||
|
||||
#include <QtCore/QSharedPointer>
|
||||
#include <gpu/Texture.h>
|
||||
|
||||
#include "Light.h"
|
||||
|
||||
class ViewFrustum;
|
||||
struct Procedural;
|
||||
|
||||
namespace gpu { class Batch; }
|
||||
|
||||
namespace model {
|
||||
|
@ -36,13 +35,14 @@ public:
|
|||
void setCubemap(const gpu::TexturePointer& cubemap);
|
||||
const gpu::TexturePointer& getCubemap() const { return _cubemap; }
|
||||
|
||||
void setProcedural(QSharedPointer<Procedural> procedural);
|
||||
virtual void render(gpu::Batch& batch, const ViewFrustum& frustum) const {
|
||||
render(batch, frustum, (*this));
|
||||
}
|
||||
|
||||
static void render(gpu::Batch& batch, const ViewFrustum& frustum, const Skybox& skybox);
|
||||
|
||||
protected:
|
||||
gpu::TexturePointer _cubemap;
|
||||
QSharedPointer<Procedural> _procedural;
|
||||
Color _color{1.0f, 1.0f, 1.0f};
|
||||
};
|
||||
typedef std::shared_ptr< Skybox > SkyboxPointer;
|
||||
|
|
|
@ -204,7 +204,6 @@ SunSkyStage::SunSkyStage() :
|
|||
// Beginning of March
|
||||
setYearTime(60.0f);
|
||||
|
||||
_skybox = std::make_shared<Skybox>();
|
||||
_skybox->setColor(Color(1.0f, 0.0f, 0.0f));
|
||||
}
|
||||
|
||||
|
|
485
libraries/model/src/model/TextureMap.cpp
Executable file
|
@ -0,0 +1,485 @@
|
|||
//
|
||||
// TextureMap.cpp
|
||||
// libraries/model/src/model
|
||||
//
|
||||
// Created by Sam Gateau on 5/6/2015.
|
||||
// Copyright 2014 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#include "TextureMap.h"
|
||||
|
||||
#include <QImage>
|
||||
#include <QPainter>
|
||||
#include <QDebug>
|
||||
|
||||
#include "ModelLogging.h"
|
||||
|
||||
using namespace model;
|
||||
using namespace gpu;
|
||||
|
||||
|
||||
void TextureMap::setTextureSource(TextureSourcePointer& textureSource) {
|
||||
_textureSource = textureSource;
|
||||
}
|
||||
|
||||
bool TextureMap::isDefined() const {
|
||||
if (_textureSource) {
|
||||
return _textureSource->isDefined();
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
gpu::TextureView TextureMap::getTextureView() const {
|
||||
if (_textureSource) {
|
||||
return gpu::TextureView(_textureSource->getGPUTexture(), 0);
|
||||
} else {
|
||||
return gpu::TextureView();
|
||||
}
|
||||
}
|
||||
|
||||
void TextureMap::setTextureTransform(const Transform& texcoordTransform) {
|
||||
_texcoordTransform = texcoordTransform;
|
||||
}
|
||||
|
||||
void TextureMap::setLightmapOffsetScale(float offset, float scale) {
|
||||
_lightmapOffsetScale.x = offset;
|
||||
_lightmapOffsetScale.y = scale;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
gpu::Texture* TextureUsage::create2DTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
|
||||
QImage image = srcImage;
|
||||
|
||||
int imageArea = image.width() * image.height();
|
||||
|
||||
int opaquePixels = 0;
|
||||
int translucentPixels = 0;
|
||||
bool isTransparent = false;
|
||||
int redTotal = 0, greenTotal = 0, blueTotal = 0, alphaTotal = 0;
|
||||
const int EIGHT_BIT_MAXIMUM = 255;
|
||||
QColor averageColor(EIGHT_BIT_MAXIMUM, EIGHT_BIT_MAXIMUM, EIGHT_BIT_MAXIMUM);
|
||||
|
||||
if (!image.hasAlphaChannel()) {
|
||||
if (image.format() != QImage::Format_RGB888) {
|
||||
image = image.convertToFormat(QImage::Format_RGB888);
|
||||
}
|
||||
// int redTotal = 0, greenTotal = 0, blueTotal = 0;
|
||||
for (int y = 0; y < image.height(); y++) {
|
||||
for (int x = 0; x < image.width(); x++) {
|
||||
QRgb rgb = image.pixel(x, y);
|
||||
redTotal += qRed(rgb);
|
||||
greenTotal += qGreen(rgb);
|
||||
blueTotal += qBlue(rgb);
|
||||
}
|
||||
}
|
||||
if (imageArea > 0) {
|
||||
averageColor.setRgb(redTotal / imageArea, greenTotal / imageArea, blueTotal / imageArea);
|
||||
}
|
||||
} else {
|
||||
if (image.format() != QImage::Format_ARGB32) {
|
||||
image = image.convertToFormat(QImage::Format_ARGB32);
|
||||
}
|
||||
|
||||
// check for translucency/false transparency
|
||||
// int opaquePixels = 0;
|
||||
// int translucentPixels = 0;
|
||||
// int redTotal = 0, greenTotal = 0, blueTotal = 0, alphaTotal = 0;
|
||||
for (int y = 0; y < image.height(); y++) {
|
||||
for (int x = 0; x < image.width(); x++) {
|
||||
QRgb rgb = image.pixel(x, y);
|
||||
redTotal += qRed(rgb);
|
||||
greenTotal += qGreen(rgb);
|
||||
blueTotal += qBlue(rgb);
|
||||
int alpha = qAlpha(rgb);
|
||||
alphaTotal += alpha;
|
||||
if (alpha == EIGHT_BIT_MAXIMUM) {
|
||||
opaquePixels++;
|
||||
} else if (alpha != 0) {
|
||||
translucentPixels++;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (opaquePixels == imageArea) {
|
||||
qCDebug(modelLog) << "Image with alpha channel is completely opaque:" << QString(srcImageName.c_str());
|
||||
image = image.convertToFormat(QImage::Format_RGB888);
|
||||
}
|
||||
|
||||
averageColor = QColor(redTotal / imageArea,
|
||||
greenTotal / imageArea, blueTotal / imageArea, alphaTotal / imageArea);
|
||||
|
||||
isTransparent = (translucentPixels >= imageArea / 2);
|
||||
}
|
||||
|
||||
gpu::Texture* theTexture = nullptr;
|
||||
if ((image.width() > 0) && (image.height() > 0)) {
|
||||
|
||||
// bool isLinearRGB = true; //(_type == NORMAL_TEXTURE) || (_type == EMISSIVE_TEXTURE);
|
||||
bool isLinearRGB = true; //(_type == NORMAL_TEXTURE) || (_type == EMISSIVE_TEXTURE);
|
||||
|
||||
gpu::Element formatGPU = gpu::Element(gpu::VEC3, gpu::UINT8, (isLinearRGB ? gpu::RGB : gpu::SRGB));
|
||||
gpu::Element formatMip = gpu::Element(gpu::VEC3, gpu::UINT8, (isLinearRGB ? gpu::RGB : gpu::SRGB));
|
||||
if (image.hasAlphaChannel()) {
|
||||
formatGPU = gpu::Element(gpu::VEC4, gpu::UINT8, (isLinearRGB ? gpu::RGBA : gpu::SRGBA));
|
||||
formatMip = gpu::Element(gpu::VEC4, gpu::UINT8, (isLinearRGB ? gpu::BGRA : gpu::SBGRA));
|
||||
}
|
||||
|
||||
|
||||
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
|
||||
theTexture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
|
||||
theTexture->autoGenerateMips(-1);
|
||||
}
|
||||
|
||||
return theTexture;
|
||||
}
|
||||
|
||||
int clampPixelCoordinate(int coordinate, int maxCoordinate) {
|
||||
return coordinate - ((int)(coordinate < 0) * coordinate) + ((int)(coordinate > maxCoordinate) * (maxCoordinate - coordinate));
|
||||
}
|
||||
|
||||
const int RGBA_MAX = 255;
|
||||
|
||||
// transform [-1, 1] to [0, 255] (from Sobel value to RGB)
|
||||
double mapComponent(double sobelValue) {
|
||||
const double factor = RGBA_MAX / 2.0;
|
||||
return (sobelValue + 1.0) * factor;
|
||||
}
|
||||
|
||||
gpu::Texture* TextureUsage::createNormalTextureFromBumpImage(const QImage& srcImage, const std::string& srcImageName) {
|
||||
QImage image = srcImage;
|
||||
|
||||
// PR 5540 by AlessandroSigna
|
||||
// integrated here as a specialized TextureLoader for bumpmaps
|
||||
// The conversion is done using the Sobel Filter to calculate the derivatives from the grayscale image
|
||||
const double pStrength = 2.0;
|
||||
int width = image.width();
|
||||
int height = image.height();
|
||||
QImage result(width, height, image.format());
|
||||
|
||||
for (int i = 0; i < width; i++) {
|
||||
const int iNextClamped = clampPixelCoordinate(i + 1, width - 1);
|
||||
const int iPrevClamped = clampPixelCoordinate(i - 1, width - 1);
|
||||
|
||||
for (int j = 0; j < height; j++) {
|
||||
const int jNextClamped = clampPixelCoordinate(j + 1, height - 1);
|
||||
const int jPrevClamped = clampPixelCoordinate(j - 1, height - 1);
|
||||
|
||||
// surrounding pixels
|
||||
const QRgb topLeft = image.pixel(iPrevClamped, jPrevClamped);
|
||||
const QRgb top = image.pixel(iPrevClamped, j);
|
||||
const QRgb topRight = image.pixel(iPrevClamped, jNextClamped);
|
||||
const QRgb right = image.pixel(i, jNextClamped);
|
||||
const QRgb bottomRight = image.pixel(iNextClamped, jNextClamped);
|
||||
const QRgb bottom = image.pixel(iNextClamped, j);
|
||||
const QRgb bottomLeft = image.pixel(iNextClamped, jPrevClamped);
|
||||
const QRgb left = image.pixel(i, jPrevClamped);
|
||||
|
||||
// take their gray intensities
|
||||
// since it's a grayscale image, the value of each component RGB is the same
|
||||
const double tl = qRed(topLeft);
|
||||
const double t = qRed(top);
|
||||
const double tr = qRed(topRight);
|
||||
const double r = qRed(right);
|
||||
const double br = qRed(bottomRight);
|
||||
const double b = qRed(bottom);
|
||||
const double bl = qRed(bottomLeft);
|
||||
const double l = qRed(left);
|
||||
|
||||
// apply the sobel filter
|
||||
const double dX = (tr + pStrength * r + br) - (tl + pStrength * l + bl);
|
||||
const double dY = (bl + pStrength * b + br) - (tl + pStrength * t + tr);
|
||||
const double dZ = RGBA_MAX / pStrength;
|
||||
|
||||
glm::vec3 v(dX, dY, dZ);
|
||||
v = glm::normalize(v); // glm::normalize returns the normalized vector; assign it back so the mapping below uses a unit normal
|
||||
|
||||
// convert to rgb from the value obtained computing the filter
|
||||
QRgb qRgbValue = qRgb(mapComponent(v.x), mapComponent(v.y), mapComponent(v.z));
|
||||
result.setPixel(i, j, qRgbValue);
|
||||
}
|
||||
}
|
||||
|
||||
gpu::Texture* theTexture = nullptr;
|
||||
if ((image.width() > 0) && (image.height() > 0)) {
|
||||
|
||||
// bool isLinearRGB = true; //(_type == NORMAL_TEXTURE) || (_type == EMISSIVE_TEXTURE);
|
||||
bool isLinearRGB = true; //(_type == NORMAL_TEXTURE) || (_type == EMISSIVE_TEXTURE);
|
||||
|
||||
gpu::Element formatGPU = gpu::Element(gpu::VEC3, gpu::UINT8, (isLinearRGB ? gpu::RGB : gpu::SRGB));
|
||||
gpu::Element formatMip = gpu::Element(gpu::VEC3, gpu::UINT8, (isLinearRGB ? gpu::RGB : gpu::SRGB));
|
||||
if (image.hasAlphaChannel()) {
|
||||
formatGPU = gpu::Element(gpu::VEC4, gpu::UINT8, (isLinearRGB ? gpu::RGBA : gpu::SRGBA));
|
||||
formatMip = gpu::Element(gpu::VEC4, gpu::UINT8, (isLinearRGB ? gpu::BGRA : gpu::SBGRA));
|
||||
}
|
||||
|
||||
|
||||
theTexture = (gpu::Texture::create2D(formatGPU, image.width(), image.height(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR)));
|
||||
theTexture->assignStoredMip(0, formatMip, image.byteCount(), image.constBits());
|
||||
theTexture->autoGenerateMips(-1);
|
||||
}
|
||||
|
||||
return theTexture;
|
||||
}
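The bump-to-normal conversion above is just a per-pixel Sobel gradient packed into RGB. A minimal standalone sketch of that mapping, assuming a hypothetical heightAt(x, y) lookup that returns the clamped grayscale value in [0, 255] (names are illustrative, not from this commit):

#include <glm/glm.hpp>
#include <cstdint>

// Hypothetical helper: grayscale height of the clamped pixel at (x, y), in [0, 255].
extern double heightAt(int x, int y);

// Encode the Sobel gradient at (x, y) as an 8-bit RGB normal, matching the
// (n + 1.0) * 127.5 convention used by mapComponent() above.
void sobelNormal(int x, int y, double strength, uint8_t rgb[3]) {
    const double tl = heightAt(x - 1, y - 1), t = heightAt(x - 1, y), tr = heightAt(x - 1, y + 1);
    const double l  = heightAt(x, y - 1),                             r  = heightAt(x, y + 1);
    const double bl = heightAt(x + 1, y - 1), b = heightAt(x + 1, y), br = heightAt(x + 1, y + 1);

    const double dX = (tr + strength * r + br) - (tl + strength * l + bl);
    const double dY = (bl + strength * b + br) - (tl + strength * t + tr);
    const double dZ = 255.0 / strength;

    const glm::dvec3 n = glm::normalize(glm::dvec3(dX, dY, dZ));
    for (int i = 0; i < 3; i++) {
        rgb[i] = (uint8_t)((n[i] + 1.0) * 127.5);
    }
}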
|
||||
|
||||
class CubeLayout {
|
||||
public:
|
||||
int _widthRatio = 1;
|
||||
int _heightRatio = 1;
|
||||
|
||||
class Face {
|
||||
public:
|
||||
int _x = 0;
|
||||
int _y = 0;
|
||||
bool _horizontalMirror = false;
|
||||
bool _verticalMirror = false;
|
||||
|
||||
Face() {}
|
||||
Face(int x, int y, bool horizontalMirror, bool verticalMirror) : _x(x), _y(y), _horizontalMirror(horizontalMirror), _verticalMirror(verticalMirror) {}
|
||||
};
|
||||
|
||||
Face _faceXPos;
|
||||
Face _faceXNeg;
|
||||
Face _faceYPos;
|
||||
Face _faceYNeg;
|
||||
Face _faceZPos;
|
||||
Face _faceZNeg;
|
||||
|
||||
CubeLayout(int wr, int hr, Face fXP, Face fXN, Face fYP, Face fYN, Face fZP, Face fZN) :
|
||||
_widthRatio(wr),
|
||||
_heightRatio(hr),
|
||||
_faceXPos(fXP),
|
||||
_faceXNeg(fXN),
|
||||
_faceYPos(fYP),
|
||||
_faceYNeg(fYN),
|
||||
_faceZPos(fZP),
|
||||
_faceZNeg(fZN) {}
|
||||
};
|
||||
|
||||
gpu::Texture* TextureUsage::createCubeTextureFromImage(const QImage& srcImage, const std::string& srcImageName) {
|
||||
QImage image = srcImage;
|
||||
|
||||
int imageArea = image.width() * image.height();
|
||||
|
||||
|
||||
qCDebug(modelLog) << "Cube map size:" << QString(srcImageName.c_str()) << image.width() << image.height();
|
||||
|
||||
int opaquePixels = 0;
|
||||
int translucentPixels = 0;
|
||||
bool isTransparent = false;
|
||||
int redTotal = 0, greenTotal = 0, blueTotal = 0, alphaTotal = 0;
|
||||
const int EIGHT_BIT_MAXIMUM = 255;
|
||||
QColor averageColor(EIGHT_BIT_MAXIMUM, EIGHT_BIT_MAXIMUM, EIGHT_BIT_MAXIMUM);
|
||||
|
||||
if (!image.hasAlphaChannel()) {
|
||||
if (image.format() != QImage::Format_RGB888) {
|
||||
image = image.convertToFormat(QImage::Format_RGB888);
|
||||
}
|
||||
// int redTotal = 0, greenTotal = 0, blueTotal = 0;
|
||||
for (int y = 0; y < image.height(); y++) {
|
||||
for (int x = 0; x < image.width(); x++) {
|
||||
QRgb rgb = image.pixel(x, y);
|
||||
redTotal += qRed(rgb);
|
||||
greenTotal += qGreen(rgb);
|
||||
blueTotal += qBlue(rgb);
|
||||
}
|
||||
}
|
||||
if (imageArea > 0) {
|
||||
averageColor.setRgb(redTotal / imageArea, greenTotal / imageArea, blueTotal / imageArea);
|
||||
}
|
||||
} else {
|
||||
if (image.format() != QImage::Format_ARGB32) {
|
||||
image = image.convertToFormat(QImage::Format_ARGB32);
|
||||
}
|
||||
|
||||
// check for translucency/false transparency
|
||||
// int opaquePixels = 0;
|
||||
// int translucentPixels = 0;
|
||||
// int redTotal = 0, greenTotal = 0, blueTotal = 0, alphaTotal = 0;
|
||||
for (int y = 0; y < image.height(); y++) {
|
||||
for (int x = 0; x < image.width(); x++) {
|
||||
QRgb rgb = image.pixel(x, y);
|
||||
redTotal += qRed(rgb);
|
||||
greenTotal += qGreen(rgb);
|
||||
blueTotal += qBlue(rgb);
|
||||
int alpha = qAlpha(rgb);
|
||||
alphaTotal += alpha;
|
||||
if (alpha == EIGHT_BIT_MAXIMUM) {
|
||||
opaquePixels++;
|
||||
} else if (alpha != 0) {
|
||||
translucentPixels++;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (opaquePixels == imageArea) {
|
||||
qCDebug(modelLog) << "Image with alpha channel is completely opaque:" << QString(srcImageName.c_str());
|
||||
image = image.convertToFormat(QImage::Format_RGB888);
|
||||
}
|
||||
|
||||
averageColor = QColor(redTotal / imageArea,
|
||||
greenTotal / imageArea, blueTotal / imageArea, alphaTotal / imageArea);
|
||||
|
||||
isTransparent = (translucentPixels >= imageArea / 2);
|
||||
}
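The alpha analysis above averages the channels and flags an image as transparent when at least half of its pixels are partially translucent. A compact sketch of the same bookkeeping over a packed RGBA buffer (the buffer layout and helper name are assumptions for illustration):

#include <cstddef>
#include <cstdint>

struct ImageStats { uint8_t avg[4]; bool mostlyTranslucent; };

// 'rgba' is a tightly packed 8-bit RGBA buffer holding 'count' pixels.
ImageStats analyzeRGBA(const uint8_t* rgba, size_t count) {
    uint64_t total[4] = { 0, 0, 0, 0 };
    size_t translucent = 0;
    for (size_t i = 0; i < count; i++) {
        const uint8_t* p = rgba + 4 * i;
        for (int c = 0; c < 4; c++) { total[c] += p[c]; }
        if (p[3] != 0 && p[3] != 255) { translucent++; }   // neither fully opaque nor fully clear
    }
    ImageStats stats;
    for (int c = 0; c < 4; c++) { stats.avg[c] = count ? (uint8_t)(total[c] / count) : 255; }
    stats.mostlyTranslucent = count ? (translucent >= count / 2) : false;
    return stats;
}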
|
||||
|
||||
gpu::Texture* theTexture = nullptr;
|
||||
if ((image.width() > 0) && (image.height() > 0)) {
|
||||
|
||||
// bool isLinearRGB = true; //(_type == NORMAL_TEXTURE) || (_type == EMISSIVE_TEXTURE);
|
||||
bool isLinearRGB = false; //(_type == NORMAL_TEXTURE) || (_type == EMISSIVE_TEXTURE);
|
||||
|
||||
gpu::Element formatGPU = gpu::Element(gpu::VEC3, gpu::UINT8, (isLinearRGB ? gpu::RGB : gpu::SRGB));
|
||||
gpu::Element formatMip = gpu::Element(gpu::VEC3, gpu::UINT8, (isLinearRGB ? gpu::RGB : gpu::SRGB));
|
||||
if (image.hasAlphaChannel()) {
|
||||
formatGPU = gpu::Element(gpu::VEC4, gpu::UINT8, (isLinearRGB ? gpu::RGBA : gpu::SRGBA));
|
||||
formatMip = gpu::Element(gpu::VEC4, gpu::UINT8, (isLinearRGB ? gpu::BGRA : gpu::SBGRA));
|
||||
}
|
||||
|
||||
|
||||
const CubeLayout CUBEMAP_LAYOUTS[] = {
|
||||
// Here is the expected layout for the faces in an image with the 1/6 aspect ratio:
|
||||
//
|
||||
// WIDTH
|
||||
// <------>
|
||||
// ^ +------+
|
||||
// | | |
|
||||
// | | +X |
|
||||
// | | |
|
||||
// H +------+
|
||||
// E | |
|
||||
// I | -X |
|
||||
// G | |
|
||||
// H +------+
|
||||
// T | |
|
||||
// | | +Y |
|
||||
// | | |
|
||||
// | +------+
|
||||
// | | |
|
||||
// | | -Y |
|
||||
// | | |
|
||||
// H +------+
|
||||
// E | |
|
||||
// I | +Z |
|
||||
// G | |
|
||||
// H +------+
|
||||
// T | |
|
||||
// | | -Z |
|
||||
// | | |
|
||||
// V +------+
|
||||
//
|
||||
// FaceWidth = width = height / 6
|
||||
{ 1, 6,
|
||||
{0, 0, true, false},
|
||||
{0, 1, true, false},
|
||||
{0, 2, false, true},
|
||||
{0, 3, false, true},
|
||||
{0, 4, true, false},
|
||||
{0, 5, true, false}
|
||||
},
|
||||
|
||||
// Here is the expected layout for the faces in an image with the 3/4 aspect ratio:
|
||||
//
|
||||
// <-----------WIDTH----------->
|
||||
// ^ +------+------+------+------+
|
||||
// | | | | | |
|
||||
// | | | +Y | | |
|
||||
// | | | | | |
|
||||
// H +------+------+------+------+
|
||||
// E | | | | |
|
||||
// I | -X | -Z | +X | +Z |
|
||||
// G | | | | |
|
||||
// H +------+------+------+------+
|
||||
// T | | | | |
|
||||
// | | | -Y | | |
|
||||
// | | | | | |
|
||||
// V +------+------+------+------+
|
||||
//
|
||||
// FaceWidth = width / 4 = height / 3
|
||||
{ 4, 3,
|
||||
{2, 1, true, false},
|
||||
{0, 1, true, false},
|
||||
{1, 0, false, true},
|
||||
{1, 2, false, true},
|
||||
{3, 1, true, false},
|
||||
{1, 1, true, false}
|
||||
},
|
||||
|
||||
// Here is the expected layout for the faces in an image with the 4/3 aspect ratio:
|
||||
//
|
||||
// <-------WIDTH-------->
|
||||
// ^ +------+------+------+
|
||||
// | | | | |
|
||||
// | | | +Y | |
|
||||
// | | | | |
|
||||
// H +------+------+------+
|
||||
// E | | | |
|
||||
// I | -X | -Z | +X |
|
||||
// G | | | |
|
||||
// H +------+------+------+
|
||||
// T | | | |
|
||||
// | | | -Y | |
|
||||
// | | | | |
|
||||
// | +------+------+------+
|
||||
// | | | | |
|
||||
// | | | +Z! | | <+Z is upside down!
|
||||
// | | | | |
|
||||
// V +------+------+------+
|
||||
//
|
||||
// FaceWidth = width / 3 = height / 4
|
||||
{ 3, 4,
|
||||
{2, 1, true, false},
|
||||
{0, 1, true, false},
|
||||
{1, 0, false, true},
|
||||
{1, 2, false, true},
|
||||
{1, 3, false, true},
|
||||
{1, 1, true, false}
|
||||
}
|
||||
};
|
||||
const int NUM_CUBEMAP_LAYOUTS = sizeof(CUBEMAP_LAYOUTS) / sizeof(CubeLayout);
|
||||
|
||||
// Find the layout of the cubemap in the 2D image
|
||||
int foundLayout = -1;
|
||||
for (int i = 0; i < NUM_CUBEMAP_LAYOUTS; i++) {
|
||||
if ((image.height() * CUBEMAP_LAYOUTS[i]._widthRatio) == (image.width() * CUBEMAP_LAYOUTS[i]._heightRatio)) {
|
||||
foundLayout = i;
|
||||
break;
|
||||
}
|
||||
}
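The search above identifies the layout purely by aspect ratio: a candidate matches when height * widthRatio == width * heightRatio. A standalone sketch of that test, with the three ratios documented above hard-coded for illustration:

// Candidate cube-map aspect ratios: 1x6 vertical strip, 4x3 cross, 3x4 cross.
struct LayoutRatio { int widthRatio; int heightRatio; };
static const LayoutRatio kCubeLayouts[] = { { 1, 6 }, { 4, 3 }, { 3, 4 } };

// Returns the index of the matching layout, or -1 if the image cannot be a known cube map.
int findCubeLayout(int width, int height) {
    const int numLayouts = (int)(sizeof(kCubeLayouts) / sizeof(kCubeLayouts[0]));
    for (int i = 0; i < numLayouts; i++) {
        if (height * kCubeLayouts[i].widthRatio == width * kCubeLayouts[i].heightRatio) {
            return i;
        }
    }
    return -1;
}

// Example: a 1024x6144 strip matches layout 0, a 4096x3072 cross matches layout 1.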
|
||||
|
||||
std::vector<QImage> faces;
|
||||
// If found, go extract the faces as separate images
|
||||
if (foundLayout >= 0) {
|
||||
auto& layout = CUBEMAP_LAYOUTS[foundLayout];
|
||||
int faceWidth = image.width() / layout._widthRatio;
|
||||
|
||||
faces.push_back(image.copy(QRect(layout._faceXPos._x * faceWidth, layout._faceXPos._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceXPos._horizontalMirror, layout._faceXPos._verticalMirror));
|
||||
faces.push_back(image.copy(QRect(layout._faceXNeg._x * faceWidth, layout._faceXNeg._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceXNeg._horizontalMirror, layout._faceXNeg._verticalMirror));
|
||||
faces.push_back(image.copy(QRect(layout._faceYPos._x * faceWidth, layout._faceYPos._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceYPos._horizontalMirror, layout._faceYPos._verticalMirror));
|
||||
faces.push_back(image.copy(QRect(layout._faceYNeg._x * faceWidth, layout._faceYNeg._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceYNeg._horizontalMirror, layout._faceYNeg._verticalMirror));
|
||||
faces.push_back(image.copy(QRect(layout._faceZPos._x * faceWidth, layout._faceZPos._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceZPos._horizontalMirror, layout._faceZPos._verticalMirror));
|
||||
faces.push_back(image.copy(QRect(layout._faceZNeg._x * faceWidth, layout._faceZNeg._y * faceWidth, faceWidth, faceWidth)).mirrored(layout._faceZNeg._horizontalMirror, layout._faceZNeg._verticalMirror));
|
||||
} else {
|
||||
qCDebug(modelLog) << "Failed to find a known cube map layout from this image:" << QString(srcImageName.c_str());
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// If the 6 faces have been created, go on and define the actual texture
|
||||
if (faces.size() == gpu::Texture::NUM_FACES_PER_TYPE[gpu::Texture::TEX_CUBE]) {
|
||||
theTexture = gpu::Texture::createCube(formatGPU, faces[0].width(), gpu::Sampler(gpu::Sampler::FILTER_MIN_MAG_MIP_LINEAR, gpu::Sampler::WRAP_CLAMP));
|
||||
theTexture->autoGenerateMips(-1);
|
||||
int f = 0;
|
||||
for (auto& face : faces) {
|
||||
theTexture->assignStoredMipFace(0, formatMip, face.byteCount(), face.constBits(), f);
|
||||
f++;
|
||||
}
|
||||
|
||||
// Generate irradiance while we are at it
|
||||
theTexture->generateIrradiance();
|
||||
}
|
||||
}
|
||||
|
||||
return theTexture;
|
||||
}
|
67
libraries/model/src/model/TextureMap.h
Executable file
@ -0,0 +1,67 @@
//
|
||||
// TextureMap.h
|
||||
// libraries/model/src/model
|
||||
//
|
||||
// Created by Sam Gateau on 5/6/2015.
|
||||
// Copyright 2014 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#ifndef hifi_model_TextureMap_h
|
||||
#define hifi_model_TextureMap_h
|
||||
|
||||
#include "gpu/Texture.h"
|
||||
|
||||
#include "Material.h"
|
||||
#include "Transform.h"
|
||||
|
||||
#include <qurl.h>
|
||||
|
||||
class QImage;
|
||||
|
||||
namespace model {
|
||||
|
||||
typedef glm::vec3 Color;
|
||||
|
||||
class TextureUsage {
|
||||
public:
|
||||
gpu::Texture::Type _type{ gpu::Texture::TEX_2D };
|
||||
Material::MapFlags _materialUsage{ MaterialKey::DIFFUSE_MAP };
|
||||
|
||||
int _environmentUsage = 0;
|
||||
|
||||
static gpu::Texture* create2DTextureFromImage(const QImage& image, const std::string& srcImageName);
|
||||
static gpu::Texture* createNormalTextureFromBumpImage(const QImage& image, const std::string& srcImageName);
|
||||
static gpu::Texture* createCubeTextureFromImage(const QImage& image, const std::string& srcImageName);
|
||||
};
|
||||
|
||||
|
||||
|
||||
class TextureMap {
|
||||
public:
|
||||
TextureMap() {}
|
||||
|
||||
void setTextureSource(gpu::TextureSourcePointer& textureSource);
|
||||
|
||||
bool isDefined() const;
|
||||
gpu::TextureView getTextureView() const;
|
||||
|
||||
void setTextureTransform(const Transform& texcoordTransform);
|
||||
const Transform& getTextureTransform() const { return _texcoordTransform; }
|
||||
|
||||
void setLightmapOffsetScale(float offset, float scale);
|
||||
const glm::vec2& getLightmapOffsetScale() const { return _lightmapOffsetScale; }
|
||||
|
||||
protected:
|
||||
gpu::TextureSourcePointer _textureSource;
|
||||
|
||||
Transform _texcoordTransform;
|
||||
glm::vec2 _lightmapOffsetScale{ 0.0f, 1.0f };
|
||||
};
|
||||
typedef std::shared_ptr< TextureMap > TextureMapPointer;
|
||||
|
||||
};
|
||||
|
||||
#endif // hifi_model_TextureMap_h
|
||||
|
|
@ -1,28 +0,0 @@
|
|||
//
|
||||
// TextureStorage.cpp
|
||||
// libraries/model/src/model
|
||||
//
|
||||
// Created by Sam Gateau on 5/6/2015.
|
||||
// Copyright 2014 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#include "TextureStorage.h"
|
||||
|
||||
using namespace model;
|
||||
using namespace gpu;
|
||||
|
||||
// TextureStorage
|
||||
TextureStorage::TextureStorage() : Texture::Storage(),
|
||||
_gpuTexture(Texture::createFromStorage(this))
|
||||
{}
|
||||
|
||||
TextureStorage::~TextureStorage() {
|
||||
}
|
||||
|
||||
void TextureStorage::reset(const QUrl& url, const TextureUsage& usage) {
|
||||
_url = url;
|
||||
_usage = usage;
|
||||
}
|
||||
|
|
@ -1,57 +0,0 @@
|
|||
//
|
||||
// TextureStorage.h
|
||||
// libraries/model/src/model
|
||||
//
|
||||
// Created by Sam Gateau on 5/6/2015.
|
||||
// Copyright 2014 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#ifndef hifi_model_TextureStorage_h
|
||||
#define hifi_model_TextureStorage_h
|
||||
|
||||
#include "gpu/Texture.h"
|
||||
|
||||
#include "Material.h"
|
||||
|
||||
#include <qurl.h>
|
||||
|
||||
namespace model {
|
||||
|
||||
typedef glm::vec3 Color;
|
||||
|
||||
class TextureUsage {
|
||||
public:
|
||||
gpu::Texture::Type _type{ gpu::Texture::TEX_2D };
|
||||
Material::MapFlags _materialUsage{ MaterialKey::DIFFUSE_MAP };
|
||||
|
||||
int _environmentUsage = 0;
|
||||
};
|
||||
|
||||
// TextureStorage is a specialized version of the gpu::Texture::Storage
|
||||
// It provides the mechanism to create a texture from a Url and the intended usage
|
||||
// that guides the internal format used
|
||||
class TextureStorage : public gpu::Texture::Storage {
|
||||
public:
|
||||
TextureStorage();
|
||||
~TextureStorage();
|
||||
|
||||
const QUrl& getUrl() const { return _url; }
|
||||
gpu::Texture::Type getType() const { return _usage._type; }
|
||||
const gpu::TexturePointer& getGPUTexture() const { return _gpuTexture; }
|
||||
|
||||
virtual void reset() { Storage::reset(); }
|
||||
void reset(const QUrl& url, const TextureUsage& usage);
|
||||
|
||||
protected:
|
||||
gpu::TexturePointer _gpuTexture;
|
||||
TextureUsage _usage;
|
||||
QUrl _url;
|
||||
};
|
||||
typedef std::shared_ptr< TextureStorage > TextureStoragePointer;
|
||||
|
||||
};
|
||||
|
||||
#endif // hifi_model_TextureStorage_h
|
||||
|
|
@ -408,6 +408,7 @@ void Resource::handleReplyFinished() {
|
|||
_request = nullptr;
|
||||
}
|
||||
|
||||
|
||||
void Resource::downloadFinished(const QByteArray& data) {
|
||||
}
|
||||
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
set(TARGET_NAME procedural)
|
||||
|
||||
AUTOSCRIBE_SHADER_LIB(gpu model)
|
||||
|
||||
# use setup_hifi_library macro to setup our project and link appropriate Qt modules
|
||||
setup_hifi_library()
|
||||
|
||||
|
@ -7,4 +9,4 @@ add_dependency_external_projects(glm)
|
|||
find_package(GLM REQUIRED)
|
||||
target_include_directories(${TARGET_NAME} PUBLIC ${GLM_INCLUDE_DIRS})
|
||||
|
||||
link_hifi_libraries(shared gpu networking gpu-networking)
|
||||
link_hifi_libraries(shared gpu model model-networking)
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
#include <QtCore/QJsonDocument>
|
||||
#include <QtCore/QJsonObject>
|
||||
|
||||
#include <gpu-networking/ShaderCache.h>
|
||||
#include <gpu/Batch.h>
|
||||
#include <SharedUtil.h>
|
||||
#include <NumericalConstants.h>
|
||||
|
|
|
@ -18,8 +18,7 @@
|
|||
#include <gpu/Shader.h>
|
||||
#include <gpu/Pipeline.h>
|
||||
#include <gpu/Batch.h>
|
||||
#include <gpu-networking/ShaderCache.h>
|
||||
|
||||
#include <model-networking/ShaderCache.h>
|
||||
|
||||
// FIXME better encapsulation
|
||||
// FIXME better mechanism for extending to things rendered using shaders other than simple.slv
|
||||
|
|
79
libraries/procedural/src/procedural/ProceduralSkybox.cpp
Normal file
@ -0,0 +1,79 @@
//
|
||||
// ProceduralSkybox.cpp
|
||||
// libraries/procedural/src/procedural
|
||||
//
|
||||
// Created by Sam Gateau on 9/21/2015.
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#include "ProceduralSkybox.h"
|
||||
|
||||
|
||||
#include <gpu/Batch.h>
|
||||
#include <gpu/Context.h>
|
||||
#include <ViewFrustum.h>
|
||||
|
||||
#include "ProceduralSkybox_vert.h"
|
||||
#include "ProceduralSkybox_frag.h"
|
||||
|
||||
ProceduralSkybox::ProceduralSkybox() : model::Skybox() {
|
||||
}
|
||||
|
||||
ProceduralSkybox::ProceduralSkybox(const ProceduralSkybox& skybox) :
|
||||
model::Skybox(skybox),
|
||||
_procedural(skybox._procedural) {
|
||||
|
||||
}
|
||||
|
||||
void ProceduralSkybox::setProcedural(const ProceduralPointer& procedural) {
|
||||
_procedural = procedural;
|
||||
if (_procedural) {
|
||||
_procedural->_vertexSource = ProceduralSkybox_vert;
|
||||
_procedural->_fragmentSource = ProceduralSkybox_frag;
|
||||
// No pipeline state customization
|
||||
}
|
||||
}
|
||||
|
||||
void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& frustum) const {
|
||||
ProceduralSkybox::render(batch, frustum, (*this));
|
||||
}
|
||||
|
||||
void ProceduralSkybox::render(gpu::Batch& batch, const ViewFrustum& viewFrustum, const ProceduralSkybox& skybox) {
|
||||
if (!(skybox._procedural)) {
|
||||
Skybox::render(batch, viewFrustum, skybox);
|
||||
}
|
||||
|
||||
static gpu::BufferPointer theBuffer;
|
||||
static gpu::Stream::FormatPointer theFormat;
|
||||
|
||||
if (skybox._procedural && skybox._procedural->_enabled && skybox._procedural->ready()) {
|
||||
if (!theBuffer) {
|
||||
const float CLIP = 1.0f;
|
||||
const glm::vec2 vertices[4] = { { -CLIP, -CLIP }, { CLIP, -CLIP }, { -CLIP, CLIP }, { CLIP, CLIP } };
|
||||
theBuffer = std::make_shared<gpu::Buffer>(sizeof(vertices), (const gpu::Byte*) vertices);
|
||||
theFormat = std::make_shared<gpu::Stream::Format>();
|
||||
theFormat->setAttribute(gpu::Stream::POSITION, gpu::Stream::POSITION, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::XYZ));
|
||||
}
|
||||
|
||||
glm::mat4 projMat;
|
||||
viewFrustum.evalProjectionMatrix(projMat);
|
||||
|
||||
Transform viewTransform;
|
||||
viewFrustum.evalViewTransform(viewTransform);
|
||||
batch.setProjectionTransform(projMat);
|
||||
batch.setViewTransform(viewTransform);
|
||||
batch.setModelTransform(Transform()); // only for Mac
|
||||
batch.setInputBuffer(gpu::Stream::POSITION, theBuffer, 0, 8);
|
||||
batch.setInputFormat(theFormat);
|
||||
|
||||
if (skybox.getCubemap() && skybox.getCubemap()->isDefined()) {
|
||||
batch.setResourceTexture(0, skybox.getCubemap());
|
||||
}
|
||||
|
||||
skybox._procedural->prepare(batch, glm::vec3(1));
|
||||
batch.draw(gpu::TRIANGLE_STRIP, 4);
|
||||
}
|
||||
}
|
||||
|
39
libraries/procedural/src/procedural/ProceduralSkybox.h
Normal file
@ -0,0 +1,39 @@
//
|
||||
// ProceduralSkybox.h
|
||||
// libraries/procedural/src/procedural
|
||||
//
|
||||
// Created by Sam Gateau on 9/21/15.
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
#pragma once
|
||||
#ifndef hifi_ProceduralSkybox_h
|
||||
#define hifi_ProceduralSkybox_h
|
||||
|
||||
#include <model/Skybox.h>
|
||||
|
||||
#include "Procedural.h"
|
||||
|
||||
typedef std::shared_ptr<Procedural> ProceduralPointer;
|
||||
|
||||
class ProceduralSkybox: public model::Skybox {
|
||||
public:
|
||||
ProceduralSkybox();
|
||||
ProceduralSkybox(const ProceduralSkybox& skybox);
|
||||
ProceduralSkybox& operator= (const ProceduralSkybox& skybox);
|
||||
virtual ~ProceduralSkybox() {};
|
||||
|
||||
void setProcedural(const ProceduralPointer& procedural);
|
||||
|
||||
virtual void render(gpu::Batch& batch, const ViewFrustum& frustum) const;
|
||||
static void render(gpu::Batch& batch, const ViewFrustum& frustum, const ProceduralSkybox& skybox);
|
||||
|
||||
protected:
|
||||
ProceduralPointer _procedural;
|
||||
};
|
||||
typedef std::shared_ptr< ProceduralSkybox > ProceduralSkyboxPointer;
|
||||
|
||||
#endif
|
50
libraries/procedural/src/procedural/ProceduralSkybox.slf
Normal file
@ -0,0 +1,50 @@
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
// skybox.frag
|
||||
// fragment shader
|
||||
//
|
||||
// Created by Sam Gateau on 5/5/2015.
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
uniform samplerCube cubeMap;
|
||||
|
||||
struct Skybox {
|
||||
vec4 _color;
|
||||
};
|
||||
|
||||
uniform skyboxBuffer {
|
||||
Skybox _skybox;
|
||||
};
|
||||
|
||||
in vec3 _normal;
|
||||
out vec4 _fragColor;
|
||||
|
||||
//PROCEDURAL_COMMON_BLOCK
|
||||
|
||||
#line 1001
|
||||
//PROCEDURAL_BLOCK
|
||||
|
||||
#line 2033
|
||||
void main(void) {
|
||||
|
||||
#ifdef PROCEDURAL
|
||||
|
||||
vec3 color = getSkyboxColor();
|
||||
_fragColor = vec4(color, 0.0);
|
||||
|
||||
#else
|
||||
|
||||
vec3 coord = normalize(_normal);
|
||||
vec3 texel = texture(cubeMap, coord).rgb;
|
||||
vec3 color = texel * _skybox._color.rgb;
|
||||
vec3 pixel = pow(color, vec3(1.0/2.2)); // manual Gamma correction
|
||||
_fragColor = vec4(pixel, 0.0);
|
||||
|
||||
#endif
|
||||
|
||||
}
|
34
libraries/procedural/src/procedural/ProceduralSkybox.slv
Normal file
@ -0,0 +1,34 @@
<@include gpu/Config.slh@>
|
||||
<$VERSION_HEADER$>
|
||||
// Generated on <$_SCRIBE_DATE$>
|
||||
// skybox.vert
|
||||
// vertex shader
|
||||
//
|
||||
// Created by Sam Gateau on 5/5/2015.
|
||||
// Copyright 2015 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
|
||||
|
||||
<@include gpu/Inputs.slh@>
|
||||
|
||||
<@include gpu/Transform.slh@>
|
||||
|
||||
<$declareStandardTransform()$>
|
||||
|
||||
out vec3 _normal;
|
||||
|
||||
void main(void) {
|
||||
// standard transform
|
||||
TransformCamera cam = getTransformCamera();
|
||||
vec3 clipDir = vec3(inPosition.xy, 0.0);
|
||||
vec3 eyeDir;
|
||||
|
||||
<$transformClipToEyeDir(cam, clipDir, eyeDir)$>
|
||||
<$transformEyeToWorldDir(cam, eyeDir, _normal)$>
|
||||
|
||||
// Position is supposed to come in clip space
|
||||
gl_Position = vec4(inPosition.xy, 0.0, 1.0);
|
||||
}
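The vertex shader above emits the quad corners directly in clip space and lets transformClipToEyeDir / transformEyeToWorldDir turn them into world-space view directions for the cube-map lookup. A CPU-side sketch of the equivalent math with glm (function and matrix names are assumptions, not shader code from this commit):

#include <glm/glm.hpp>

// Reconstruct the world-space view direction for one corner of the full-screen quad.
// 'proj' and 'view' are the camera projection and view matrices; 'clipXY' is in [-1, 1].
glm::vec3 skyboxDirection(const glm::mat4& proj, const glm::mat4& view, const glm::vec2& clipXY) {
    // Un-project the clip-space corner onto the near plane, in eye space.
    glm::vec4 eye = glm::inverse(proj) * glm::vec4(clipXY, -1.0f, 1.0f);
    eye /= eye.w;
    // The camera sits at the eye-space origin, so the point itself is the direction;
    // rotate it into world space, ignoring translation.
    glm::vec3 worldDir = glm::mat3(glm::inverse(view)) * glm::vec3(eye);
    return glm::normalize(worldDir);
}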
|
|
@ -40,4 +40,4 @@ add_dependency_external_projects(oglplus)
|
|||
find_package(OGLPLUS REQUIRED)
|
||||
target_include_directories(${TARGET_NAME} PUBLIC ${OGLPLUS_INCLUDE_DIRS})
|
||||
|
||||
link_hifi_libraries(shared gpu gpu-networking procedural model render environment animation fbx)
|
||||
link_hifi_libraries(shared gpu procedural model model-networking render environment animation fbx)
|
||||
|
|
|
@ -33,6 +33,7 @@ vec3 bestFitNormal(vec3 normal) {
|
|||
texcoord = (texcoord.x < texcoord.y ? texcoord.yx : texcoord.xy);
|
||||
texcoord.y /= texcoord.x;
|
||||
vec3 cN = normal / maxNAbs;
|
||||
|
||||
float fittingScale = texture(normalFittingMap, texcoord).a;
|
||||
cN *= fittingScale;
|
||||
return (cN * 0.5 + 0.5);
|
||||
|
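The new normalFittingMap lookup implements best-fit normal packing: before the usual n * 0.5 + 0.5 quantization, the normal is divided by its largest component and rescaled by a per-direction factor from a lookup texture so that the rounded 8-bit value lies closer to the true direction. A CPU-side sketch of the pack step, with fittingScale standing in for the texture fetch (an assumption, not part of this change):

#include <glm/glm.hpp>
#include <cstdint>

// Pack a unit normal into 8 bits per channel; fittingScale == 1.0 reproduces plain packing.
void packBestFitNormal(const glm::vec3& n, float fittingScale, uint8_t out[3]) {
    const float maxAbs = glm::max(glm::abs(n.x), glm::max(glm::abs(n.y), glm::abs(n.z)));
    const glm::vec3 cN = (n / maxAbs) * fittingScale;   // stretch toward the best representable vector
    const glm::vec3 packed = cN * 0.5f + 0.5f;          // [-1, 1] -> [0, 1]
    for (int i = 0; i < 3; i++) {
        out[i] = (uint8_t)(packed[i] * 255.0f + 0.5f);
    }
}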
@ -49,6 +50,7 @@ void packDeferredFragment(vec3 normal, float alpha, vec3 diffuse, vec3 specular,
|
|||
if (alpha != glowIntensity) {
|
||||
discard;
|
||||
}
|
||||
|
||||
_fragColor0 = vec4(diffuse.rgb, alpha);
|
||||
_fragColor1 = vec4(bestFitNormal(normal), 1.0);
|
||||
_fragColor2 = vec4(specular, shininess / 128.0);
|
||||
|
|
|
@ -29,10 +29,13 @@
|
|||
|
||||
#include "gpu/StandardShaderLib.h"
|
||||
|
||||
#include "model/TextureMap.h"
|
||||
|
||||
//#define WANT_DEBUG
|
||||
|
||||
const int GeometryCache::UNKNOWN_ID = -1;
|
||||
|
||||
|
||||
static const uint FLOATS_PER_VERTEX = 3;
|
||||
static const uint VERTICES_PER_TRIANGLE = 3;
|
||||
static const uint TRIANGLES_PER_QUAD = 2;
|
||||
|
@ -490,10 +493,9 @@ gpu::Stream::FormatPointer& getInstancedSolidStreamFormat() {
|
|||
return INSTANCED_SOLID_STREAM_FORMAT;
|
||||
}
|
||||
|
||||
|
||||
GeometryCache::GeometryCache() {
|
||||
const qint64 GEOMETRY_DEFAULT_UNUSED_MAX_SIZE = DEFAULT_UNUSED_MAX_SIZE;
|
||||
setUnusedResourceCacheSize(GEOMETRY_DEFAULT_UNUSED_MAX_SIZE);
|
||||
GeometryCache::GeometryCache() :
|
||||
_nextID(0)
|
||||
{
|
||||
buildShapes();
|
||||
}
|
||||
|
||||
|
@ -506,13 +508,6 @@ GeometryCache::~GeometryCache() {
|
|||
#endif //def WANT_DEBUG
|
||||
}
|
||||
|
||||
QSharedPointer<Resource> GeometryCache::createResource(const QUrl& url, const QSharedPointer<Resource>& fallback,
|
||||
bool delayLoad, const void* extra) {
|
||||
// NetworkGeometry is no longer a subclass of Resource, but this method is still required because it is pure virtual.
|
||||
assert(false);
|
||||
return QSharedPointer<Resource>();
|
||||
}
|
||||
|
||||
void setupBatchInstance(gpu::Batch& batch, gpu::BufferPointer transformBuffer, gpu::BufferPointer colorBuffer) {
|
||||
gpu::BufferView colorView(colorBuffer, COLOR_ELEMENT);
|
||||
batch.setInputBuffer(gpu::Stream::COLOR, colorView);
|
||||
|
@ -1735,466 +1730,3 @@ void GeometryCache::useSimpleDrawPipeline(gpu::Batch& batch, bool noBlend) {
|
|||
batch.setPipeline(_standardDrawPipeline);
|
||||
}
|
||||
}
|
||||
|
||||
GeometryReader::GeometryReader(const QUrl& url, const QByteArray& data, const QVariantHash& mapping) :
|
||||
_url(url),
|
||||
_data(data),
|
||||
_mapping(mapping) {
|
||||
}
|
||||
|
||||
void GeometryReader::run() {
|
||||
try {
|
||||
if (_data.isEmpty()) {
|
||||
throw QString("Reply is NULL ?!");
|
||||
}
|
||||
QString urlname = _url.path().toLower();
|
||||
bool urlValid = true;
|
||||
urlValid &= !urlname.isEmpty();
|
||||
urlValid &= !_url.path().isEmpty();
|
||||
urlValid &= _url.path().toLower().endsWith(".fbx") || _url.path().toLower().endsWith(".obj");
|
||||
|
||||
if (urlValid) {
|
||||
// Let's read the binaries from the network
|
||||
FBXGeometry* fbxgeo = nullptr;
|
||||
if (_url.path().toLower().endsWith(".fbx")) {
|
||||
const bool grabLightmaps = true;
|
||||
const float lightmapLevel = 1.0f;
|
||||
fbxgeo = readFBX(_data, _mapping, _url.path(), grabLightmaps, lightmapLevel);
|
||||
} else if (_url.path().toLower().endsWith(".obj")) {
|
||||
fbxgeo = OBJReader().readOBJ(_data, _mapping, _url);
|
||||
} else {
|
||||
QString errorStr("usupported format");
|
||||
emit onError(NetworkGeometry::ModelParseError, errorStr);
|
||||
}
|
||||
emit onSuccess(fbxgeo);
|
||||
} else {
|
||||
throw QString("url is invalid");
|
||||
}
|
||||
|
||||
} catch (const QString& error) {
|
||||
qCDebug(renderutils) << "Error reading " << _url << ": " << error;
|
||||
emit onError(NetworkGeometry::ModelParseError, error);
|
||||
}
|
||||
}
|
||||
|
||||
NetworkGeometry::NetworkGeometry(const QUrl& url, bool delayLoad, const QVariantHash& mapping, const QUrl& textureBaseUrl) :
|
||||
_url(url),
|
||||
_mapping(mapping),
|
||||
_textureBaseUrl(textureBaseUrl.isValid() ? textureBaseUrl : url) {
|
||||
|
||||
if (delayLoad) {
|
||||
_state = DelayState;
|
||||
} else {
|
||||
attemptRequestInternal();
|
||||
}
|
||||
}
|
||||
|
||||
NetworkGeometry::~NetworkGeometry() {
|
||||
if (_resource) {
|
||||
_resource->deleteLater();
|
||||
}
|
||||
}
|
||||
|
||||
void NetworkGeometry::attemptRequest() {
|
||||
if (_state == DelayState) {
|
||||
attemptRequestInternal();
|
||||
}
|
||||
}
|
||||
|
||||
void NetworkGeometry::attemptRequestInternal() {
|
||||
if (_url.path().toLower().endsWith(".fst")) {
|
||||
_mappingUrl = _url;
|
||||
requestMapping(_url);
|
||||
} else {
|
||||
_modelUrl = _url;
|
||||
requestModel(_url);
|
||||
}
|
||||
}
|
||||
|
||||
bool NetworkGeometry::isLoaded() const {
|
||||
return _state == SuccessState;
|
||||
}
|
||||
|
||||
bool NetworkGeometry::isLoadedWithTextures() const {
|
||||
if (!isLoaded()) {
|
||||
return false;
|
||||
}
|
||||
if (!_isLoadedWithTextures) {
|
||||
for (auto&& mesh : _meshes) {
|
||||
for (auto && part : mesh->_parts) {
|
||||
if ((part->diffuseTexture && !part->diffuseTexture->isLoaded()) ||
|
||||
(part->normalTexture && !part->normalTexture->isLoaded()) ||
|
||||
(part->specularTexture && !part->specularTexture->isLoaded()) ||
|
||||
(part->emissiveTexture && !part->emissiveTexture->isLoaded())) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
_isLoadedWithTextures = true;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void NetworkGeometry::setTextureWithNameToURL(const QString& name, const QUrl& url) {
|
||||
if (_meshes.size() > 0) {
|
||||
auto textureCache = DependencyManager::get<TextureCache>();
|
||||
for (size_t i = 0; i < _meshes.size(); i++) {
|
||||
NetworkMesh& mesh = *(_meshes[i].get());
|
||||
for (size_t j = 0; j < mesh._parts.size(); j++) {
|
||||
NetworkMeshPart& part = *(mesh._parts[j].get());
|
||||
QSharedPointer<NetworkTexture> matchingTexture = QSharedPointer<NetworkTexture>();
|
||||
if (part.diffuseTextureName == name) {
|
||||
part.diffuseTexture = textureCache->getTexture(url, DEFAULT_TEXTURE, _geometry->meshes[i].isEye);
|
||||
} else if (part.normalTextureName == name) {
|
||||
part.normalTexture = textureCache->getTexture(url);
|
||||
} else if (part.specularTextureName == name) {
|
||||
part.specularTexture = textureCache->getTexture(url);
|
||||
} else if (part.emissiveTextureName == name) {
|
||||
part.emissiveTexture = textureCache->getTexture(url);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
qCWarning(renderutils) << "Ignoring setTextureWirthNameToURL() geometry not ready." << name << url;
|
||||
}
|
||||
_isLoadedWithTextures = false;
|
||||
}
|
||||
|
||||
QStringList NetworkGeometry::getTextureNames() const {
|
||||
QStringList result;
|
||||
for (size_t i = 0; i < _meshes.size(); i++) {
|
||||
const NetworkMesh& mesh = *(_meshes[i].get());
|
||||
for (size_t j = 0; j < mesh._parts.size(); j++) {
|
||||
const NetworkMeshPart& part = *(mesh._parts[j].get());
|
||||
|
||||
if (!part.diffuseTextureName.isEmpty() && part.diffuseTexture) {
|
||||
QString textureURL = part.diffuseTexture->getURL().toString();
|
||||
result << part.diffuseTextureName + ":" + textureURL;
|
||||
}
|
||||
|
||||
if (!part.normalTextureName.isEmpty() && part.normalTexture) {
|
||||
QString textureURL = part.normalTexture->getURL().toString();
|
||||
result << part.normalTextureName + ":" + textureURL;
|
||||
}
|
||||
|
||||
if (!part.specularTextureName.isEmpty() && part.specularTexture) {
|
||||
QString textureURL = part.specularTexture->getURL().toString();
|
||||
result << part.specularTextureName + ":" + textureURL;
|
||||
}
|
||||
|
||||
if (!part.emissiveTextureName.isEmpty() && part.emissiveTexture) {
|
||||
QString textureURL = part.emissiveTexture->getURL().toString();
|
||||
result << part.emissiveTextureName + ":" + textureURL;
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
void NetworkGeometry::requestMapping(const QUrl& url) {
|
||||
_state = RequestMappingState;
|
||||
if (_resource) {
|
||||
_resource->deleteLater();
|
||||
}
|
||||
_resource = new Resource(url, false);
|
||||
connect(_resource, &Resource::loaded, this, &NetworkGeometry::mappingRequestDone);
|
||||
connect(_resource, &Resource::failed, this, &NetworkGeometry::mappingRequestError);
|
||||
}
|
||||
|
||||
void NetworkGeometry::requestModel(const QUrl& url) {
|
||||
_state = RequestModelState;
|
||||
if (_resource) {
|
||||
_resource->deleteLater();
|
||||
}
|
||||
_modelUrl = url;
|
||||
_resource = new Resource(url, false);
|
||||
connect(_resource, &Resource::loaded, this, &NetworkGeometry::modelRequestDone);
|
||||
connect(_resource, &Resource::failed, this, &NetworkGeometry::modelRequestError);
|
||||
}
|
||||
|
||||
void NetworkGeometry::mappingRequestDone(const QByteArray& data) {
|
||||
assert(_state == RequestMappingState);
|
||||
|
||||
// parse the mapping file
|
||||
_mapping = FSTReader::readMapping(data);
|
||||
|
||||
QUrl replyUrl = _mappingUrl;
|
||||
QString modelUrlStr = _mapping.value("filename").toString();
|
||||
if (modelUrlStr.isNull()) {
|
||||
qCDebug(renderutils) << "Mapping file " << _url << "has no \"filename\" entry";
|
||||
emit onFailure(*this, MissingFilenameInMapping);
|
||||
} else {
|
||||
// read _textureBase from mapping file, if present
|
||||
QString texdir = _mapping.value("texdir").toString();
|
||||
if (!texdir.isNull()) {
|
||||
if (!texdir.endsWith('/')) {
|
||||
texdir += '/';
|
||||
}
|
||||
_textureBaseUrl = replyUrl.resolved(texdir);
|
||||
}
|
||||
|
||||
_modelUrl = replyUrl.resolved(modelUrlStr);
|
||||
requestModel(_modelUrl);
|
||||
}
|
||||
}
|
||||
|
||||
void NetworkGeometry::mappingRequestError(QNetworkReply::NetworkError error) {
|
||||
assert(_state == RequestMappingState);
|
||||
_state = ErrorState;
|
||||
emit onFailure(*this, MappingRequestError);
|
||||
}
|
||||
|
||||
void NetworkGeometry::modelRequestDone(const QByteArray& data) {
|
||||
assert(_state == RequestModelState);
|
||||
|
||||
_state = ParsingModelState;
|
||||
|
||||
// asynchronously parse the model file.
|
||||
GeometryReader* geometryReader = new GeometryReader(_modelUrl, data, _mapping);
|
||||
connect(geometryReader, SIGNAL(onSuccess(FBXGeometry*)), SLOT(modelParseSuccess(FBXGeometry*)));
|
||||
connect(geometryReader, SIGNAL(onError(int, QString)), SLOT(modelParseError(int, QString)));
|
||||
|
||||
QThreadPool::globalInstance()->start(geometryReader);
|
||||
}
|
||||
|
||||
void NetworkGeometry::modelRequestError(QNetworkReply::NetworkError error) {
|
||||
assert(_state == RequestModelState);
|
||||
_state = ErrorState;
|
||||
emit onFailure(*this, ModelRequestError);
|
||||
}
|
||||
|
||||
static NetworkMesh* buildNetworkMesh(const FBXMesh& mesh, const QUrl& textureBaseUrl) {
|
||||
auto textureCache = DependencyManager::get<TextureCache>();
|
||||
NetworkMesh* networkMesh = new NetworkMesh();
|
||||
|
||||
int totalIndices = 0;
|
||||
bool checkForTexcoordLightmap = false;
|
||||
|
||||
// process network parts
|
||||
foreach (const FBXMeshPart& part, mesh.parts) {
|
||||
NetworkMeshPart* networkPart = new NetworkMeshPart();
|
||||
|
||||
if (!part.diffuseTexture.filename.isEmpty()) {
|
||||
networkPart->diffuseTexture = textureCache->getTexture(textureBaseUrl.resolved(QUrl(part.diffuseTexture.filename)), DEFAULT_TEXTURE,
|
||||
mesh.isEye, part.diffuseTexture.content);
|
||||
networkPart->diffuseTextureName = part.diffuseTexture.name;
|
||||
}
|
||||
if (!part.normalTexture.filename.isEmpty()) {
|
||||
networkPart->normalTexture = textureCache->getTexture(textureBaseUrl.resolved(QUrl(part.normalTexture.filename)), NORMAL_TEXTURE,
|
||||
false, part.normalTexture.content);
|
||||
networkPart->normalTextureName = part.normalTexture.name;
|
||||
}
|
||||
if (!part.specularTexture.filename.isEmpty()) {
|
||||
networkPart->specularTexture = textureCache->getTexture(textureBaseUrl.resolved(QUrl(part.specularTexture.filename)), SPECULAR_TEXTURE,
|
||||
false, part.specularTexture.content);
|
||||
networkPart->specularTextureName = part.specularTexture.name;
|
||||
}
|
||||
if (!part.emissiveTexture.filename.isEmpty()) {
|
||||
networkPart->emissiveTexture = textureCache->getTexture(textureBaseUrl.resolved(QUrl(part.emissiveTexture.filename)), EMISSIVE_TEXTURE,
|
||||
false, part.emissiveTexture.content);
|
||||
networkPart->emissiveTextureName = part.emissiveTexture.name;
|
||||
checkForTexcoordLightmap = true;
|
||||
}
|
||||
networkMesh->_parts.emplace_back(networkPart);
|
||||
totalIndices += (part.quadIndices.size() + part.triangleIndices.size());
|
||||
}
|
||||
|
||||
// initialize index buffer
|
||||
{
|
||||
networkMesh->_indexBuffer = std::make_shared<gpu::Buffer>();
|
||||
networkMesh->_indexBuffer->resize(totalIndices * sizeof(int));
|
||||
int offset = 0;
|
||||
foreach(const FBXMeshPart& part, mesh.parts) {
|
||||
networkMesh->_indexBuffer->setSubData(offset, part.quadIndices.size() * sizeof(int),
|
||||
(gpu::Byte*) part.quadIndices.constData());
|
||||
offset += part.quadIndices.size() * sizeof(int);
|
||||
networkMesh->_indexBuffer->setSubData(offset, part.triangleIndices.size() * sizeof(int),
|
||||
(gpu::Byte*) part.triangleIndices.constData());
|
||||
offset += part.triangleIndices.size() * sizeof(int);
|
||||
}
|
||||
}
|
||||
|
||||
// initialize vertex buffer
|
||||
{
|
||||
networkMesh->_vertexBuffer = std::make_shared<gpu::Buffer>();
|
||||
// if we don't need to do any blending, the positions/normals can be static
|
||||
if (mesh.blendshapes.isEmpty()) {
|
||||
int normalsOffset = mesh.vertices.size() * sizeof(glm::vec3);
|
||||
int tangentsOffset = normalsOffset + mesh.normals.size() * sizeof(glm::vec3);
|
||||
int colorsOffset = tangentsOffset + mesh.tangents.size() * sizeof(glm::vec3);
|
||||
int texCoordsOffset = colorsOffset + mesh.colors.size() * sizeof(glm::vec3);
|
||||
int texCoords1Offset = texCoordsOffset + mesh.texCoords.size() * sizeof(glm::vec2);
|
||||
int clusterIndicesOffset = texCoords1Offset + mesh.texCoords1.size() * sizeof(glm::vec2);
|
||||
int clusterWeightsOffset = clusterIndicesOffset + mesh.clusterIndices.size() * sizeof(glm::vec4);
|
||||
|
||||
networkMesh->_vertexBuffer->resize(clusterWeightsOffset + mesh.clusterWeights.size() * sizeof(glm::vec4));
|
||||
|
||||
networkMesh->_vertexBuffer->setSubData(0, mesh.vertices.size() * sizeof(glm::vec3), (gpu::Byte*) mesh.vertices.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(normalsOffset, mesh.normals.size() * sizeof(glm::vec3), (gpu::Byte*) mesh.normals.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(tangentsOffset,
|
||||
mesh.tangents.size() * sizeof(glm::vec3), (gpu::Byte*) mesh.tangents.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(colorsOffset, mesh.colors.size() * sizeof(glm::vec3), (gpu::Byte*) mesh.colors.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(texCoordsOffset,
|
||||
mesh.texCoords.size() * sizeof(glm::vec2), (gpu::Byte*) mesh.texCoords.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(texCoords1Offset,
|
||||
mesh.texCoords1.size() * sizeof(glm::vec2), (gpu::Byte*) mesh.texCoords1.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(clusterIndicesOffset,
|
||||
mesh.clusterIndices.size() * sizeof(glm::vec4), (gpu::Byte*) mesh.clusterIndices.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(clusterWeightsOffset,
|
||||
mesh.clusterWeights.size() * sizeof(glm::vec4), (gpu::Byte*) mesh.clusterWeights.constData());
|
||||
|
||||
// otherwise, at least the cluster indices/weights can be static
|
||||
networkMesh->_vertexStream = std::make_shared<gpu::BufferStream>();
|
||||
networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, 0, sizeof(glm::vec3));
|
||||
if (mesh.normals.size()) {
|
||||
networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, normalsOffset, sizeof(glm::vec3));
|
||||
}
|
||||
if (mesh.tangents.size()) {
|
||||
networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, tangentsOffset, sizeof(glm::vec3));
|
||||
}
|
||||
if (mesh.colors.size()) {
|
||||
networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, colorsOffset, sizeof(glm::vec3));
|
||||
}
|
||||
if (mesh.texCoords.size()) {
|
||||
networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, texCoordsOffset, sizeof(glm::vec2));
|
||||
}
|
||||
if (mesh.texCoords1.size()) {
|
||||
networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, texCoords1Offset, sizeof(glm::vec2));
|
||||
}
|
||||
if (mesh.clusterIndices.size()) {
|
||||
networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, clusterIndicesOffset, sizeof(glm::vec4));
|
||||
}
|
||||
if (mesh.clusterWeights.size()) {
|
||||
networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, clusterWeightsOffset, sizeof(glm::vec4));
|
||||
}
|
||||
int channelNum = 0;
|
||||
networkMesh->_vertexFormat = std::make_shared<gpu::Stream::Format>();
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::POSITION, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ), 0);
|
||||
if (mesh.normals.size()) {
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::NORMAL, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ));
|
||||
}
|
||||
if (mesh.tangents.size()) {
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::TANGENT, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ));
|
||||
}
|
||||
if (mesh.colors.size()) {
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::COLOR, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::RGB));
|
||||
}
|
||||
if (mesh.texCoords.size()) {
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::TEXCOORD, channelNum++, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV));
|
||||
}
|
||||
if (mesh.texCoords1.size()) {
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::TEXCOORD1, channelNum++, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV));
|
||||
} else if (checkForTexcoordLightmap && mesh.texCoords.size()) {
|
||||
// needs a lightmap texcoord UV set but has no uv#1, so just reuse the same channel
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::TEXCOORD1, channelNum - 1, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV));
|
||||
}
|
||||
if (mesh.clusterIndices.size()) {
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_INDEX, channelNum++, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW));
|
||||
}
|
||||
if (mesh.clusterWeights.size()) {
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT, channelNum++, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW));
|
||||
}
|
||||
}
|
||||
else {
|
||||
int colorsOffset = mesh.tangents.size() * sizeof(glm::vec3);
|
||||
int texCoordsOffset = colorsOffset + mesh.colors.size() * sizeof(glm::vec3);
|
||||
int clusterIndicesOffset = texCoordsOffset + mesh.texCoords.size() * sizeof(glm::vec2);
|
||||
int clusterWeightsOffset = clusterIndicesOffset + mesh.clusterIndices.size() * sizeof(glm::vec4);
|
||||
|
||||
networkMesh->_vertexBuffer->resize(clusterWeightsOffset + mesh.clusterWeights.size() * sizeof(glm::vec4));
|
||||
networkMesh->_vertexBuffer->setSubData(0, mesh.tangents.size() * sizeof(glm::vec3), (gpu::Byte*) mesh.tangents.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(colorsOffset, mesh.colors.size() * sizeof(glm::vec3), (gpu::Byte*) mesh.colors.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(texCoordsOffset,
|
||||
mesh.texCoords.size() * sizeof(glm::vec2), (gpu::Byte*) mesh.texCoords.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(clusterIndicesOffset,
|
||||
mesh.clusterIndices.size() * sizeof(glm::vec4), (gpu::Byte*) mesh.clusterIndices.constData());
|
||||
networkMesh->_vertexBuffer->setSubData(clusterWeightsOffset,
|
||||
mesh.clusterWeights.size() * sizeof(glm::vec4), (gpu::Byte*) mesh.clusterWeights.constData());
|
||||
|
||||
networkMesh->_vertexStream = std::make_shared<gpu::BufferStream>();
|
||||
if (mesh.tangents.size()) {
|
||||
networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, 0, sizeof(glm::vec3));
|
||||
}
|
||||
if (mesh.colors.size()) {
|
||||
networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, colorsOffset, sizeof(glm::vec3));
|
||||
}
|
||||
if (mesh.texCoords.size()) {
|
||||
networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, texCoordsOffset, sizeof(glm::vec2));
|
||||
}
|
||||
if (mesh.clusterIndices.size()) {
|
||||
networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, clusterIndicesOffset, sizeof(glm::vec4));
|
||||
}
|
||||
if (mesh.clusterWeights.size()) {
|
||||
networkMesh->_vertexStream->addBuffer(networkMesh->_vertexBuffer, clusterWeightsOffset, sizeof(glm::vec4));
|
||||
}
|
||||
|
||||
int channelNum = 0;
|
||||
networkMesh->_vertexFormat = std::make_shared<gpu::Stream::Format>();
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::POSITION, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ));
|
||||
if (mesh.normals.size()) {
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::NORMAL, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ));
|
||||
}
|
||||
if (mesh.tangents.size()) {
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::TANGENT, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::XYZ));
|
||||
}
|
||||
if (mesh.colors.size()) {
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::COLOR, channelNum++, gpu::Element(gpu::VEC3, gpu::FLOAT, gpu::RGB));
|
||||
}
|
||||
if (mesh.texCoords.size()) {
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::TEXCOORD, channelNum++, gpu::Element(gpu::VEC2, gpu::FLOAT, gpu::UV));
|
||||
}
|
||||
if (mesh.clusterIndices.size()) {
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_INDEX, channelNum++, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW));
|
||||
}
|
||||
if (mesh.clusterWeights.size()) {
|
||||
networkMesh->_vertexFormat->setAttribute(gpu::Stream::SKIN_CLUSTER_WEIGHT, channelNum++, gpu::Element(gpu::VEC4, gpu::FLOAT, gpu::XYZW));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return networkMesh;
|
||||
}
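The static-mesh path above lays each attribute out as a contiguous section of one vertex buffer, with every section's offset being the running total of the sizes before it. A small sketch of that bookkeeping (the helper name is illustrative only):

#include <cstddef>
#include <vector>

// Byte offsets for attribute sections packed back to back in a single buffer
// (positions, normals, tangents, ...). 'counts' are element counts per attribute,
// 'elementSizes' the per-element byte sizes; the running total is also the buffer size.
std::vector<size_t> sectionOffsets(const std::vector<size_t>& counts,
                                   const std::vector<size_t>& elementSizes,
                                   size_t& totalSize) {
    std::vector<size_t> offsets(counts.size(), 0);
    totalSize = 0;
    for (size_t i = 0; i < counts.size(); i++) {
        offsets[i] = totalSize;
        totalSize += counts[i] * elementSizes[i];
    }
    return offsets;
}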
|
||||
|
||||
void NetworkGeometry::modelParseSuccess(FBXGeometry* geometry) {
|
||||
// assume ownership of the geometry pointer
|
||||
_geometry.reset(geometry);
|
||||
|
||||
foreach(const FBXMesh& mesh, _geometry->meshes) {
|
||||
_meshes.emplace_back(buildNetworkMesh(mesh, _textureBaseUrl));
|
||||
}
|
||||
|
||||
_state = SuccessState;
|
||||
emit onSuccess(*this, *_geometry.get());
|
||||
|
||||
delete _resource;
|
||||
_resource = nullptr;
|
||||
}
|
||||
|
||||
void NetworkGeometry::modelParseError(int error, QString str) {
|
||||
_state = ErrorState;
|
||||
emit onFailure(*this, (NetworkGeometry::Error)error);
|
||||
|
||||
delete _resource;
|
||||
_resource = nullptr;
|
||||
}
|
||||
|
||||
bool NetworkMeshPart::isTranslucent() const {
|
||||
return diffuseTexture && diffuseTexture->isTranslucent();
|
||||
}
|
||||
|
||||
bool NetworkMesh::isPartTranslucent(const FBXMesh& fbxMesh, int partIndex) const {
|
||||
assert(partIndex >= 0);
|
||||
assert((size_t)partIndex < _parts.size());
|
||||
return (_parts.at(partIndex)->isTranslucent() || fbxMesh.parts.at(partIndex).opacity != 1.0f);
|
||||
}
|
||||
|
||||
int NetworkMesh::getTranslucentPartCount(const FBXMesh& fbxMesh) const {
|
||||
int count = 0;
|
||||
|
||||
for (size_t i = 0; i < _parts.size(); i++) {
|
||||
if (isPartTranslucent(fbxMesh, i)) {
|
||||
count++;
|
||||
}
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
|
|
@ -12,25 +12,22 @@
|
|||
#ifndef hifi_GeometryCache_h
|
||||
#define hifi_GeometryCache_h
|
||||
|
||||
#include "model-networking/ModelCache.h"
|
||||
|
||||
#include <array>
|
||||
|
||||
|
||||
#include <QMap>
|
||||
#include <QRunnable>
|
||||
|
||||
#include <DependencyManager.h>
|
||||
#include <ResourceCache.h>
|
||||
|
||||
#include "FBXReader.h"
|
||||
#include "OBJReader.h"
|
||||
|
||||
#include <gpu/Batch.h>
|
||||
#include <gpu/Stream.h>
|
||||
|
||||
|
||||
class NetworkGeometry;
|
||||
class NetworkMesh;
|
||||
class NetworkTexture;
|
||||
|
||||
#include <model/Material.h>
|
||||
#include <model/Asset.h>
|
||||
|
||||
typedef glm::vec3 Vec3Key;
|
||||
|
||||
|
@ -125,8 +122,7 @@ using VertexVector = std::vector<glm::vec3>;
|
|||
using IndexVector = std::vector<uint16_t>;
|
||||
|
||||
/// Stores cached geometry.
|
||||
class GeometryCache : public ResourceCache, public Dependency {
|
||||
Q_OBJECT
|
||||
class GeometryCache : public Dependency {
|
||||
SINGLETON_DEPENDENCY
|
||||
|
||||
public:
|
||||
|
@ -151,9 +147,6 @@ public:
|
|||
int allocateID() { return _nextID++; }
|
||||
static const int UNKNOWN_ID;
|
||||
|
||||
virtual QSharedPointer<Resource> createResource(const QUrl& url, const QSharedPointer<Resource>& fallback,
|
||||
bool delayLoad, const void* extra);
|
||||
|
||||
void renderShapeInstances(gpu::Batch& batch, Shape shape, size_t count, gpu::BufferPointer& transformBuffer, gpu::BufferPointer& colorBuffer);
|
||||
void renderWireShapeInstances(gpu::Batch& batch, Shape shape, size_t count, gpu::BufferPointer& transformBuffer, gpu::BufferPointer& colorBuffer);
|
||||
void renderShape(gpu::Batch& batch, Shape shape);
|
||||
|
@ -236,11 +229,6 @@ public:
|
|||
void updateVertices(int id, const QVector<glm::vec3>& points, const QVector<glm::vec2>& texCoords, const glm::vec4& color);
|
||||
void renderVertices(gpu::Batch& batch, gpu::Primitive primitiveType, int id);
|
||||
|
||||
/// Loads geometry from the specified URL.
|
||||
/// \param fallback a fallback URL to load if the desired one is unavailable
|
||||
/// \param delayLoad if true, don't load the geometry immediately; wait until load is first requested
|
||||
QSharedPointer<NetworkGeometry> getGeometry(const QUrl& url, const QUrl& fallback = QUrl(), bool delayLoad = false);
|
||||
|
||||
/// Set a batch to the simple pipeline, returning the previous pipeline
|
||||
void useSimpleDrawPipeline(gpu::Batch& batch, bool noBlend = false);
|
||||
|
||||
|
@ -348,129 +336,4 @@ private:
|
|||
QHash<QUrl, QWeakPointer<NetworkGeometry> > _networkGeometry;
|
||||
};
|
||||
|
||||
class NetworkGeometry : public QObject {
|
||||
Q_OBJECT
|
||||
|
||||
public:
|
||||
// mapping is only used if url is a .fbx or .obj file; it is essentially the content of an fst file.
|
||||
// if delayLoad is true, the url will not be immediately downloaded.
|
||||
// use the attemptRequest method to initiate the download.
|
||||
NetworkGeometry(const QUrl& url, bool delayLoad, const QVariantHash& mapping, const QUrl& textureBaseUrl = QUrl());
|
||||
~NetworkGeometry();
|
||||
|
||||
const QUrl& getURL() const { return _url; }
|
||||
|
||||
void attemptRequest();
|
||||
|
||||
// true when the geometry is loaded (but maybe not its associated textures)
|
||||
bool isLoaded() const;
|
||||
|
||||
// true when the requested geometry and its textures are loaded.
|
||||
bool isLoadedWithTextures() const;
|
||||
|
||||
// WARNING: only valid when isLoaded returns true.
|
||||
const FBXGeometry& getFBXGeometry() const { return *_geometry; }
|
||||
const std::vector<std::unique_ptr<NetworkMesh>>& getMeshes() const { return _meshes; }
|
||||
|
||||
void setTextureWithNameToURL(const QString& name, const QUrl& url);
|
||||
QStringList getTextureNames() const;
|
||||
|
||||
enum Error {
|
||||
MissingFilenameInMapping = 0,
|
||||
MappingRequestError,
|
||||
ModelRequestError,
|
||||
ModelParseError
|
||||
};
|
||||
|
||||
signals:
|
||||
// Fired when everything has downloaded and parsed successfully.
|
||||
void onSuccess(NetworkGeometry& networkGeometry, FBXGeometry& fbxGeometry);
|
||||
|
||||
// Fired when something went wrong.
|
||||
void onFailure(NetworkGeometry& networkGeometry, Error error);
|
||||
|
||||
protected slots:
|
||||
void mappingRequestDone(const QByteArray& data);
|
||||
void mappingRequestError(QNetworkReply::NetworkError error);
|
||||
|
||||
void modelRequestDone(const QByteArray& data);
|
||||
void modelRequestError(QNetworkReply::NetworkError error);
|
||||
|
||||
void modelParseSuccess(FBXGeometry* geometry);
|
||||
void modelParseError(int error, QString str);
|
||||
|
||||
protected:
|
||||
void attemptRequestInternal();
|
||||
void requestMapping(const QUrl& url);
|
||||
void requestModel(const QUrl& url);
|
||||
|
||||
enum State { DelayState,
|
||||
RequestMappingState,
|
||||
RequestModelState,
|
||||
ParsingModelState,
|
||||
SuccessState,
|
||||
ErrorState };
|
||||
State _state;
|
||||
|
||||
QUrl _url;
|
||||
QUrl _mappingUrl;
|
||||
QUrl _modelUrl;
|
||||
QVariantHash _mapping;
|
||||
QUrl _textureBaseUrl;
|
||||
|
||||
Resource* _resource = nullptr;
|
||||
std::unique_ptr<FBXGeometry> _geometry;
|
||||
std::vector<std::unique_ptr<NetworkMesh>> _meshes;
|
||||
|
||||
// cache for isLoadedWithTextures()
|
||||
mutable bool _isLoadedWithTextures = false;
|
||||
};
|
||||
|
||||
/// Reads geometry in a worker thread.
|
||||
class GeometryReader : public QObject, public QRunnable {
|
||||
Q_OBJECT
|
||||
public:
|
||||
GeometryReader(const QUrl& url, const QByteArray& data, const QVariantHash& mapping);
|
||||
virtual void run();
|
||||
signals:
|
||||
void onSuccess(FBXGeometry* geometry);
|
||||
void onError(int error, QString str);
|
||||
private:
|
||||
QUrl _url;
|
||||
QByteArray _data;
|
||||
QVariantHash _mapping;
|
||||
};
|
||||
|
||||
/// The state associated with a single mesh part.
|
||||
class NetworkMeshPart {
|
||||
public:
|
||||
|
||||
QString diffuseTextureName;
|
||||
QSharedPointer<NetworkTexture> diffuseTexture;
|
||||
QString normalTextureName;
|
||||
QSharedPointer<NetworkTexture> normalTexture;
|
||||
QString specularTextureName;
|
||||
QSharedPointer<NetworkTexture> specularTexture;
|
||||
QString emissiveTextureName;
|
||||
QSharedPointer<NetworkTexture> emissiveTexture;
|
||||
|
||||
bool isTranslucent() const;
|
||||
};
|
||||
|
||||
/// The state associated with a single mesh.
|
||||
class NetworkMesh {
|
||||
public:
|
||||
gpu::BufferPointer _indexBuffer;
|
||||
gpu::BufferPointer _vertexBuffer;
|
||||
|
||||
gpu::BufferStreamPointer _vertexStream;
|
||||
|
||||
gpu::Stream::FormatPointer _vertexFormat;
|
||||
|
||||
std::vector<std::unique_ptr<NetworkMeshPart>> _parts;
|
||||
|
||||
int getTranslucentPartCount(const FBXMesh& fbxMesh) const;
|
||||
bool isPartTranslucent(const FBXMesh& fbxMesh, int partIndex) const;
|
||||
};
|
||||
|
||||
#endif // hifi_GeometryCache_h
|
||||
|
|
|
@ -96,6 +96,11 @@ Model::~Model() {
|
|||
|
||||
Model::RenderPipelineLib Model::_renderPipelineLib;
|
||||
const int MATERIAL_GPU_SLOT = 3;
|
||||
const int DIFFUSE_MAP_SLOT = 0;
|
||||
const int NORMAL_MAP_SLOT = 1;
|
||||
const int SPECULAR_MAP_SLOT = 2;
|
||||
const int LIGHTMAP_MAP_SLOT = 3;
|
||||
const int LIGHT_BUFFER_SLOT = 4;
|
||||
|
||||
void Model::RenderPipelineLib::addRenderPipeline(Model::RenderKey key,
|
||||
gpu::ShaderPointer& vertexShader,
|
||||
|
@ -103,14 +108,13 @@ void Model::RenderPipelineLib::addRenderPipeline(Model::RenderKey key,
|
|||
|
||||
gpu::Shader::BindingSet slotBindings;
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("materialBuffer"), MATERIAL_GPU_SLOT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("diffuseMap"), 0));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("normalMap"), 1));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("specularMap"), 2));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("emissiveMap"), 3));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("lightBuffer"), 4));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("diffuseMap"), DIFFUSE_MAP_SLOT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("normalMap"), NORMAL_MAP_SLOT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("specularMap"), SPECULAR_MAP_SLOT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("emissiveMap"), LIGHTMAP_MAP_SLOT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("lightBuffer"), LIGHT_BUFFER_SLOT));
|
||||
slotBindings.insert(gpu::Shader::Binding(std::string("normalFittingMap"), DeferredLightingEffect::NORMAL_FITTING_MAP_SLOT));
|
||||
|
||||
|
||||
gpu::ShaderPointer program = gpu::ShaderPointer(gpu::Shader::createProgram(vertexShader, pixelShader));
|
||||
gpu::Shader::makeProgram(*program, slotBindings);
|
||||
|
||||
|
@ -185,6 +189,7 @@ void Model::RenderPipelineLib::initLocations(gpu::ShaderPointer& program, Model:
|
|||
locations.emissiveParams = program->getUniforms().findLocation("emissiveParams");
|
||||
locations.glowIntensity = program->getUniforms().findLocation("glowIntensity");
|
||||
locations.normalFittingMapUnit = program->getTextures().findLocation("normalFittingMap");
|
||||
locations.normalTextureUnit = program->getTextures().findLocation("normalMap");
|
||||
locations.specularTextureUnit = program->getTextures().findLocation("specularMap");
|
||||
locations.emissiveTextureUnit = program->getTextures().findLocation("emissiveMap");
|
||||
locations.materialBufferUnit = program->getBuffers().findLocation("materialBuffer");
|
||||
|
@ -738,16 +743,17 @@ void Model::renderSetup(RenderArgs* args) {

class MeshPartPayload {
public:
MeshPartPayload(bool transparent, Model* model, int meshIndex, int partIndex) :
transparent(transparent), model(model), url(model->getURL()), meshIndex(meshIndex), partIndex(partIndex) { }
MeshPartPayload(Model* model, int meshIndex, int partIndex, int shapeIndex) :
model(model), url(model->getURL()), meshIndex(meshIndex), partIndex(partIndex), _shapeID(shapeIndex) { }

typedef render::Payload<MeshPartPayload> Payload;
typedef Payload::DataPointer Pointer;

bool transparent;
Model* model;
QUrl url;
int meshIndex;
int partIndex;
int _shapeID;
};

namespace render {

@ -755,7 +761,21 @@ namespace render {
if (!payload->model->isVisible()) {
return ItemKey::Builder().withInvisible().build();
}
return payload->transparent ? ItemKey::Builder::transparentShape() : ItemKey::Builder::opaqueShape();
auto geometry = payload->model->getGeometry();
if (!geometry.isNull()) {
auto drawMaterial = geometry->getShapeMaterial(payload->_shapeID);
if (drawMaterial) {
auto matKey = drawMaterial->_material->getKey();
if (matKey.isTransparent() || matKey.isTransparentMap()) {
return ItemKey::Builder::transparentShape();
} else {
return ItemKey::Builder::opaqueShape();
}
}
}

// Return opaque for lack of a better idea
return ItemKey::Builder::opaqueShape();
}

template <> const Item::Bound payloadGetBound(const MeshPartPayload::Pointer& payload) {

@ -766,7 +786,7 @@ namespace render {
}
template <> void payloadRender(const MeshPartPayload::Pointer& payload, RenderArgs* args) {
if (args) {
return payload->model->renderPart(args, payload->meshIndex, payload->partIndex, payload->transparent);
return payload->model->renderPart(args, payload->meshIndex, payload->partIndex, payload->_shapeID);
}
}

@ -795,19 +815,7 @@ bool Model::addToScene(std::shared_ptr<render::Scene> scene, render::PendingChan

bool somethingAdded = false;

foreach (auto renderItem, _transparentRenderItems) {
auto item = scene->allocateID();
auto renderData = MeshPartPayload::Pointer(renderItem);
auto renderPayload = std::make_shared<MeshPartPayload::Payload>(renderData);
pendingChanges.resetItem(item, renderPayload);
pendingChanges.updateItem<MeshPartPayload>(item, [&](MeshPartPayload& data) {
data.model->_needsUpdateClusterMatrices = true;
});
_renderItems.insert(item, renderPayload);
somethingAdded = true;
}

foreach (auto renderItem, _opaqueRenderItems) {
foreach (auto renderItem, _renderItemsSet) {
auto item = scene->allocateID();
auto renderData = MeshPartPayload::Pointer(renderItem);
auto renderPayload = std::make_shared<MeshPartPayload::Payload>(renderData);

@ -831,20 +839,7 @@ bool Model::addToScene(std::shared_ptr<render::Scene> scene, render::PendingChan

bool somethingAdded = false;

foreach (auto renderItem, _transparentRenderItems) {
auto item = scene->allocateID();
auto renderData = MeshPartPayload::Pointer(renderItem);
auto renderPayload = std::make_shared<MeshPartPayload::Payload>(renderData);
renderPayload->addStatusGetters(statusGetters);
pendingChanges.resetItem(item, renderPayload);
pendingChanges.updateItem<MeshPartPayload>(item, [&](MeshPartPayload& data) {
data.model->_needsUpdateClusterMatrices = true;
});
_renderItems.insert(item, renderPayload);
somethingAdded = true;
}

foreach (auto renderItem, _opaqueRenderItems) {
foreach (auto renderItem, _renderItemsSet) {
auto item = scene->allocateID();
auto renderData = MeshPartPayload::Pointer(renderItem);
auto renderPayload = std::make_shared<MeshPartPayload::Payload>(renderData);

@ -1444,7 +1439,7 @@ AABox Model::getPartBounds(int meshIndex, int partIndex) {
return AABox();
}

void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool translucent) {
void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, int shapeID) {
// PROFILE_RANGE(__FUNCTION__);
PerformanceTimer perfTimer("Model::renderPart");
if (!_readyWhenAdded) {

@ -1467,6 +1462,19 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
const FBXGeometry& geometry = _geometry->getFBXGeometry();
const std::vector<std::unique_ptr<NetworkMesh>>& networkMeshes = _geometry->getMeshes();

auto networkMaterial = _geometry->getShapeMaterial(shapeID);
if (!networkMaterial) {
return;
};
auto material = networkMaterial->_material;
if (!material) {
return;
}

// TODO: Not yet
// auto drawMesh = _geometry->getShapeMesh(shapeID);
// auto drawPart = _geometry->getShapePart(shapeID);

// guard against partially loaded meshes
if (meshIndex >= (int)networkMeshes.size() || meshIndex >= (int)geometry.meshes.size() || meshIndex >= (int)_meshStates.size() ) {
return;

@ -1478,10 +1486,12 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
const FBXMesh& mesh = geometry.meshes.at(meshIndex);
const MeshState& state = _meshStates.at(meshIndex);

bool translucentMesh = translucent; // networkMesh.getTranslucentPartCount(mesh) == networkMesh.parts.size();
bool hasTangents = !mesh.tangents.isEmpty();
bool hasSpecular = mesh.hasSpecularTexture();
bool hasLightmap = mesh.hasEmissiveTexture();
auto drawMaterialKey = material->getKey();
bool translucentMesh = drawMaterialKey.isTransparent() || drawMaterialKey.isTransparentMap();

bool hasTangents = drawMaterialKey.isNormalMap() && !mesh.tangents.isEmpty();
bool hasSpecular = drawMaterialKey.isGlossMap(); // !drawMaterial->specularTextureName.isEmpty(); //mesh.hasSpecularTexture();
bool hasLightmap = drawMaterialKey.isLightmapMap(); // !drawMaterial->emissiveTextureName.isEmpty(); //mesh.hasEmissiveTexture();
bool isSkinned = state.clusterMatrices.size() > 1;
bool wireframe = isWireframe();

@ -1578,13 +1588,12 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
}

// guard against partially loaded meshes
if (partIndex >= (int)networkMesh._parts.size() || partIndex >= mesh.parts.size()) {
if (partIndex >= mesh.parts.size()) {
return;
}

const NetworkMeshPart& networkPart = *(networkMesh._parts.at(partIndex).get());
const FBXMeshPart& part = mesh.parts.at(partIndex);
model::MaterialPointer material = part._material;

#ifdef WANT_DEBUG
if (material == nullptr) {

@ -1592,7 +1601,7 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
}
#endif

if (material != nullptr) {
{

// apply material properties
if (mode != RenderArgs::SHADOW_RENDER_MODE) {

@ -1606,65 +1615,90 @@ void Model::renderPart(RenderArgs* args, int meshIndex, int partIndex, bool tran
batch.setUniformBuffer(locations->materialBufferUnit, material->getSchemaBuffer());
}

Texture* diffuseMap = networkPart.diffuseTexture.data();
if (mesh.isEye && diffuseMap) {
// FIXME - guard against out of bounds here
if (meshIndex < _dilatedTextures.size()) {
if (partIndex < _dilatedTextures[meshIndex].size()) {
diffuseMap = (_dilatedTextures[meshIndex][partIndex] =
static_cast<DilatableNetworkTexture*>(diffuseMap)->getDilatedTexture(_pupilDilation)).data();
auto materialKey = material->getKey();
auto textureMaps = material->getTextureMaps();
glm::mat4 texcoordTransform[2];

// Diffuse
if (materialKey.isDiffuseMap()) {
auto diffuseMap = textureMaps[model::MaterialKey::DIFFUSE_MAP];

if (diffuseMap && diffuseMap->isDefined()) {
batch.setResourceTexture(DIFFUSE_MAP_SLOT, diffuseMap->getTextureView());

if (!diffuseMap->getTextureTransform().isIdentity()) {
diffuseMap->getTextureTransform().getMatrix(texcoordTransform[0]);
}
} else {
batch.setResourceTexture(DIFFUSE_MAP_SLOT, textureCache->getGrayTexture());
}
} else {
batch.setResourceTexture(DIFFUSE_MAP_SLOT, textureCache->getGrayTexture());
}

// Normal map
if ((locations->normalTextureUnit >= 0) && hasTangents) {
auto normalMap = textureMaps[model::MaterialKey::NORMAL_MAP];
if (normalMap && normalMap->isDefined()) {
batch.setResourceTexture(NORMAL_MAP_SLOT, normalMap->getTextureView());

// texcoords are assumed to be the same as diffuse
} else {
batch.setResourceTexture(NORMAL_MAP_SLOT, textureCache->getBlueTexture());
}
} else {
batch.setResourceTexture(NORMAL_MAP_SLOT, nullptr);
}

// TODO: For now the gloss map is used as the "specular" map in the shading, we'll need to fix that
if ((locations->specularTextureUnit >= 0) && materialKey.isGlossMap()) {
auto specularMap = textureMaps[model::MaterialKey::GLOSS_MAP];
if (specularMap && specularMap->isDefined()) {
batch.setResourceTexture(SPECULAR_MAP_SLOT, specularMap->getTextureView());

// texcoords are assumed to be the same as diffuse
} else {
batch.setResourceTexture(SPECULAR_MAP_SLOT, textureCache->getBlackTexture());
}
} else {
batch.setResourceTexture(SPECULAR_MAP_SLOT, nullptr);
}

// TODO: For now the lightmap is piped into the emissive map unit, we need to fix that and support real emissive too
if ((locations->emissiveTextureUnit >= 0) && materialKey.isLightmapMap()) {
auto lightmapMap = textureMaps[model::MaterialKey::LIGHTMAP_MAP];

if (lightmapMap && lightmapMap->isDefined()) {
batch.setResourceTexture(LIGHTMAP_MAP_SLOT, lightmapMap->getTextureView());

auto lightmapOffsetScale = lightmapMap->getLightmapOffsetScale();
batch._glUniform2f(locations->emissiveParams, lightmapOffsetScale.x, lightmapOffsetScale.y);

if (!lightmapMap->getTextureTransform().isIdentity()) {
lightmapMap->getTextureTransform().getMatrix(texcoordTransform[1]);
}
}
}
if (diffuseMap && static_cast<NetworkTexture*>(diffuseMap)->isLoaded()) {
batch.setResourceTexture(0, diffuseMap->getGPUTexture());
else {
batch.setResourceTexture(LIGHTMAP_MAP_SLOT, textureCache->getGrayTexture());
}
} else {
batch.setResourceTexture(0, textureCache->getGrayTexture());
batch.setResourceTexture(LIGHTMAP_MAP_SLOT, nullptr);
}

// Texcoord transforms ?
if (locations->texcoordMatrices >= 0) {
glm::mat4 texcoordTransform[2];
if (!part.diffuseTexture.transform.isIdentity()) {
part.diffuseTexture.transform.getMatrix(texcoordTransform[0]);
}
if (!part.emissiveTexture.transform.isIdentity()) {
part.emissiveTexture.transform.getMatrix(texcoordTransform[1]);
}
batch._glUniformMatrix4fv(locations->texcoordMatrices, 2, false, (const float*) &texcoordTransform);
batch._glUniformMatrix4fv(locations->texcoordMatrices, 2, false, (const float*)&texcoordTransform);
}

if (!mesh.tangents.isEmpty()) {
NetworkTexture* normalMap = networkPart.normalTexture.data();
batch.setResourceTexture(1, (!normalMap || !normalMap->isLoaded()) ?
textureCache->getBlueTexture() : normalMap->getGPUTexture());
// TODO: We should be able to do that just in the renderTransparentJob
if (translucentMesh && locations->lightBufferUnit >= 0) {
DependencyManager::get<DeferredLightingEffect>()->setupTransparent(args, locations->lightBufferUnit);
}

if (locations->specularTextureUnit >= 0) {
NetworkTexture* specularMap = networkPart.specularTexture.data();
batch.setResourceTexture(locations->specularTextureUnit, (!specularMap || !specularMap->isLoaded()) ?
textureCache->getBlackTexture() : specularMap->getGPUTexture());
}

if (args) {
args->_details._materialSwitches++;
}

// HACK: For unknown reason (yet!) this code that should be assigned only if the material changes need to be called for every
// drawcall with an emissive, so let's do it for now.
if (locations->emissiveTextureUnit >= 0) {
// assert(locations->emissiveParams >= 0); // we should have the emissiveParams defined in the shader
float emissiveOffset = part.emissiveParams.x;
float emissiveScale = part.emissiveParams.y;
batch._glUniform2f(locations->emissiveParams, emissiveOffset, emissiveScale);

NetworkTexture* emissiveMap = networkPart.emissiveTexture.data();
batch.setResourceTexture(locations->emissiveTextureUnit, (!emissiveMap || !emissiveMap->isLoaded()) ?
textureCache->getGrayTexture() : emissiveMap->getGPUTexture());
}

if (translucent && locations->lightBufferUnit >= 0) {
DependencyManager::get<DeferredLightingEffect>()->setupTransparent(args, locations->lightBufferUnit);
}
}
}

@ -1688,36 +1722,20 @@ void Model::segregateMeshGroups() {
return;
}

_transparentRenderItems.clear();
_opaqueRenderItems.clear();
_renderItemsSet.clear();

// Run through all of the meshes, and place them into their segregated, but unsorted buckets
int shapeID = 0;
for (int i = 0; i < (int)networkMeshes.size(); i++) {
const NetworkMesh& networkMesh = *(networkMeshes.at(i).get());
const FBXMesh& mesh = geometry.meshes.at(i);
const MeshState& state = _meshStates.at(i);

bool translucentMesh = networkMesh.getTranslucentPartCount(mesh) == (int)networkMesh._parts.size();
bool hasTangents = !mesh.tangents.isEmpty();
bool hasSpecular = mesh.hasSpecularTexture();
bool hasLightmap = mesh.hasEmissiveTexture();
bool isSkinned = state.clusterMatrices.size() > 1;
bool wireframe = isWireframe();

if (wireframe) {
translucentMesh = hasTangents = hasSpecular = hasLightmap = isSkinned = false;
}
// TODO: make excellent use of translucentMesh
Q_UNUSED(translucentMesh);

// Create the render payloads
int totalParts = mesh.parts.size();
for (int partIndex = 0; partIndex < totalParts; partIndex++) {
if (networkMesh.isPartTranslucent(mesh, partIndex)) {
_transparentRenderItems << std::make_shared<MeshPartPayload>(true, this, i, partIndex);
} else {
_opaqueRenderItems << std::make_shared<MeshPartPayload>(false, this, i, partIndex);
}
_renderItemsSet << std::make_shared<MeshPartPayload>(this, i, partIndex, shapeID);
shapeID++;
}
}
_meshGroupsKnown = true;

@ -1765,18 +1783,8 @@ bool Model::initWhenReady(render::ScenePointer scene) {
segregateMeshGroups();

render::PendingChanges pendingChanges;
foreach (auto renderItem, _transparentRenderItems) {
auto item = scene->allocateID();
auto renderData = MeshPartPayload::Pointer(renderItem);
auto renderPayload = std::make_shared<MeshPartPayload::Payload>(renderData);
_renderItems.insert(item, renderPayload);
pendingChanges.resetItem(item, renderPayload);
pendingChanges.updateItem<MeshPartPayload>(item, [&](MeshPartPayload& data) {
data.model->_needsUpdateClusterMatrices = true;
});
}

foreach (auto renderItem, _opaqueRenderItems) {
foreach (auto renderItem, _renderItemsSet) {
auto item = scene->allocateID();
auto renderData = MeshPartPayload::Pointer(renderItem);
auto renderPayload = std::make_shared<MeshPartPayload::Payload>(renderData);

@ -88,7 +88,7 @@ public:
bool isVisible() const { return _isVisible; }

AABox getPartBounds(int meshIndex, int partIndex);
void renderPart(RenderArgs* args, int meshIndex, int partIndex, bool translucent);
void renderPart(RenderArgs* args, int meshIndex, int partIndex, int shapeID);

bool maybeStartBlender();

@ -339,6 +339,7 @@ private:
int tangent;
int alphaThreshold;
int texcoordMatrices;
int normalTextureUnit;
int specularTextureUnit;
int emissiveTextureUnit;
int emissiveParams;

@ -488,8 +489,7 @@ private:
bool _renderCollisionHull;

QSet<std::shared_ptr<MeshPartPayload>> _transparentRenderItems;
QSet<std::shared_ptr<MeshPartPayload>> _opaqueRenderItems;
QSet<std::shared_ptr<MeshPartPayload>> _renderItemsSet;
QMap<render::ItemID, render::PayloadPointer> _renderItems;
bool _readyWhenAdded = false;
bool _needsReload = true;

@ -1,2 +1,2 @@
// Compatibility
#include <gpu-networking/TextureCache.h>
#include <model-networking/TextureCache.h>

@ -30,7 +30,7 @@ void main(void) {

// pass along the diffuse color
_color = inColor.xyz;

// and the texture coordinates
_texCoord0 = (texcoordMatrices[0] * vec4(inTexCoord0.st, 0.0, 1.0)).st;

@ -7,4 +7,4 @@ add_dependency_external_projects(glm)
find_package(GLM REQUIRED)
target_include_directories(${TARGET_NAME} PUBLIC ${GLM_INCLUDE_DIRS})

link_hifi_libraries(shared networking octree gpu gpu-networking procedural model fbx entities animation audio physics)
link_hifi_libraries(shared networking octree gpu procedural model model-networking fbx entities animation audio physics)

@ -9,12 +9,17 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "SceneScriptingInterface.h"

#include <AddressManager.h>

#include "SceneScriptingInterface.h"

#include "SceneScriptingInterface.h"
#include <procedural/ProceduralSkybox.h>

SceneScriptingInterface::SceneScriptingInterface() {
// Let's make sure the sunSkyStage is using a proceduralSKybox
_skyStage->setSkybox(model::SkyboxPointer(new ProceduralSkybox()));
}

void SceneScriptingInterface::setStageOrientation(const glm::quat& orientation) {
_skyStage->setOriginOrientation(orientation);

@ -117,7 +117,7 @@ signals:
void shouldRenderAvatarsChanged(bool shouldRenderAvatars);
void shouldRenderEntitiesChanged(bool shouldRenderEntities);
protected:
SceneScriptingInterface() {};
SceneScriptingInterface();
~SceneScriptingInterface() {};

model::SunSkyStagePointer _skyStage = std::make_shared<model::SunSkyStage>();

@ -10,6 +10,6 @@ set_target_properties(${TARGET_NAME} PROPERTIES FOLDER "Tests/manual-tests/")
#include_oglplus()

# link in the shared libraries
link_hifi_libraries(networking gpu gpu-networking procedural shared fbx model animation script-engine render-utils )
link_hifi_libraries(networking gpu procedural shared fbx model model-networking animation script-engine render-utils )

copy_dlls_beside_windows_executable()