Merge branch 'master' of https://github.com/highfidelity/hifi into brown
Commit 855d35eb1b
13 changed files with 362 additions and 230 deletions
@@ -156,10 +156,8 @@ Column {
function makeFilteredStoryProcessor() { // answer a function(storyData) that adds it to suggestions if it matches
var words = filter.toUpperCase().split(/\s+/).filter(identity);
function suggestable(story) {
if (story.action === 'snapshot') {
return true;
}
return story.place_name !== AddressManager.placename; // Not our entry, but do show other entry points to current domain.
// We could filter out places we don't want to suggest, such as those where (story.place_name === AddressManager.placename) or (story.username === Account.username).
return true;
}
function matches(story) {
if (!words.length) {
@@ -502,13 +502,14 @@ static TextRenderer3D* textRenderer(TextRendererType type) {
void Avatar::addToScene(AvatarSharedPointer self, const render::ScenePointer& scene, render::Transaction& transaction) {
auto avatarPayload = new render::Payload<AvatarData>(self);
auto avatarPayloadPointer = Avatar::PayloadPointer(avatarPayload);
if (_skeletonModel->addToScene(scene, transaction)) {
_renderItemID = scene->allocateID();
transaction.resetItem(_renderItemID, avatarPayloadPointer);

for (auto& attachmentModel : _attachmentModels) {
attachmentModel->addToScene(scene, transaction);
}
if (_renderItemID == render::Item::INVALID_ITEM_ID) {
_renderItemID = scene->allocateID();
}
transaction.resetItem(_renderItemID, avatarPayloadPointer);
_skeletonModel->addToScene(scene, transaction);
for (auto& attachmentModel : _attachmentModels) {
attachmentModel->addToScene(scene, transaction);
}
}
@@ -24,6 +24,7 @@

#include <shared/NsightHelpers.h>
#include <NetworkAccessManager.h>
#include <ResourceManager.h>

#include "FBXReader.h"
#include "ModelFormatLogging.h"

@@ -165,6 +166,7 @@ bool OBJFace::add(const QByteArray& vertexIndex, const QByteArray& textureIndex,
}
return true;
}

QVector<OBJFace> OBJFace::triangulate() {
QVector<OBJFace> newFaces;
const int nVerticesInATriangle = 3;

@@ -183,6 +185,7 @@ QVector<OBJFace> OBJFace::triangulate() {
}
return newFaces;
}

void OBJFace::addFrom(const OBJFace* face, int index) { // add using data from f at index i
vertexIndices.append(face->vertexIndices[index]);
if (face->textureUVIndices.count() > 0) { // Any at all. Runtime error if not consistent.

@@ -193,24 +196,13 @@ void OBJFace::addFrom(const OBJFace* face, int index) { // add using data from f
}
}

static bool replyOK(QNetworkReply* netReply, QUrl url) { // This will be reworked when we make things asynchronous
return (netReply && netReply->isFinished() &&
(url.toString().startsWith("file", Qt::CaseInsensitive) ? // file urls don't have http status codes
netReply->attribute(QNetworkRequest::HttpReasonPhraseAttribute).toString().isEmpty() :
(netReply->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt() == 200)));
}

bool OBJReader::isValidTexture(const QByteArray &filename) {
if (_url.isEmpty()) {
return false;
}
QUrl candidateUrl = _url.resolved(QUrl(filename));
QNetworkReply *netReply = request(candidateUrl, true);
bool isValid = replyOK(netReply, candidateUrl);
if (netReply) {
netReply->deleteLater();
}
return isValid;

return ResourceManager::resourceExists(candidateUrl);
}

void OBJReader::parseMaterialLibrary(QIODevice* device) {
@@ -274,7 +266,28 @@ void OBJReader::parseMaterialLibrary(QIODevice* device) {
}
}

QNetworkReply* OBJReader::request(QUrl& url, bool isTest) {
std::tuple<bool, QByteArray> requestData(QUrl& url) {
auto request = ResourceManager::createResourceRequest(nullptr, url);

if (!request) {
return std::make_tuple(false, QByteArray());
}

request->send();

QEventLoop loop;
QObject::connect(request, &ResourceRequest::finished, &loop, &QEventLoop::quit);
loop.exec();

if (request->getResult() == ResourceRequest::Success) {
return std::make_tuple(true, request->getData());
} else {
return std::make_tuple(false, QByteArray());
}
}


QNetworkReply* request(QUrl& url, bool isTest) {
if (!qApp) {
return nullptr;
}
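The new requestData() above changes the calling convention from a reply pointer to a (success, payload) tuple. A minimal sketch of how a caller can unpack that tuple follows; fetchStub is a made-up stand-in for requestData(QUrl&), which in the real code blocks on a ResourceRequest as shown above.

    #include <tuple>
    #include <QByteArray>
    #include <QtDebug>

    // Hypothetical stand-in for requestData(QUrl&): the bool reports success,
    // the QByteArray carries the downloaded bytes.
    static std::tuple<bool, QByteArray> fetchStub() {
        return std::make_tuple(true, QByteArray("newmtl example\n"));
    }

    int main() {
        bool success;
        QByteArray data;
        std::tie(success, data) = fetchStub();
        if (success) {
            qDebug() << data; // e.g. wrap in a QBuffer and hand it to parseMaterialLibrary()
        }
        return 0;
    }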
@@ -293,10 +306,7 @@ QNetworkReply* OBJReader::request(QUrl& url, bool isTest) {
QEventLoop loop; // Create an event loop that will quit when we get the finished signal
QObject::connect(netReply, SIGNAL(finished()), &loop, SLOT(quit()));
loop.exec(); // Nothing is going to happen on this whole run thread until we get this
static const int WAIT_TIMEOUT_MS = 500;
while (!aboutToQuit && qApp && !netReply->isReadable()) {
netReply->waitForReadyRead(WAIT_TIMEOUT_MS); // so we might as well block this thread waiting for the response, rather than
}

QObject::disconnect(connection);
return netReply; // trying to sync later on.
}
@@ -446,142 +456,142 @@ FBXGeometry* OBJReader::readOBJ(QByteArray& model, const QVariantHash& mapping,
// add a new meshPart to the geometry's single mesh.
while (parseOBJGroup(tokenizer, mapping, geometry, scaleGuess, combineParts)) {}

FBXMesh& mesh = geometry.meshes[0];
mesh.meshIndex = 0;
FBXMesh& mesh = geometry.meshes[0];
mesh.meshIndex = 0;

geometry.joints.resize(1);
geometry.joints[0].isFree = false;
geometry.joints[0].parentIndex = -1;
geometry.joints[0].distanceToParent = 0;
geometry.joints[0].translation = glm::vec3(0, 0, 0);
geometry.joints[0].rotationMin = glm::vec3(0, 0, 0);
geometry.joints[0].rotationMax = glm::vec3(0, 0, 0);
geometry.joints[0].name = "OBJ";
geometry.joints[0].isSkeletonJoint = true;
geometry.joints.resize(1);
geometry.joints[0].isFree = false;
geometry.joints[0].parentIndex = -1;
geometry.joints[0].distanceToParent = 0;
geometry.joints[0].translation = glm::vec3(0, 0, 0);
geometry.joints[0].rotationMin = glm::vec3(0, 0, 0);
geometry.joints[0].rotationMax = glm::vec3(0, 0, 0);
geometry.joints[0].name = "OBJ";
geometry.joints[0].isSkeletonJoint = true;

geometry.jointIndices["x"] = 1;
geometry.jointIndices["x"] = 1;

FBXCluster cluster;
cluster.jointIndex = 0;
cluster.inverseBindMatrix = glm::mat4(1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1);
mesh.clusters.append(cluster);
FBXCluster cluster;
cluster.jointIndex = 0;
cluster.inverseBindMatrix = glm::mat4(1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1);
mesh.clusters.append(cluster);

QMap<QString, int> materialMeshIdMap;
QVector<FBXMeshPart> fbxMeshParts;
for (int i = 0, meshPartCount = 0; i < mesh.parts.count(); i++, meshPartCount++) {
FBXMeshPart& meshPart = mesh.parts[i];
FaceGroup faceGroup = faceGroups[meshPartCount];
QMap<QString, int> materialMeshIdMap;
QVector<FBXMeshPart> fbxMeshParts;
for (int i = 0, meshPartCount = 0; i < mesh.parts.count(); i++, meshPartCount++) {
FBXMeshPart& meshPart = mesh.parts[i];
FaceGroup faceGroup = faceGroups[meshPartCount];
bool specifiesUV = false;
foreach(OBJFace face, faceGroup) {
// Go through all of the OBJ faces and determine the number of different materials necessary (each different material will be a unique mesh).
// NOTE (trent/mittens 3/30/17): this seems hardcore wasteful and is slowed down a bit by iterating through the face group twice, but it's the best way I've thought of to hack multi-material support in an OBJ into this pipeline.
if (!materialMeshIdMap.contains(face.materialName)) {
// Create a new FBXMesh for this material mapping.
materialMeshIdMap.insert(face.materialName, materialMeshIdMap.count());
foreach(OBJFace face, faceGroup) {
// Go through all of the OBJ faces and determine the number of different materials necessary (each different material will be a unique mesh).
// NOTE (trent/mittens 3/30/17): this seems hardcore wasteful and is slowed down a bit by iterating through the face group twice, but it's the best way I've thought of to hack multi-material support in an OBJ into this pipeline.
if (!materialMeshIdMap.contains(face.materialName)) {
// Create a new FBXMesh for this material mapping.
materialMeshIdMap.insert(face.materialName, materialMeshIdMap.count());

fbxMeshParts.append(FBXMeshPart());
FBXMeshPart& meshPartNew = fbxMeshParts.last();
meshPartNew.quadIndices = QVector<int>(meshPart.quadIndices); // Copy over quad indices [NOTE (trent/mittens, 4/3/17): Likely unnecessary since they go unused anyway].
meshPartNew.quadTrianglesIndices = QVector<int>(meshPart.quadTrianglesIndices); // Copy over quad triangulated indices [NOTE (trent/mittens, 4/3/17): Likely unnecessary since they go unused anyway].
meshPartNew.triangleIndices = QVector<int>(meshPart.triangleIndices); // Copy over triangle indices.
fbxMeshParts.append(FBXMeshPart());
FBXMeshPart& meshPartNew = fbxMeshParts.last();
meshPartNew.quadIndices = QVector<int>(meshPart.quadIndices); // Copy over quad indices [NOTE (trent/mittens, 4/3/17): Likely unnecessary since they go unused anyway].
meshPartNew.quadTrianglesIndices = QVector<int>(meshPart.quadTrianglesIndices); // Copy over quad triangulated indices [NOTE (trent/mittens, 4/3/17): Likely unnecessary since they go unused anyway].
meshPartNew.triangleIndices = QVector<int>(meshPart.triangleIndices); // Copy over triangle indices.

// Do some of the material logic (which previously lived below) now.
// All the faces in the same group will have the same name and material.
QString groupMaterialName = face.materialName;
if (groupMaterialName.isEmpty() && specifiesUV) {
// Do some of the material logic (which previously lived below) now.
// All the faces in the same group will have the same name and material.
QString groupMaterialName = face.materialName;
if (groupMaterialName.isEmpty() && specifiesUV) {
#ifdef WANT_DEBUG
qCDebug(modelformat) << "OBJ Reader WARNING: " << url
<< " needs a texture that isn't specified. Using default mechanism.";
qCDebug(modelformat) << "OBJ Reader WARNING: " << url
<< " needs a texture that isn't specified. Using default mechanism.";
#endif
groupMaterialName = SMART_DEFAULT_MATERIAL_NAME;
}
if (!groupMaterialName.isEmpty()) {
OBJMaterial& material = materials[groupMaterialName];
if (specifiesUV) {
material.userSpecifiesUV = true; // Note might not be true in a later usage.
}
if (specifiesUV || (groupMaterialName.compare("none", Qt::CaseInsensitive) != 0)) {
// Blender has a convention that a material named "None" isn't really used (or defined).
material.used = true;
needsMaterialLibrary = groupMaterialName != SMART_DEFAULT_MATERIAL_NAME;
}
materials[groupMaterialName] = material;
meshPartNew.materialID = groupMaterialName;
}
}
}
}

// clean up old mesh parts.
int unmodifiedMeshPartCount = mesh.parts.count();
mesh.parts.clear();
mesh.parts = QVector<FBXMeshPart>(fbxMeshParts);

for (int i = 0, meshPartCount = 0; i < unmodifiedMeshPartCount; i++, meshPartCount++) {
FaceGroup faceGroup = faceGroups[meshPartCount];

// Now that each mesh has been created with its own unique material mappings, fill them with data (vertex data is duplicated, face data is not).
foreach(OBJFace face, faceGroup) {
FBXMeshPart& meshPart = mesh.parts[materialMeshIdMap[face.materialName]];

glm::vec3 v0 = checked_at(vertices, face.vertexIndices[0]);
glm::vec3 v1 = checked_at(vertices, face.vertexIndices[1]);
glm::vec3 v2 = checked_at(vertices, face.vertexIndices[2]);

// Scale the vertices if the OBJ file scale is specified as non-one.
if (scaleGuess != 1.0f) {
v0 *= scaleGuess;
v1 *= scaleGuess;
v2 *= scaleGuess;
}

// Add the vertices.
meshPart.triangleIndices.append(mesh.vertices.count()); // not face.vertexIndices into vertices
mesh.vertices << v0;
meshPart.triangleIndices.append(mesh.vertices.count());
mesh.vertices << v1;
meshPart.triangleIndices.append(mesh.vertices.count());
mesh.vertices << v2;

glm::vec3 n0, n1, n2;
if (face.normalIndices.count()) {
n0 = checked_at(normals, face.normalIndices[0]);
n1 = checked_at(normals, face.normalIndices[1]);
n2 = checked_at(normals, face.normalIndices[2]);
} else {
// generate normals from triangle plane if not provided
n0 = n1 = n2 = glm::cross(v1 - v0, v2 - v0);
}

mesh.normals.append(n0);
mesh.normals.append(n1);
mesh.normals.append(n2);

if (face.textureUVIndices.count()) {
mesh.texCoords <<
checked_at(textureUVs, face.textureUVIndices[0]) <<
checked_at(textureUVs, face.textureUVIndices[1]) <<
checked_at(textureUVs, face.textureUVIndices[2]);
} else {
glm::vec2 corner(0.0f, 1.0f);
mesh.texCoords << corner << corner << corner;
}
}
groupMaterialName = SMART_DEFAULT_MATERIAL_NAME;
}
if (!groupMaterialName.isEmpty()) {
OBJMaterial& material = materials[groupMaterialName];
if (specifiesUV) {
material.userSpecifiesUV = true; // Note might not be true in a later usage.
}
if (specifiesUV || (groupMaterialName.compare("none", Qt::CaseInsensitive) != 0)) {
// Blender has a convention that a material named "None" isn't really used (or defined).
material.used = true;
needsMaterialLibrary = groupMaterialName != SMART_DEFAULT_MATERIAL_NAME;
}
materials[groupMaterialName] = material;
meshPartNew.materialID = groupMaterialName;
}
}
}
}

mesh.meshExtents.reset();
foreach(const glm::vec3& vertex, mesh.vertices) {
mesh.meshExtents.addPoint(vertex);
geometry.meshExtents.addPoint(vertex);
}
// clean up old mesh parts.
int unmodifiedMeshPartCount = mesh.parts.count();
mesh.parts.clear();
mesh.parts = QVector<FBXMeshPart>(fbxMeshParts);

// Build the single mesh.
FBXReader::buildModelMesh(mesh, url.toString());
for (int i = 0, meshPartCount = 0; i < unmodifiedMeshPartCount; i++, meshPartCount++) {
FaceGroup faceGroup = faceGroups[meshPartCount];

// fbxDebugDump(geometry);
// Now that each mesh has been created with its own unique material mappings, fill them with data (vertex data is duplicated, face data is not).
foreach(OBJFace face, faceGroup) {
FBXMeshPart& meshPart = mesh.parts[materialMeshIdMap[face.materialName]];

glm::vec3 v0 = checked_at(vertices, face.vertexIndices[0]);
glm::vec3 v1 = checked_at(vertices, face.vertexIndices[1]);
glm::vec3 v2 = checked_at(vertices, face.vertexIndices[2]);

// Scale the vertices if the OBJ file scale is specified as non-one.
if (scaleGuess != 1.0f) {
v0 *= scaleGuess;
v1 *= scaleGuess;
v2 *= scaleGuess;
}

// Add the vertices.
meshPart.triangleIndices.append(mesh.vertices.count()); // not face.vertexIndices into vertices
mesh.vertices << v0;
meshPart.triangleIndices.append(mesh.vertices.count());
mesh.vertices << v1;
meshPart.triangleIndices.append(mesh.vertices.count());
mesh.vertices << v2;

glm::vec3 n0, n1, n2;
if (face.normalIndices.count()) {
n0 = checked_at(normals, face.normalIndices[0]);
n1 = checked_at(normals, face.normalIndices[1]);
n2 = checked_at(normals, face.normalIndices[2]);
} else {
// generate normals from triangle plane if not provided
n0 = n1 = n2 = glm::cross(v1 - v0, v2 - v0);
}

mesh.normals.append(n0);
mesh.normals.append(n1);
mesh.normals.append(n2);

if (face.textureUVIndices.count()) {
mesh.texCoords <<
checked_at(textureUVs, face.textureUVIndices[0]) <<
checked_at(textureUVs, face.textureUVIndices[1]) <<
checked_at(textureUVs, face.textureUVIndices[2]);
} else {
glm::vec2 corner(0.0f, 1.0f);
mesh.texCoords << corner << corner << corner;
}
}
}

mesh.meshExtents.reset();
foreach(const glm::vec3& vertex, mesh.vertices) {
mesh.meshExtents.addPoint(vertex);
geometry.meshExtents.addPoint(vertex);
}

// Build the single mesh.
FBXReader::buildModelMesh(mesh, url.toString());

// fbxDebugDump(geometry);
} catch(const std::exception& e) {
qCDebug(modelformat) << "OBJ reader fail: " << e.what();
}
@@ -624,15 +634,15 @@ FBXGeometry* OBJReader::readOBJ(QByteArray& model, const QVariantHash& mapping,
// Throw away any path part of libraryName, and merge against original url.
QUrl libraryUrl = _url.resolved(QUrl(libraryName).fileName());
qCDebug(modelformat) << "OBJ Reader material library" << libraryName << "used in" << _url;
QNetworkReply* netReply = request(libraryUrl, false);
if (replyOK(netReply, libraryUrl)) {
parseMaterialLibrary(netReply);
bool success;
QByteArray data;
std::tie<bool, QByteArray>(success, data) = requestData(libraryUrl);
if (success) {
QBuffer buffer { &data };
buffer.open(QIODevice::ReadOnly);
parseMaterialLibrary(&buffer);
} else {
qCDebug(modelformat) << "OBJ Reader WARNING:" << libraryName << "did not answer. Got"
<< (!netReply ? "aborted" : netReply->attribute(QNetworkRequest::HttpReasonPhraseAttribute).toString());
}
if (netReply) {
netReply->deleteLater();
qCDebug(modelformat) << "OBJ Reader WARNING:" << libraryName << "did not answer";
}
}
}
@@ -655,9 +665,9 @@ FBXGeometry* OBJReader::readOBJ(QByteArray& model, const QVariantHash& mapping,
if (!objMaterial.diffuseTextureFilename.isEmpty()) {
fbxMaterial.albedoTexture.filename = objMaterial.diffuseTextureFilename;
}
if (!objMaterial.specularTextureFilename.isEmpty()) {
fbxMaterial.specularTexture.filename = objMaterial.specularTextureFilename;
}
if (!objMaterial.specularTextureFilename.isEmpty()) {
fbxMaterial.specularTexture.filename = objMaterial.specularTextureFilename;
}

modelMaterial->setEmissive(fbxMaterial.emissiveColor);
modelMaterial->setAlbedo(fbxMaterial.diffuseColor);
@@ -72,7 +72,6 @@ public:
QString currentMaterialName;
QHash<QString, OBJMaterial> materials;

QNetworkReply* request(QUrl& url, bool isTest);
FBXGeometry* readOBJ(QByteArray& model, const QVariantHash& mapping, bool combineParts, const QUrl& url = QUrl());

private:
@@ -525,11 +525,14 @@ void GL41VariableAllocationTexture::populateTransferQueue() {

// break down the transfers into chunks so that no single transfer is
// consuming more than X bandwidth
// For compressed format, regions must be a multiple of the 4x4 tiles, so enforce 4 lines as the minimum block
auto mipSize = _gpuObject.getStoredMipFaceSize(sourceMip, face);
const auto lines = mipDimensions.y;
auto bytesPerLine = mipSize / lines;
const uint32_t BLOCK_NUM_LINES { 4 };
const auto numBlocks = (lines + (BLOCK_NUM_LINES - 1)) / BLOCK_NUM_LINES;
auto bytesPerBlock = mipSize / numBlocks;
Q_ASSERT(0 == (mipSize % lines));
uint32_t linesPerTransfer = (uint32_t)(MAX_TRANSFER_SIZE / bytesPerLine);
uint32_t linesPerTransfer = BLOCK_NUM_LINES * (uint32_t)(MAX_TRANSFER_SIZE / bytesPerBlock);
uint32_t lineOffset = 0;
while (lineOffset < lines) {
uint32_t linesToCopy = std::min<uint32_t>(lines - lineOffset, linesPerTransfer);
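With concrete (made-up) numbers the effect of the block-based sizing is easier to see. In the sketch below, MAX_TRANSFER_SIZE, the mip size and the line count are all hypothetical; the point is only that rounding to whole 4-line blocks keeps each transferred region compatible with 4x4 compressed tiles, which the old per-line computation did not guarantee.

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t MAX_TRANSFER_SIZE = 10000;  // hypothetical per-transfer byte budget
        const uint32_t mipSize = 64 * 1024;        // hypothetical compressed mip face size
        const uint32_t lines = 256;                // hypothetical line count for that face

        const uint32_t BLOCK_NUM_LINES = 4;
        uint32_t bytesPerLine = mipSize / lines;                                  // 256
        uint32_t numBlocks = (lines + (BLOCK_NUM_LINES - 1)) / BLOCK_NUM_LINES;   // 64
        uint32_t bytesPerBlock = mipSize / numBlocks;                             // 1024

        uint32_t oldLinesPerTransfer = MAX_TRANSFER_SIZE / bytesPerLine;                      // 39, not a multiple of 4
        uint32_t newLinesPerTransfer = BLOCK_NUM_LINES * (MAX_TRANSFER_SIZE / bytesPerBlock); // 4 * 9 = 36

        printf("old: %u lines per transfer, new: %u lines per transfer\n",
               oldLinesPerTransfer, newLinesPerTransfer);
        return 0;
    }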
@@ -196,11 +196,14 @@ void GL45ResourceTexture::populateTransferQueue() {

// break down the transfers into chunks so that no single transfer is
// consuming more than X bandwidth
// For compressed format, regions must be a multiple of the 4x4 tiles, so enforce 4 lines as the minimum block
auto mipSize = _gpuObject.getStoredMipFaceSize(sourceMip, face);
const auto lines = mipDimensions.y;
auto bytesPerLine = mipSize / lines;
const uint32_t BLOCK_NUM_LINES { 4 };
const auto numBlocks = (lines + (BLOCK_NUM_LINES - 1)) / BLOCK_NUM_LINES;
auto bytesPerBlock = mipSize / numBlocks;
Q_ASSERT(0 == (mipSize % lines));
uint32_t linesPerTransfer = (uint32_t)(MAX_TRANSFER_SIZE / bytesPerLine);
uint32_t linesPerTransfer = BLOCK_NUM_LINES * (uint32_t)(MAX_TRANSFER_SIZE / bytesPerBlock);
uint32_t lineOffset = 0;
while (lineOffset < lines) {
uint32_t linesToCopy = std::min<uint32_t>(lines - lineOffset, linesPerTransfer);
@@ -99,6 +99,61 @@ struct GPUKTXPayload {
};
const std::string GPUKTXPayload::KEY { "hifi.gpu" };


struct IrradianceKTXPayload {
using Version = uint8;

static const std::string KEY;
static const Version CURRENT_VERSION{ 0 };
static const size_t PADDING{ 3 };
static const size_t SIZE{ sizeof(Version) + sizeof(SphericalHarmonics) + PADDING };
static_assert(IrradianceKTXPayload::SIZE == 148, "Packing size may differ between platforms");
static_assert(IrradianceKTXPayload::SIZE % 4 == 0, "IrradianceKTXPayload is not 4 bytes aligned");

SphericalHarmonics _irradianceSH;

Byte* serialize(Byte* data) const {
*(Version*)data = CURRENT_VERSION;
data += sizeof(Version);

memcpy(data, &_irradianceSH, sizeof(SphericalHarmonics));
data += sizeof(SphericalHarmonics);

return data + PADDING;
}

bool unserialize(const Byte* data, size_t size) {
if (size != SIZE) {
return false;
}

Version version = *(const Version*)data;
if (version != CURRENT_VERSION) {
return false;
}
data += sizeof(Version);

memcpy(&_irradianceSH, data, sizeof(SphericalHarmonics));
data += sizeof(SphericalHarmonics);

return true;
}

static bool isIrradianceKTX(const ktx::KeyValue& val) {
return (val._key.compare(KEY) == 0);
}

static bool findInKeyValues(const ktx::KeyValues& keyValues, IrradianceKTXPayload& payload) {
auto found = std::find_if(keyValues.begin(), keyValues.end(), isIrradianceKTX);
if (found != keyValues.end()) {
auto value = found->_value;
return payload.unserialize(value.data(), value.size());
}
return false;
}
};
const std::string IrradianceKTXPayload::KEY{ "hifi.irradianceSH" };

KtxStorage::KtxStorage(const std::string& filename) : _filename(filename) {
{
// We are doing a lot of work here just to get descriptor data
@@ -304,16 +359,27 @@ ktx::KTXUniquePointer Texture::serialize(const Texture& texture) {
}
}

GPUKTXPayload keyval;
keyval._samplerDesc = texture.getSampler().getDesc();
keyval._usage = texture.getUsage();
keyval._usageType = texture.getUsageType();
GPUKTXPayload gpuKeyval;
gpuKeyval._samplerDesc = texture.getSampler().getDesc();
gpuKeyval._usage = texture.getUsage();
gpuKeyval._usageType = texture.getUsageType();

Byte keyvalPayload[GPUKTXPayload::SIZE];
keyval.serialize(keyvalPayload);
gpuKeyval.serialize(keyvalPayload);

ktx::KeyValues keyValues;
keyValues.emplace_back(GPUKTXPayload::KEY, (uint32)GPUKTXPayload::SIZE, (ktx::Byte*) &keyvalPayload);

if (texture.getIrradiance()) {
IrradianceKTXPayload irradianceKeyval;
irradianceKeyval._irradianceSH = *texture.getIrradiance();

Byte irradianceKeyvalPayload[IrradianceKTXPayload::SIZE];
irradianceKeyval.serialize(irradianceKeyvalPayload);

keyValues.emplace_back(IrradianceKTXPayload::KEY, (uint32)IrradianceKTXPayload::SIZE, (ktx::Byte*) &irradianceKeyvalPayload);
}

auto hash = texture.sourceHash();
if (!hash.empty()) {
// the sourceHash is an std::string in hex
@@ -409,6 +475,12 @@ TexturePointer Texture::unserialize(const std::string& ktxfile, const ktx::KTXDe
// Assing the mips availables
texture->setStoredMipFormat(mipFormat);
texture->setKtxBacking(ktxfile);

IrradianceKTXPayload irradianceKtxKeyValue;
if (IrradianceKTXPayload::findInKeyValues(descriptor.keyValues, irradianceKtxKeyValue)) {
texture->overrideIrradiance(std::make_shared<SphericalHarmonics>(irradianceKtxKeyValue._irradianceSH));
}

return texture;
}
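IrradianceKTXPayload above uses a simple layout: a version byte, a fixed-size POD body, and padding that keeps the record 4-byte aligned. The following self-contained sketch mirrors that layout with a small invented struct (not the engine's SphericalHarmonics or Byte types) so the serialize/unserialize round trip can be compiled and run on its own.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Invented stand-in for the version + payload + padding layout used above.
    struct DemoPayload {
        using Version = uint8_t;
        static const Version CURRENT_VERSION = 0;
        static const size_t PADDING = 3;
        static const size_t SIZE = sizeof(Version) + 4 * sizeof(float) + PADDING; // 20 bytes, 4-byte aligned

        float coeffs[4] = { 1.0f, 2.0f, 3.0f, 4.0f }; // stands in for SphericalHarmonics

        uint8_t* serialize(uint8_t* data) const {
            *data = CURRENT_VERSION;
            data += sizeof(Version);
            memcpy(data, coeffs, sizeof(coeffs));
            return data + sizeof(coeffs) + PADDING;
        }

        bool unserialize(const uint8_t* data, size_t size) {
            if (size != SIZE || *data != CURRENT_VERSION) {
                return false; // size or version mismatch, just like the real payload
            }
            memcpy(coeffs, data + sizeof(Version), sizeof(coeffs));
            return true;
        }
    };

    int main() {
        DemoPayload out, in;
        uint8_t buffer[DemoPayload::SIZE] = {};
        out.serialize(buffer);
        printf("round trip ok: %d\n", in.unserialize(buffer, sizeof(buffer)) ? 1 : 0);
        return 0;
    }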
@@ -40,16 +40,16 @@ AssetResourceRequest::~AssetResourceRequest() {
}
}

bool AssetResourceRequest::urlIsAssetHash() const {
bool AssetResourceRequest::urlIsAssetHash(const QUrl& url) {
static const QString ATP_HASH_REGEX_STRING { "^atp:([A-Fa-f0-9]{64})(\\.[\\w]+)?$" };

QRegExp hashRegex { ATP_HASH_REGEX_STRING };
return hashRegex.exactMatch(_url.toString());
return hashRegex.exactMatch(url.toString());
}

void AssetResourceRequest::doSend() {
// We'll either have a hash or an ATP path to a file (that maps to a hash)
if (urlIsAssetHash()) {
if (urlIsAssetHash(_url)) {
// We've detected that this is a hash - simply use AssetClient to request that asset
auto parts = _url.path().split(".", QString::SkipEmptyParts);
auto hash = parts.length() > 0 ? parts[0] : "";
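The hash test above is a plain regular-expression match against the whole URL string. The short stand-alone sketch below (the sample URLs are invented) shows which forms take the direct-hash branch of doSend() and which fall through to the path-mapping branch.

    #include <QRegExp>
    #include <QString>
    #include <QtDebug>

    int main() {
        static const QString ATP_HASH_REGEX_STRING { "^atp:([A-Fa-f0-9]{64})(\\.[\\w]+)?$" };
        QRegExp hashRegex { ATP_HASH_REGEX_STRING };

        QString hashUrl = "atp:" + QString(64, 'a') + ".obj"; // 64 hex characters plus an extension
        QString pathUrl = "atp:/models/chair.obj";            // a mapped path, not a raw hash

        qDebug() << hashRegex.exactMatch(hashUrl); // true  -> handled as an asset hash
        qDebug() << hashRegex.exactMatch(pathUrl); // false -> goes through path mapping
        return 0;
    }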
@@ -32,7 +32,7 @@ private slots:
void onDownloadProgress(qint64 bytesReceived, qint64 bytesTotal);

private:
bool urlIsAssetHash() const;
static bool urlIsAssetHash(const QUrl& url);

void requestMappingForPath(const AssetPath& path);
void requestHash(const AssetHash& hash);
@@ -14,10 +14,10 @@
#include <QNetworkDiskCache>
#include <QStandardPaths>
#include <QThread>
#include <QFileInfo>

#include <SharedUtil.h>


#include "AssetResourceRequest.h"
#include "FileResourceRequest.h"
#include "HTTPResourceRequest.h"
@@ -116,3 +116,51 @@ ResourceRequest* ResourceManager::createResourceRequest(QObject* parent, const Q
request->moveToThread(&_thread);
return request;
}


bool ResourceManager::resourceExists(const QUrl& url) {
auto scheme = url.scheme();
if (scheme == URL_SCHEME_FILE) {
QFileInfo file { url.toString() };
return file.exists();
} else if (scheme == URL_SCHEME_HTTP || scheme == URL_SCHEME_HTTPS || scheme == URL_SCHEME_FTP) {
auto& networkAccessManager = NetworkAccessManager::getInstance();
QNetworkRequest request { url };

request.setAttribute(QNetworkRequest::FollowRedirectsAttribute, true);
request.setHeader(QNetworkRequest::UserAgentHeader, HIGH_FIDELITY_USER_AGENT);

auto reply = networkAccessManager.head(request);

QEventLoop loop;
QObject::connect(reply, &QNetworkReply::finished, &loop, &QEventLoop::quit);
loop.exec();

reply->deleteLater();

return reply->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt() == 200;
} else if (scheme == URL_SCHEME_ATP) {
auto request = new AssetResourceRequest(url);
ByteRange range;
range.fromInclusive = 1;
range.toExclusive = 1;
request->setByteRange(range);
request->setCacheEnabled(false);

QEventLoop loop;

QObject::connect(request, &AssetResourceRequest::finished, &loop, &QEventLoop::quit);

request->send();

loop.exec();

request->deleteLater();

return request->getResult() == ResourceRequest::Success;
}

qCDebug(networking) << "Unknown scheme (" << scheme << ") for URL: " << url.url();
return false;
}
@@ -36,6 +36,10 @@ public:
static void init();
static void cleanup();

// Blocking call to check if a resource exists. This function uses a QEventLoop internally
// to return to the calling thread so that events can still be processed.
static bool resourceExists(const QUrl& url);

private:
static QThread _thread;
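resourceExists() is the blocking helper that OBJReader::isValidTexture() now calls instead of building its own QNetworkReply. The stand-alone sketch below reproduces only its HTTP branch with stock Qt classes and an example URL; the real implementation also sets the High Fidelity user agent and handles file and ATP URLs, as shown in the .cpp hunk above.

    #include <QCoreApplication>
    #include <QEventLoop>
    #include <QNetworkAccessManager>
    #include <QNetworkReply>
    #include <QNetworkRequest>
    #include <QUrl>
    #include <QtDebug>

    int main(int argc, char** argv) {
        QCoreApplication app(argc, argv);

        QNetworkAccessManager manager;
        QNetworkRequest request { QUrl("https://example.com/") }; // example URL only
        request.setAttribute(QNetworkRequest::FollowRedirectsAttribute, true);

        // Issue a HEAD request and block on a local event loop until it finishes,
        // while Qt keeps processing events on this thread.
        QNetworkReply* reply = manager.head(request);
        QEventLoop loop;
        QObject::connect(reply, &QNetworkReply::finished, &loop, &QEventLoop::quit);
        loop.exec();

        bool exists = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt() == 200;
        reply->deleteLater();
        qDebug() << "exists:" << exists;
        return 0;
    }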
@@ -38,54 +38,7 @@ var METAVERSE_BASE = location.metaverseServerUrl;

// It's totally unnecessary to return to C++ to perform many of these requests, such as DELETEing an old story,
// POSTING a new one, PUTTING a new audience, or GETTING story data. It's far more efficient to do all of that within JS
function request(options, callback) { // cb(error, responseOfCorrectContentType) of url. A subset of npm request.
var httpRequest = new XMLHttpRequest(), key;
// QT bug: apparently doesn't handle onload. Workaround using readyState.
httpRequest.onreadystatechange = function () {
var READY_STATE_DONE = 4;
var HTTP_OK = 200;
if (httpRequest.readyState >= READY_STATE_DONE) {
var error = (httpRequest.status !== HTTP_OK) && httpRequest.status.toString() + ':' + httpRequest.statusText,
response = !error && httpRequest.responseText,
contentType = !error && httpRequest.getResponseHeader('content-type');
if (!error && contentType.indexOf('application/json') === 0) { // ignoring charset, etc.
try {
response = JSON.parse(response);
} catch (e) {
error = e;
}
}
callback(error, response);
}
};
if (typeof options === 'string') {
options = { uri: options };
}
if (options.url) {
options.uri = options.url;
}
if (!options.method) {
options.method = 'GET';
}
if (options.body && (options.method === 'GET')) { // add query parameters
var params = [], appender = (-1 === options.uri.search('?')) ? '?' : '&';
for (key in options.body) {
params.push(key + '=' + options.body[key]);
}
options.uri += appender + params.join('&');
delete options.body;
}
if (options.json) {
options.headers = options.headers || {};
options.headers["Content-type"] = "application/json";
options.body = JSON.stringify(options.body);
}
for (key in options.headers || {}) {
httpRequest.setRequestHeader(key, options.headers[key]);
}
httpRequest.open(options.method, options.uri, true);
httpRequest.send(options.body);
}
var request = Script.require('request').request;

function openLoginWindow() {
if ((HMD.active && Settings.getValue("hmdTabletBecomesToolbar", false))
@@ -16,6 +16,13 @@
(function () { // BEGIN LOCAL_SCOPE

var request = Script.require('request').request;
var DEBUG = false;
function debug() {
if (!DEBUG) {
return;
}
print('tablet-goto.js:', [].map.call(arguments, JSON.stringify));
}

var gotoQmlSource = "TabletAddressDialog.qml";
var buttonName = "GOTO";
@@ -39,6 +46,7 @@
switch (message.method) {
case 'request':
request(message.params, function (error, data) {
debug('rpc', request, 'error:', error, 'data:', data);
response.error = error;
response.result = data;
tablet.sendToQml(response);
@@ -100,10 +108,24 @@
button.clicked.connect(onClicked);
tablet.screenChanged.connect(onScreenChanged);

var stories = {};
var DEBUG = false;
var stories = {}, pingPong = false;
function expire(id) {
var options = {
uri: location.metaverseServerUrl + '/api/v1/user_stories/' + id,
method: 'PUT',
json: true,
body: {expire: "true"}
};
request(options, function (error, response) {
debug('expired story', options, 'error:', error, 'response:', response);
if (error || (response.status !== 'success')) {
print("ERROR expiring story: ", error || response.status);
}
});
}
function pollForAnnouncements() {
var actions = DEBUG ? 'snapshot' : 'announcement';
// We could bail now if !Account.isLoggedIn(), but what if we someday have system-wide announcments?
var actions = 'announcement';
var count = DEBUG ? 10 : 100;
var options = [
'now=' + new Date().toISOString(),
@@ -117,13 +139,24 @@
request({
uri: url
}, function (error, data) {
debug(url, error, data);
if (error || (data.status !== 'success')) {
print("Error: unable to get", url, error || data.status);
return;
}
var didNotify = false;
var didNotify = false, key;
pingPong = !pingPong;
data.user_stories.forEach(function (story) {
if (stories[story.id]) { // already seen
var stored = stories[story.id], storedOrNew = stored || story;
debug('story exists:', !!stored, storedOrNew);
if ((storedOrNew.username === Account.username) && (storedOrNew.place_name !== location.placename)) {
if (storedOrNew.audience == 'for_connections') { // Only expire if we haven't already done so.
expire(story.id);
}
return; // before marking
}
storedOrNew.pingPong = pingPong;
if (stored) { // already seen
return;
}
stories[story.id] = story;
@@ -131,12 +164,20 @@
Window.displayAnnouncement(message);
didNotify = true;
});
for (key in stories) { // Any story we were tracking that was not marked, has expired.
if (stories[key].pingPong !== pingPong) {
debug('removing story', key);
delete stories[key];
}
}
if (didNotify) {
messagesWaiting(true);
if (HMD.isHandControllerAvailable()) {
var STRENGTH = 1.0, DURATION_MS = 60, HAND = 2; // both hands
Controller.triggerHapticPulse(STRENGTH, DURATION_MS, HAND);
}
} else if (!Object.keys(stories).length) { // If there's nothing being tracked, then any messageWaiting has expired.
messagesWaiting(false);
}
});
}