Merge pull request #1178 from HifiExperiments/uniformArrays

support more procedural shader uniform types, including arrays

Commit 1bb18c54ae
3 changed files with 173 additions and 52 deletions
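For orientation before the diff: procedural uniforms arrive as JSON in the material/entity user data (the `_data.uniforms` map in the code below), and this change teaches the engine to accept arrays of vectors and matrices in addition to plain scalars and single vectors. Below is a minimal Qt sketch of the kind of payload the new code is written to handle; the uniform names and values are invented for illustration, only the value shapes matter.

#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QJsonValue>
#include <QDebug>

int main() {
    // Hypothetical user-data uniforms: a scalar, a single vec3, and an array
    // of four vec3s. Names and values are invented for illustration only.
    const char* json = R"({
        "iSpeed": 2.5,
        "iTint": [1.0, 0.5, 0.25],
        "iColors": [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]]
    })";

    const QJsonObject uniforms = QJsonDocument::fromJson(json).object();
    for (const auto& key : uniforms.keys()) {
        const QJsonValue value = uniforms[key];
        // Same shape test the diff performs: an array whose first element is
        // itself an array is treated as an array uniform (one slot per element).
        const bool isArrayUniform = value.isArray() && !value.toArray().isEmpty()
                                    && value.toArray()[0].isArray();
        qDebug() << key << (isArrayUniform ? value.toArray().size() : 1) << "slot(s)";
    }
    return 0;
}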
@@ -288,6 +288,10 @@ public:
         _glUniformMatrix3fv(location, 1, false, glm::value_ptr(v));
     }
 
+    void _glUniform(int location, const glm::mat4& v) {
+        _glUniformMatrix4fv(location, 1, false, glm::value_ptr(v));
+    }
+
     // Maybe useful but shoudln't be public. Please convince me otherwise
     // Well porting to gles i need it...
     void runLambda(std::function<void()> f);
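The hunk above adds a glm::mat4 overload to the batch's uniform helpers, mirroring the existing glm::mat3 one: take the matrix by const reference and forward it to the matching *Matrix4fv entry point through glm::value_ptr. Here is a self-contained sketch of that forwarding pattern, using a stand-in recorder rather than the real gpu::Batch; the FakeBatch type and the slot number are invented for illustration.

#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <cstdio>

// Stand-in for the batch's lazy GL command recording; not the real API.
struct FakeBatch {
    void _glUniformMatrix4fv(int location, int count, bool transpose, const float* value) {
        std::printf("mat4 uniform at slot %d, count %d, first element %f\n", location, count, value[0]);
    }
    // The overload added in this PR, in spirit: glm::mat4 in, raw float pointer out.
    void _glUniform(int location, const glm::mat4& v) {
        _glUniformMatrix4fv(location, 1, false, glm::value_ptr(v));
    }
};

int main() {
    FakeBatch batch;
    batch._glUniform(7, glm::mat4(1.0f));  // identity matrix sent to illustrative slot 7
    return 0;
}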
@@ -356,16 +356,53 @@ void Procedural::prepare(gpu::Batch& batch,
             }
         }
         // Then fill in every reflections the new custom bindings
-        int customSlot = procedural::slot::uniform::Custom;
+        size_t customSlot = procedural::slot::uniform::Custom;
+        _slotMap.clear();
         for (const auto& key : _data.uniforms.keys()) {
-            std::string uniformName = key.toLocal8Bit().data();
-            for (auto reflection : allFragmentReflections) {
-                reflection->uniforms[uniformName] = customSlot;
-            }
-            for (auto reflection : allVertexReflections) {
-                reflection->uniforms[uniformName] = customSlot;
-            }
-            ++customSlot;
+            bool isArrayUniform = false;
+            size_t numSlots = 0;
+            const QJsonValue& value = _data.uniforms[key];
+            if (value.isDouble()) {
+                numSlots = 1;
+            } else if (value.isArray()) {
+                const QJsonArray valueArray = value.toArray();
+                if (valueArray.size() > 0) {
+                    if (valueArray[0].isArray()) {
+                        const size_t valueLength = valueArray[0].toArray().size();
+                        size_t count = 0;
+                        for (const QJsonValue& value : valueArray) {
+                            if (value.isArray()) {
+                                const QJsonArray innerValueArray = value.toArray();
+                                if (innerValueArray.size() == valueLength) {
+                                    if (valueLength == 3 || valueLength == 4 || valueLength == 9 || valueLength == 16) {
+                                        count++;
+                                        isArrayUniform = true;
+                                    }
+                                }
+                            }
+                        }
+                        numSlots = count;
+                    } else if (valueArray[0].isDouble()) {
+                        numSlots = 1;
+                    }
+                }
+            }
+
+            if (numSlots > 0) {
+                std::string uniformName = key.toLocal8Bit().data();
+                std::string trueUniformName = uniformName;
+                if (isArrayUniform) {
+                    trueUniformName += "[0]";
+                }
+                for (auto reflection : allFragmentReflections) {
+                    reflection->uniforms[trueUniformName] = customSlot;
+                }
+                for (auto reflection : allVertexReflections) {
+                    reflection->uniforms[trueUniformName] = customSlot;
+                }
+                _slotMap[uniformName] = customSlot;
+                customSlot += numSlots;
+            }
         }
     }
 
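The hunk above changes the slot accounting in prepare(). Previously every user uniform advanced customSlot by exactly one; now a uniform reserves numSlots consecutive slots, one for a scalar or a single flat vector and one per element for an array of vec3/vec4/mat3/mat4 values, and array uniforms are registered in the shader reflection under name[0], the form GL uses to report array uniform locations. The starting slot of each uniform is remembered in _slotMap so setupUniforms() can find it later. A small standalone sketch of that accounting rule, with invented uniform names and counts:

#include <cstdio>
#include <string>
#include <vector>

// Minimal sketch of the slot accounting performed by the loop above, on
// invented data: scalars and single vectors take 1 slot, arrays of
// vec3/vec4/mat3/mat4 take one slot per element, and array uniforms are
// registered in the reflection under "name[0]".
int main() {
    struct Uniform { std::string name; size_t numSlots; bool isArray; };
    const std::vector<Uniform> uniforms = {
        { "iSpeed",  1, false },   // double                      -> 1 slot
        { "iTint",   1, false },   // [r, g, b]                   -> 1 slot
        { "iColors", 4, true  },   // [[..], [..], [..], [..]]    -> 4 slots
    };

    size_t customSlot = 0;  // stands in for procedural::slot::uniform::Custom
    for (const auto& u : uniforms) {
        const std::string reflectionName = u.isArray ? u.name + "[0]" : u.name;
        std::printf("%-12s -> slot %zu (%zu slot(s))\n", reflectionName.c_str(), customSlot, u.numSlots);
        customSlot += u.numSlots;  // next uniform starts after the reserved range
    }
    return 0;
}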
@@ -448,59 +485,138 @@ void Procedural::prepare(gpu::Batch& batch,
     }
 }
 
 
 void Procedural::setupUniforms() {
     _uniforms.clear();
     // Set any userdata specified uniforms
-    int slot = procedural::slot::uniform::Custom;
     for (const auto& key : _data.uniforms.keys()) {
-        std::string uniformName = key.toLocal8Bit().data();
-        QJsonValue value = _data.uniforms[key];
+        const std::string uniformName = key.toLocal8Bit().data();
+        auto slotItr = _slotMap.find(uniformName);
+        if (slotItr == _slotMap.end()) {
+            continue;
+        }
+
+        const size_t slot = slotItr->second;
+        const QJsonValue& value = _data.uniforms[key];
         if (value.isDouble()) {
-            float v = value.toDouble();
+            const float v = value.toDouble();
             _uniforms.push_back([slot, v](gpu::Batch& batch) { batch._glUniform1f(slot, v); });
         } else if (value.isArray()) {
-            auto valueArray = value.toArray();
-            switch (valueArray.size()) {
-                case 0:
-                    break;
-
-                case 1: {
-                    float v = valueArray[0].toDouble();
-                    _uniforms.push_back([slot, v](gpu::Batch& batch) { batch._glUniform1f(slot, v); });
-                    break;
-                }
-
-                case 2: {
-                    glm::vec2 v{ valueArray[0].toDouble(), valueArray[1].toDouble() };
-                    _uniforms.push_back([slot, v](gpu::Batch& batch) { batch._glUniform2f(slot, v.x, v.y); });
-                    break;
-                }
-
-                case 3: {
-                    glm::vec3 v{
-                        valueArray[0].toDouble(),
-                        valueArray[1].toDouble(),
-                        valueArray[2].toDouble(),
-                    };
-                    _uniforms.push_back([slot, v](gpu::Batch& batch) { batch._glUniform3f(slot, v.x, v.y, v.z); });
-                    break;
-                }
-
-                default:
-                case 4: {
-                    glm::vec4 v{
-                        valueArray[0].toDouble(),
-                        valueArray[1].toDouble(),
-                        valueArray[2].toDouble(),
-                        valueArray[3].toDouble(),
-                    };
-                    _uniforms.push_back([slot, v](gpu::Batch& batch) { batch._glUniform4f(slot, v.x, v.y, v.z, v.w); });
-                    break;
-                }
-            }
-        }
-        slot++;
+            const QJsonArray valueArray = value.toArray();
+            if (valueArray.size() > 0) {
+                if (valueArray[0].isArray()) {
+                    const size_t valueLength = valueArray[0].toArray().size();
+                    std::vector<float> vs;
+                    vs.reserve(valueLength * valueArray.size());
+                    size_t count = 0;
+                    for (const QJsonValue& value : valueArray) {
+                        if (value.isArray()) {
+                            const QJsonArray innerValueArray = value.toArray();
+                            if (innerValueArray.size() == valueLength) {
+                                if (valueLength == 3 || valueLength == 4 || valueLength == 9 || valueLength == 16) {
+                                    for (size_t i = 0; i < valueLength; i++) {
+                                        vs.push_back(innerValueArray[i].toDouble());
+                                    }
+                                    count++;
+                                }
+                            }
+                        }
+                    }
+                    if (count > 0) {
+                        switch (valueLength) {
+                            case 3: {
+                                _uniforms.push_back([slot, vs, count](gpu::Batch& batch) { batch._glUniform3fv(slot, count, vs.data()); });
+                                break;
+                            }
+                            case 4: {
+                                _uniforms.push_back([slot, vs, count](gpu::Batch& batch) { batch._glUniform4fv(slot, count, vs.data()); });
+                                break;
+                            }
+                            case 9: {
+                                _uniforms.push_back([slot, vs, count](gpu::Batch& batch) { batch._glUniformMatrix3fv(slot, count, false, vs.data()); });
+                                break;
+                            }
+                            case 16: {
+                                _uniforms.push_back([slot, vs, count](gpu::Batch& batch) { batch._glUniformMatrix4fv(slot, count, false, vs.data()); });
+                                break;
+                            }
+                            default:
+                                break;
+                        }
+                    }
+                } else if (valueArray[0].isDouble()) {
+                    switch (valueArray.size()) {
+                        case 1: {
+                            const float v = valueArray[0].toDouble();
+                            _uniforms.push_back([slot, v](gpu::Batch& batch) { batch._glUniform(slot, v); });
+                            break;
+                        }
+                        case 2: {
+                            const glm::vec2 v{ valueArray[0].toDouble(), valueArray[1].toDouble() };
+                            _uniforms.push_back([slot, v](gpu::Batch& batch) { batch._glUniform(slot, v); });
+                            break;
+                        }
+                        case 3: {
+                            const glm::vec3 v{
+                                valueArray[0].toDouble(),
+                                valueArray[1].toDouble(),
+                                valueArray[2].toDouble(),
+                            };
+                            _uniforms.push_back([slot, v](gpu::Batch& batch) { batch._glUniform(slot, v); });
+                            break;
+                        }
+                        case 4: {
+                            const glm::vec4 v{
+                                valueArray[0].toDouble(),
+                                valueArray[1].toDouble(),
+                                valueArray[2].toDouble(),
+                                valueArray[3].toDouble(),
+                            };
+                            _uniforms.push_back([slot, v](gpu::Batch& batch) { batch._glUniform(slot, v); });
+                            break;
+                        }
+                        case 9: {
+                            const glm::mat3 m{
+                                valueArray[0].toDouble(),
+                                valueArray[1].toDouble(),
+                                valueArray[2].toDouble(),
+                                valueArray[3].toDouble(),
+                                valueArray[4].toDouble(),
+                                valueArray[5].toDouble(),
+                                valueArray[6].toDouble(),
+                                valueArray[7].toDouble(),
+                                valueArray[8].toDouble(),
+                            };
+                            _uniforms.push_back([slot, m](gpu::Batch& batch) { batch._glUniform(slot, m); });
+                            break;
+                        }
+                        case 16: {
+                            const glm::mat4 m{
+                                valueArray[0].toDouble(),
+                                valueArray[1].toDouble(),
+                                valueArray[2].toDouble(),
+                                valueArray[3].toDouble(),
+                                valueArray[4].toDouble(),
+                                valueArray[5].toDouble(),
+                                valueArray[6].toDouble(),
+                                valueArray[7].toDouble(),
+                                valueArray[8].toDouble(),
+                                valueArray[9].toDouble(),
+                                valueArray[10].toDouble(),
+                                valueArray[11].toDouble(),
+                                valueArray[12].toDouble(),
+                                valueArray[13].toDouble(),
+                                valueArray[14].toDouble(),
+                                valueArray[15].toDouble(),
+                            };
+                            _uniforms.push_back([slot, m](gpu::Batch& batch) { batch._glUniform(slot, m); });
+                            break;
+                        }
+                        default:
+                            break;
+                    }
+                }
+            }
+        }
     }
 
     _uniforms.push_back([this](gpu::Batch& batch) {
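In the rewritten setupUniforms() above, every element of an array uniform is flattened into one contiguous std::vector<float>, so the whole array can be uploaded with a single *fv call (glUniform3fv/4fv or glUniformMatrix3fv/4fv with a count greater than one). Below is a minimal sketch of that flattening step using glm matrices as the source data; the values and the slot are invented, and the final batch call appears only as a comment.

#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <cstdio>
#include <vector>

// Sketch of the flattening performed in setupUniforms(): each inner array of
// 16 numbers becomes 16 consecutive floats, so one glUniformMatrix4fv-style
// call can upload the whole array. Data here is invented for illustration.
int main() {
    const std::vector<glm::mat4> matrices = { glm::mat4(1.0f), glm::mat4(2.0f) };

    std::vector<float> vs;
    vs.reserve(16 * matrices.size());
    for (const auto& m : matrices) {
        const float* p = glm::value_ptr(m);
        vs.insert(vs.end(), p, p + 16);  // column-major, the layout GL expects with transpose = false
    }

    const size_t count = matrices.size();
    // In the real code this would become:
    //     batch._glUniformMatrix4fv(slot, count, false, vs.data());
    std::printf("uploading %zu mat4s, %zu floats total\n", count, vs.size());
    return 0;
}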
@@ -578,4 +694,4 @@ void graphics::ProceduralMaterial::initializeProcedural() {
     _procedural._transparentFragmentSource = gpu::Shader::getFragmentShaderSource(shader::render_utils::fragment::simple_procedural_translucent);
 
     _procedural._errorFallbackFragmentPath = ":" + QUrl("qrc:///shaders/errorShader.frag").path();
 }
@@ -190,6 +190,7 @@ protected:
     NetworkTexturePointer _channels[MAX_PROCEDURAL_TEXTURE_CHANNELS];
     std::unordered_map<std::string, std::string> _vertexReplacements;
     std::unordered_map<std::string, std::string> _fragmentReplacements;
+    std::unordered_map<std::string, size_t> _slotMap;
 
     std::unordered_map<ProceduralProgramKey, gpu::PipelinePointer> _proceduralPipelines;
     std::unordered_map<ProceduralProgramKey, gpu::PipelinePointer> _errorPipelines;
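The _slotMap member added above is the link between the two passes shown earlier: prepare() records the first slot assigned to each user uniform, and setupUniforms() looks the slot back up by name instead of recomputing it. A minimal standalone sketch of that handoff, with simplified types and invented uniform names:

#include <cstdio>
#include <string>
#include <unordered_map>

int main() {
    // Pass 1 (prepare): assign consecutive slots and remember where each uniform starts.
    std::unordered_map<std::string, size_t> slotMap;
    size_t customSlot = 0;
    slotMap["iTint"] = customSlot;   customSlot += 1;  // single vec3 takes one slot
    slotMap["iColors"] = customSlot; customSlot += 4;  // vec3[4] occupies four consecutive slots

    // Pass 2 (setupUniforms): look the slot back up by name when binding values.
    const auto itr = slotMap.find("iColors");
    if (itr != slotMap.end()) {
        std::printf("iColors starts at slot %zu\n", itr->second);
    }
    return 0;
}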