mirror of
https://github.com/godotengine/godot.git
synced 2025-10-19 16:03:29 +00:00
Renderer: Move reflect_spirv to RenderingShaderContainer
This change introduces a new protected type, `ReflectedShaderStage`, to `RenderingShaderContainer`. Derived types use it to access both the SPIR-V and the reflected module (`SpvReflectShaderModule`), allowing implementations to use the reflection information when compiling their platform-specific module.

* Fixes a memory leak in `reflect_spirv` that would not deallocate the `SpvReflectShaderModule` if an error occurred.
* Removes an unnecessary allocation when creating the `SpvReflectShaderModule` by passing the `NO_COPY` flag to the `spvReflectCreateShaderModule2` constructor function.
* Replaces `VectorView` with `Span` for consistency.
* Fixes unnecessary allocations in the D3D12 shader container in `_convert_spirv_to_nir` and `_convert_spirv_to_dxil`, which implicitly converted the old `VectorView` to a `Vector`.
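As a minimal sketch of the ownership pattern described above: the reflected module is created once with the `NO_COPY` flag and released from a destructor, so error paths can no longer leak it. `ReflectedStageSketch` is a hypothetical stand-in for the new `ReflectedShaderStage`; only the SPIRV-Reflect calls are the ones this commit actually uses.

#include "thirdparty/spirv-reflect/spirv_reflect.h"

struct ReflectedStageSketch {
    SpvReflectShaderModule module = {};
    bool valid = false;

    bool reflect(const void *p_spirv_bytes, size_t p_size_bytes) {
        // NO_COPY makes the module reference the caller's SPIR-V words instead of
        // allocating an internal copy, so the byte buffer must outlive the module.
        SpvReflectResult result = spvReflectCreateShaderModule2(SPV_REFLECT_MODULE_FLAG_NO_COPY, p_size_bytes, p_spirv_bytes, &module);
        valid = (result == SPV_REFLECT_RESULT_SUCCESS);
        return valid;
    }

    ~ReflectedStageSketch() {
        if (valid) {
            // Tying destruction to the owner's lifetime is what closes the leak:
            // an early error return can no longer skip this call.
            spvReflectDestroyShaderModule(&module);
        }
    }
};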
This commit is contained in:
parent
9283328fe7
commit
65e8b0951b
12 changed files with 425 additions and 391 deletions
@@ -268,7 +268,7 @@ uint32_t RenderingShaderContainerD3D12::_to_bytes_footer_extra_data(uint8_t *p_b
 }
 
 #if NIR_ENABLED
-bool RenderingShaderContainerD3D12::_convert_spirv_to_nir(const Vector<RenderingDeviceCommons::ShaderStageSPIRVData> &p_spirv, const nir_shader_compiler_options *p_compiler_options, HashMap<int, nir_shader *> &r_stages_nir_shaders, Vector<RenderingDeviceCommons::ShaderStage> &r_stages, BitField<RenderingDeviceCommons::ShaderStage> &r_stages_processed) {
+bool RenderingShaderContainerD3D12::_convert_spirv_to_nir(Span<ReflectedShaderStage> p_spirv, const nir_shader_compiler_options *p_compiler_options, HashMap<int, nir_shader *> &r_stages_nir_shaders, Vector<RenderingDeviceCommons::ShaderStage> &r_stages, BitField<RenderingDeviceCommons::ShaderStage> &r_stages_processed) {
     r_stages_processed.clear();
 
     dxil_spirv_runtime_conf dxil_runtime_conf = {};
@@ -287,7 +287,7 @@ bool RenderingShaderContainerD3D12::_convert_spirv_to_nir(const Vector<Rendering
     dxil_runtime_conf.inferred_read_only_images_as_srvs = false;
 
     // Translate SPIR-V to NIR.
-    for (int64_t i = 0; i < p_spirv.size(); i++) {
+    for (uint64_t i = 0; i < p_spirv.size(); i++) {
         RenderingDeviceCommons::ShaderStage stage = p_spirv[i].shader_stage;
         RenderingDeviceCommons::ShaderStage stage_flag = (RenderingDeviceCommons::ShaderStage)(1 << stage);
         r_stages.push_back(stage);
@@ -302,9 +302,10 @@ bool RenderingShaderContainerD3D12::_convert_spirv_to_nir(const Vector<Rendering
             MESA_SHADER_COMPUTE, // SHADER_STAGE_COMPUTE
         };
 
+        Span<uint32_t> code = p_spirv[i].spirv();
         nir_shader *shader = spirv_to_nir(
-                (const uint32_t *)(p_spirv[i].spirv.ptr()),
-                p_spirv[i].spirv.size() / sizeof(uint32_t),
+                code.ptr(),
+                code.size(),
                 nullptr,
                 0,
                 SPIRV_TO_MESA_STAGES[stage],
@@ -429,7 +430,7 @@ bool RenderingShaderContainerD3D12::_convert_nir_to_dxil(const HashMap<int, nir_
     return true;
 }
 
-bool RenderingShaderContainerD3D12::_convert_spirv_to_dxil(const Vector<RenderingDeviceCommons::ShaderStageSPIRVData> &p_spirv, HashMap<RenderingDeviceCommons::ShaderStage, Vector<uint8_t>> &r_dxil_blobs, Vector<RenderingDeviceCommons::ShaderStage> &r_stages, BitField<RenderingDeviceCommons::ShaderStage> &r_stages_processed) {
+bool RenderingShaderContainerD3D12::_convert_spirv_to_dxil(Span<ReflectedShaderStage> p_spirv, HashMap<RenderingDeviceCommons::ShaderStage, Vector<uint8_t>> &r_dxil_blobs, Vector<RenderingDeviceCommons::ShaderStage> &r_stages, BitField<RenderingDeviceCommons::ShaderStage> &r_stages_processed) {
     r_dxil_blobs.clear();
 
     HashMap<int, nir_shader *> stages_nir_shaders;
@@ -764,7 +765,7 @@ void RenderingShaderContainerD3D12::_nir_report_bitcode_bit_offset(uint64_t p_bi
 }
 #endif
 
-void RenderingShaderContainerD3D12::_set_from_shader_reflection_post(const String &p_shader_name, const RenderingDeviceCommons::ShaderReflection &p_reflection) {
+void RenderingShaderContainerD3D12::_set_from_shader_reflection_post(const RenderingDeviceCommons::ShaderReflection &p_reflection) {
     reflection_binding_set_uniforms_data_d3d12.resize(reflection_binding_set_uniforms_data.size());
     reflection_specialization_data_d3d12.resize(reflection_specialization_data.size());
 
@@ -780,7 +781,7 @@ void RenderingShaderContainerD3D12::_set_from_shader_reflection_post(const Strin
     }
 }
 
-bool RenderingShaderContainerD3D12::_set_code_from_spirv(const Vector<RenderingDeviceCommons::ShaderStageSPIRVData> &p_spirv) {
+bool RenderingShaderContainerD3D12::_set_code_from_spirv(Span<ReflectedShaderStage> p_spirv) {
 #if NIR_ENABLED
     reflection_data_d3d12.nir_runtime_data_root_param_idx = UINT32_MAX;
 
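The `Span<uint32_t>` accessor also removes the manual byte-to-word conversion the old byte buffer required before calling `spirv_to_nir()`. A small stand-alone sketch of the sizing difference, using standard C++ containers and hypothetical helper names rather than Godot types:

#include <cstddef>
#include <cstdint>
#include <vector>

// Old style: SPIR-V held as raw bytes, word count derived by division.
size_t word_count_from_bytes(const std::vector<uint8_t> &p_bytes) {
    return p_bytes.size() / sizeof(uint32_t);
}

// New style: a uint32_t-typed view already counts 32-bit SPIR-V words.
size_t word_count_from_words(const std::vector<uint32_t> &p_words) {
    return p_words.size();
}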
@@ -122,9 +122,9 @@ protected:
     uint32_t root_signature_crc = 0;
 
 #if NIR_ENABLED
-    bool _convert_spirv_to_nir(const Vector<RenderingDeviceCommons::ShaderStageSPIRVData> &p_spirv, const nir_shader_compiler_options *p_compiler_options, HashMap<int, nir_shader *> &r_stages_nir_shaders, Vector<RenderingDeviceCommons::ShaderStage> &r_stages, BitField<RenderingDeviceCommons::ShaderStage> &r_stages_processed);
+    bool _convert_spirv_to_nir(Span<ReflectedShaderStage> p_spirv, const nir_shader_compiler_options *p_compiler_options, HashMap<int, nir_shader *> &r_stages_nir_shaders, Vector<RenderingDeviceCommons::ShaderStage> &r_stages, BitField<RenderingDeviceCommons::ShaderStage> &r_stages_processed);
     bool _convert_nir_to_dxil(const HashMap<int, nir_shader *> &p_stages_nir_shaders, BitField<RenderingDeviceCommons::ShaderStage> p_stages_processed, HashMap<RenderingDeviceCommons::ShaderStage, Vector<uint8_t>> &r_dxil_blobs);
-    bool _convert_spirv_to_dxil(const Vector<RenderingDeviceCommons::ShaderStageSPIRVData> &p_spirv, HashMap<RenderingDeviceCommons::ShaderStage, Vector<uint8_t>> &r_dxil_blobs, Vector<RenderingDeviceCommons::ShaderStage> &r_stages, BitField<RenderingDeviceCommons::ShaderStage> &r_stages_processed);
+    bool _convert_spirv_to_dxil(Span<ReflectedShaderStage> p_spirv, HashMap<RenderingDeviceCommons::ShaderStage, Vector<uint8_t>> &r_dxil_blobs, Vector<RenderingDeviceCommons::ShaderStage> &r_stages, BitField<RenderingDeviceCommons::ShaderStage> &r_stages_processed);
     bool _generate_root_signature(BitField<RenderingDeviceCommons::ShaderStage> p_stages_processed);
 
     // GodotNirCallbacks.
@@ -146,8 +146,8 @@ protected:
     virtual uint32_t _to_bytes_reflection_binding_uniform_extra_data(uint8_t *p_bytes, uint32_t p_index) const override;
     virtual uint32_t _to_bytes_reflection_specialization_extra_data(uint8_t *p_bytes, uint32_t p_index) const override;
     virtual uint32_t _to_bytes_footer_extra_data(uint8_t *p_bytes) const override;
-    virtual void _set_from_shader_reflection_post(const String &p_shader_name, const RenderingDeviceCommons::ShaderReflection &p_reflection) override;
-    virtual bool _set_code_from_spirv(const Vector<RenderingDeviceCommons::ShaderStageSPIRVData> &p_spirv) override;
+    virtual void _set_from_shader_reflection_post(const RenderingDeviceCommons::ShaderReflection &p_reflection) override;
+    virtual bool _set_code_from_spirv(Span<ReflectedShaderStage> p_spirv) override;
 
 public:
     struct ShaderReflectionD3D12 {
@@ -292,7 +292,7 @@ protected:
 
     virtual uint32_t _format() const override;
     virtual uint32_t _format_version() const override;
-    virtual bool _set_code_from_spirv(const Vector<RenderingDeviceCommons::ShaderStageSPIRVData> &p_spirv) override;
+    virtual bool _set_code_from_spirv(Span<ReflectedShaderStage> p_spirv) override;
 };
 
 class RenderingShaderContainerFormatMetal : public RenderingShaderContainerFormat {
@@ -253,7 +253,7 @@ Error RenderingShaderContainerMetal::compile_metal_source(const char *p_source,
 #pragma clang diagnostic push
 #pragma clang diagnostic ignored "-Wunguarded-availability"
 
-bool RenderingShaderContainerMetal::_set_code_from_spirv(const Vector<RenderingDeviceCommons::ShaderStageSPIRVData> &p_spirv) {
+bool RenderingShaderContainerMetal::_set_code_from_spirv(Span<ReflectedShaderStage> p_spirv) {
     using namespace spirv_cross;
     using spirv_cross::CompilerMSL;
     using spirv_cross::Resource;
@@ -354,12 +354,11 @@ bool RenderingShaderContainerMetal::_set_code_from_spirv(const Vector<RenderingD
 
     for (uint32_t i = 0; i < p_spirv.size(); i++) {
         StageData &stage_data = mtl_shaders.write[i];
-        RD::ShaderStageSPIRVData const &v = p_spirv[i];
+        const ReflectedShaderStage &v = p_spirv[i];
         RD::ShaderStage stage = v.shader_stage;
         char const *stage_name = RD::SHADER_STAGE_NAMES[stage];
-        uint32_t const *const ir = reinterpret_cast<uint32_t const *const>(v.spirv.ptr());
-        size_t word_count = v.spirv.size() / sizeof(uint32_t);
-        Parser parser(ir, word_count);
+        Span<uint32_t> spirv = v.spirv();
+        Parser parser(spirv.ptr(), spirv.size());
         try {
             parser.parse();
         } catch (CompilerError &e) {
@@ -44,21 +44,21 @@ uint32_t RenderingShaderContainerVulkan::_format_version() const {
     return FORMAT_VERSION;
 }
 
-bool RenderingShaderContainerVulkan::_set_code_from_spirv(const Vector<RenderingDeviceCommons::ShaderStageSPIRVData> &p_spirv) {
+bool RenderingShaderContainerVulkan::_set_code_from_spirv(Span<ReflectedShaderStage> p_spirv) {
     PackedByteArray code_bytes;
     shaders.resize(p_spirv.size());
-    for (int64_t i = 0; i < p_spirv.size(); i++) {
+    for (uint64_t i = 0; i < p_spirv.size(); i++) {
         RenderingShaderContainer::Shader &shader = shaders.ptrw()[i];
 
         if (debug_info_enabled) {
             // Store SPIR-V as is when debug info is required.
-            shader.code_compressed_bytes = p_spirv[i].spirv;
+            shader.code_compressed_bytes = p_spirv[i].spirv_data();
             shader.code_compression_flags = 0;
             shader.code_decompressed_size = 0;
         } else {
             // Encode into smolv.
+            Span<uint8_t> spirv = p_spirv[i].spirv().reinterpret<uint8_t>();
             smolv::ByteArray smolv_bytes;
-            bool smolv_encoded = smolv::Encode(p_spirv[i].spirv.ptr(), p_spirv[i].spirv.size(), smolv_bytes, smolv::kEncodeFlagStripDebugInfo);
+            bool smolv_encoded = smolv::Encode(spirv.ptr(), spirv.size(), smolv_bytes, smolv::kEncodeFlagStripDebugInfo);
             ERR_FAIL_COND_V_MSG(!smolv_encoded, false, "Failed to compress SPIR-V into smolv.");
 
             code_bytes.resize(smolv_bytes.size());
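The `reinterpret<uint8_t>()` call above only changes how the same memory is viewed; no copy of the SPIR-V is made before handing it to `smolv::Encode()`. A standard C++ sketch of the same idea, using `std::span` purely for illustration (not what Godot's `Span` is):

#include <cstddef>
#include <cstdint>
#include <span>
#include <vector>

int main() {
    std::vector<uint32_t> words = { 0x07230203u, 0x00010000u }; // Stand-in SPIR-V words.
    std::span<const uint32_t> word_view(words);
    std::span<const std::byte> byte_view = std::as_bytes(word_view); // Same memory, byte-typed view.
    return byte_view.size() == word_view.size() * sizeof(uint32_t) ? 0 : 1;
}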
@@ -47,7 +47,7 @@ public:
 protected:
     virtual uint32_t _format() const override;
     virtual uint32_t _format_version() const override;
-    virtual bool _set_code_from_spirv(const Vector<RenderingDeviceCommons::ShaderStageSPIRVData> &p_spirv) override;
+    virtual bool _set_code_from_spirv(Span<ReflectedShaderStage> p_spirv) override;
 
 public:
     RenderingShaderContainerVulkan(bool p_debug_info_enabled);
@@ -428,15 +428,10 @@ void ShaderBakerExportPlugin::_process_work_item(WorkItem p_work_item) {
     Vector<RD::ShaderStageSPIRVData> spirv_data = ShaderRD::compile_stages(p_work_item.stage_sources);
     ERR_FAIL_COND_MSG(spirv_data.is_empty(), "Unable to retrieve SPIR-V data for shader");
 
-    RD::ShaderReflection shader_refl;
-    Error err = RenderingDeviceCommons::reflect_spirv(spirv_data, shader_refl);
-    ERR_FAIL_COND_MSG(err != OK, "Unable to reflect SPIR-V data that was compiled");
-
     Ref<RenderingShaderContainer> shader_container = shader_container_format->create_container();
-    shader_container->set_from_shader_reflection(p_work_item.shader_name, shader_refl);
 
     // Compile shader binary from SPIR-V.
-    bool code_compiled = shader_container->set_code_from_spirv(spirv_data);
+    bool code_compiled = shader_container->set_code_from_spirv(p_work_item.shader_name, spirv_data);
     ERR_FAIL_COND_MSG(!code_compiled, vformat("Failed to compile code to native for SPIR-V."));
 
     PackedByteArray shader_bytes = shader_container->to_bytes();
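From the caller's side, the new API collapses reflection and native compilation into a single call. A hedged usage sketch; the helper `bake_to_bytes` is hypothetical, while the container methods are the ones shown in this hunk:

PackedByteArray bake_to_bytes(const Ref<RenderingShaderContainer> &p_container, const String &p_name, const Vector<RD::ShaderStageSPIRVData> &p_spirv) {
    // Reflection failures and backend compilation failures both report here.
    if (!p_container->set_code_from_spirv(p_name, p_spirv)) {
        return PackedByteArray();
    }
    return p_container->to_bytes();
}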
@@ -3366,19 +3366,12 @@ String RenderingDevice::_shader_uniform_debug(RID p_shader, int p_set) {
     }
 }
 
 Vector<uint8_t> RenderingDevice::shader_compile_binary_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv, const String &p_shader_name) {
-    ShaderReflection shader_refl;
-    if (reflect_spirv(p_spirv, shader_refl) != OK) {
-        return Vector<uint8_t>();
-    }
-
     const RenderingShaderContainerFormat &container_format = driver->get_shader_container_format();
     Ref<RenderingShaderContainer> shader_container = container_format.create_container();
     ERR_FAIL_COND_V(shader_container.is_null(), Vector<uint8_t>());
 
-    shader_container->set_from_shader_reflection(p_shader_name, shader_refl);
-
     // Compile shader binary from SPIR-V.
-    bool code_compiled = shader_container->set_code_from_spirv(p_spirv);
+    bool code_compiled = shader_container->set_code_from_spirv(p_shader_name, p_spirv);
     ERR_FAIL_COND_V_MSG(!code_compiled, Vector<uint8_t>(), vformat("Failed to compile code to native for SPIR-V."));
 
     return shader_container->to_bytes();
@@ -30,8 +30,6 @@
 
 #include "rendering_device_commons.h"
 
-#include "thirdparty/spirv-reflect/spirv_reflect.h"
-
 /*****************/
 /**** GENERIC ****/
 /*****************/
@ -970,342 +968,3 @@ const char *RenderingDeviceCommons::SHADER_STAGE_NAMES[SHADER_STAGE_MAX] = {
|
||||||
"TesselationEvaluation",
|
"TesselationEvaluation",
|
||||||
"Compute",
|
"Compute",
|
||||||
};
|
};
|
||||||
|
|
||||||
Error RenderingDeviceCommons::reflect_spirv(VectorView<ShaderStageSPIRVData> p_spirv, ShaderReflection &r_reflection) {
|
|
||||||
r_reflection = {};
|
|
||||||
|
|
||||||
const uint32_t spirv_size = p_spirv.size();
|
|
||||||
for (uint32_t i = 0; i < spirv_size; i++) {
|
|
||||||
ShaderStage stage = p_spirv[i].shader_stage;
|
|
||||||
ShaderStage stage_flag = (ShaderStage)(1 << p_spirv[i].shader_stage);
|
|
||||||
|
|
||||||
if (p_spirv[i].shader_stage == SHADER_STAGE_COMPUTE) {
|
|
||||||
r_reflection.is_compute = true;
|
|
||||||
ERR_FAIL_COND_V_MSG(spirv_size != 1, FAILED,
|
|
||||||
"Compute shaders can only receive one stage, dedicated to compute.");
|
|
||||||
}
|
|
||||||
ERR_FAIL_COND_V_MSG(r_reflection.stages_bits.has_flag(stage_flag), FAILED,
|
|
||||||
"Stage " + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + " submitted more than once.");
|
|
||||||
|
|
||||||
{
|
|
||||||
SpvReflectShaderModule module;
|
|
||||||
const uint8_t *spirv = p_spirv[i].spirv.ptr();
|
|
||||||
SpvReflectResult result = spvReflectCreateShaderModule(p_spirv[i].spirv.size(), spirv, &module);
|
|
||||||
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
|
||||||
"Reflection of SPIR-V shader stage '" + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed parsing shader.");
|
|
||||||
|
|
||||||
if (r_reflection.is_compute) {
|
|
||||||
r_reflection.compute_local_size[0] = module.entry_points->local_size.x;
|
|
||||||
r_reflection.compute_local_size[1] = module.entry_points->local_size.y;
|
|
||||||
r_reflection.compute_local_size[2] = module.entry_points->local_size.z;
|
|
||||||
}
|
|
||||||
uint32_t binding_count = 0;
|
|
||||||
result = spvReflectEnumerateDescriptorBindings(&module, &binding_count, nullptr);
|
|
||||||
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
|
||||||
"Reflection of SPIR-V shader stage '" + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed enumerating descriptor bindings.");
|
|
||||||
|
|
||||||
if (binding_count > 0) {
|
|
||||||
// Parse bindings.
|
|
||||||
|
|
||||||
Vector<SpvReflectDescriptorBinding *> bindings;
|
|
||||||
bindings.resize(binding_count);
|
|
||||||
result = spvReflectEnumerateDescriptorBindings(&module, &binding_count, bindings.ptrw());
|
|
||||||
|
|
||||||
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
|
||||||
"Reflection of SPIR-V shader stage '" + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed getting descriptor bindings.");
|
|
||||||
|
|
||||||
for (uint32_t j = 0; j < binding_count; j++) {
|
|
||||||
const SpvReflectDescriptorBinding &binding = *bindings[j];
|
|
||||||
|
|
||||||
ShaderUniform uniform;
|
|
||||||
|
|
||||||
bool need_array_dimensions = false;
|
|
||||||
bool need_block_size = false;
|
|
||||||
bool may_be_writable = false;
|
|
||||||
|
|
||||||
switch (binding.descriptor_type) {
|
|
||||||
case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER: {
|
|
||||||
uniform.type = UNIFORM_TYPE_SAMPLER;
|
|
||||||
need_array_dimensions = true;
|
|
||||||
} break;
|
|
||||||
case SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
|
|
||||||
uniform.type = UNIFORM_TYPE_SAMPLER_WITH_TEXTURE;
|
|
||||||
need_array_dimensions = true;
|
|
||||||
} break;
|
|
||||||
case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
|
|
||||||
uniform.type = UNIFORM_TYPE_TEXTURE;
|
|
||||||
need_array_dimensions = true;
|
|
||||||
} break;
|
|
||||||
case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
|
|
||||||
uniform.type = UNIFORM_TYPE_IMAGE;
|
|
||||||
need_array_dimensions = true;
|
|
||||||
may_be_writable = true;
|
|
||||||
} break;
|
|
||||||
case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
|
|
||||||
uniform.type = UNIFORM_TYPE_TEXTURE_BUFFER;
|
|
||||||
need_array_dimensions = true;
|
|
||||||
} break;
|
|
||||||
case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
|
|
||||||
uniform.type = UNIFORM_TYPE_IMAGE_BUFFER;
|
|
||||||
need_array_dimensions = true;
|
|
||||||
may_be_writable = true;
|
|
||||||
} break;
|
|
||||||
case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
|
|
||||||
uniform.type = UNIFORM_TYPE_UNIFORM_BUFFER;
|
|
||||||
need_block_size = true;
|
|
||||||
} break;
|
|
||||||
case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
|
|
||||||
uniform.type = UNIFORM_TYPE_STORAGE_BUFFER;
|
|
||||||
need_block_size = true;
|
|
||||||
may_be_writable = true;
|
|
||||||
} break;
|
|
||||||
case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: {
|
|
||||||
ERR_PRINT("Dynamic uniform buffer not supported.");
|
|
||||||
continue;
|
|
||||||
} break;
|
|
||||||
case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
|
|
||||||
ERR_PRINT("Dynamic storage buffer not supported.");
|
|
||||||
continue;
|
|
||||||
} break;
|
|
||||||
case SPV_REFLECT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
|
|
||||||
uniform.type = UNIFORM_TYPE_INPUT_ATTACHMENT;
|
|
||||||
need_array_dimensions = true;
|
|
||||||
} break;
|
|
||||||
case SPV_REFLECT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: {
|
|
||||||
ERR_PRINT("Acceleration structure not supported.");
|
|
||||||
continue;
|
|
||||||
} break;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (need_array_dimensions) {
|
|
||||||
if (binding.array.dims_count == 0) {
|
|
||||||
uniform.length = 1;
|
|
||||||
} else {
|
|
||||||
for (uint32_t k = 0; k < binding.array.dims_count; k++) {
|
|
||||||
if (k == 0) {
|
|
||||||
uniform.length = binding.array.dims[0];
|
|
||||||
} else {
|
|
||||||
uniform.length *= binding.array.dims[k];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} else if (need_block_size) {
|
|
||||||
uniform.length = binding.block.size;
|
|
||||||
} else {
|
|
||||||
uniform.length = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (may_be_writable) {
|
|
||||||
if (binding.descriptor_type == SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
|
|
||||||
uniform.writable = !(binding.decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE);
|
|
||||||
} else {
|
|
||||||
uniform.writable = !(binding.decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE) && !(binding.block.decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
uniform.writable = false;
|
|
||||||
}
|
|
||||||
|
|
||||||
uniform.binding = binding.binding;
|
|
||||||
uint32_t set = binding.set;
|
|
||||||
|
|
||||||
ERR_FAIL_COND_V_MSG(set >= MAX_UNIFORM_SETS, FAILED,
|
|
||||||
"On shader stage '" + String(SHADER_STAGE_NAMES[stage]) + "', uniform '" + binding.name + "' uses a set (" + itos(set) + ") index larger than what is supported (" + itos(MAX_UNIFORM_SETS) + ").");
|
|
||||||
|
|
||||||
if (set < (uint32_t)r_reflection.uniform_sets.size()) {
|
|
||||||
// Check if this already exists.
|
|
||||||
bool exists = false;
|
|
||||||
for (int k = 0; k < r_reflection.uniform_sets[set].size(); k++) {
|
|
||||||
if (r_reflection.uniform_sets[set][k].binding == uniform.binding) {
|
|
||||||
// Already exists, verify that it's the same type.
|
|
||||||
ERR_FAIL_COND_V_MSG(r_reflection.uniform_sets[set][k].type != uniform.type, FAILED,
|
|
||||||
"On shader stage '" + String(SHADER_STAGE_NAMES[stage]) + "', uniform '" + binding.name + "' trying to reuse location for set=" + itos(set) + ", binding=" + itos(uniform.binding) + " with different uniform type.");
|
|
||||||
|
|
||||||
// Also, verify that it's the same size.
|
|
||||||
ERR_FAIL_COND_V_MSG(r_reflection.uniform_sets[set][k].length != uniform.length, FAILED,
|
|
||||||
"On shader stage '" + String(SHADER_STAGE_NAMES[stage]) + "', uniform '" + binding.name + "' trying to reuse location for set=" + itos(set) + ", binding=" + itos(uniform.binding) + " with different uniform size.");
|
|
||||||
|
|
||||||
// Also, verify that it has the same writability.
|
|
||||||
ERR_FAIL_COND_V_MSG(r_reflection.uniform_sets[set][k].writable != uniform.writable, FAILED,
|
|
||||||
"On shader stage '" + String(SHADER_STAGE_NAMES[stage]) + "', uniform '" + binding.name + "' trying to reuse location for set=" + itos(set) + ", binding=" + itos(uniform.binding) + " with different writability.");
|
|
||||||
|
|
||||||
// Just append stage mask and return.
|
|
||||||
r_reflection.uniform_sets.write[set].write[k].stages.set_flag(stage_flag);
|
|
||||||
exists = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (exists) {
|
|
||||||
continue; // Merged.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
uniform.stages.set_flag(stage_flag);
|
|
||||||
|
|
||||||
if (set >= (uint32_t)r_reflection.uniform_sets.size()) {
|
|
||||||
r_reflection.uniform_sets.resize(set + 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
r_reflection.uniform_sets.write[set].push_back(uniform);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
// Specialization constants.
|
|
||||||
|
|
||||||
uint32_t sc_count = 0;
|
|
||||||
result = spvReflectEnumerateSpecializationConstants(&module, &sc_count, nullptr);
|
|
||||||
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
|
||||||
"Reflection of SPIR-V shader stage '" + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed enumerating specialization constants.");
|
|
||||||
|
|
||||||
if (sc_count) {
|
|
||||||
Vector<SpvReflectSpecializationConstant *> spec_constants;
|
|
||||||
spec_constants.resize(sc_count);
|
|
||||||
|
|
||||||
result = spvReflectEnumerateSpecializationConstants(&module, &sc_count, spec_constants.ptrw());
|
|
||||||
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
|
||||||
"Reflection of SPIR-V shader stage '" + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed obtaining specialization constants.");
|
|
||||||
|
|
||||||
for (uint32_t j = 0; j < sc_count; j++) {
|
|
||||||
int32_t existing = -1;
|
|
||||||
ShaderSpecializationConstant sconst;
|
|
||||||
SpvReflectSpecializationConstant *spc = spec_constants[j];
|
|
||||||
|
|
||||||
sconst.constant_id = spc->constant_id;
|
|
||||||
sconst.int_value = 0; // Clear previous value JIC.
|
|
||||||
switch (spc->constant_type) {
|
|
||||||
case SPV_REFLECT_SPECIALIZATION_CONSTANT_BOOL: {
|
|
||||||
sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_BOOL;
|
|
||||||
sconst.bool_value = spc->default_value.int_bool_value != 0;
|
|
||||||
} break;
|
|
||||||
case SPV_REFLECT_SPECIALIZATION_CONSTANT_INT: {
|
|
||||||
sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_INT;
|
|
||||||
sconst.int_value = spc->default_value.int_bool_value;
|
|
||||||
} break;
|
|
||||||
case SPV_REFLECT_SPECIALIZATION_CONSTANT_FLOAT: {
|
|
||||||
sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_FLOAT;
|
|
||||||
sconst.float_value = spc->default_value.float_value;
|
|
||||||
} break;
|
|
||||||
}
|
|
||||||
sconst.stages.set_flag(stage_flag);
|
|
||||||
|
|
||||||
for (int k = 0; k < r_reflection.specialization_constants.size(); k++) {
|
|
||||||
if (r_reflection.specialization_constants[k].constant_id == sconst.constant_id) {
|
|
||||||
ERR_FAIL_COND_V_MSG(r_reflection.specialization_constants[k].type != sconst.type, FAILED, "More than one specialization constant used for id (" + itos(sconst.constant_id) + "), but their types differ.");
|
|
||||||
ERR_FAIL_COND_V_MSG(r_reflection.specialization_constants[k].int_value != sconst.int_value, FAILED, "More than one specialization constant used for id (" + itos(sconst.constant_id) + "), but their default values differ.");
|
|
||||||
existing = k;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (existing >= 0) {
|
|
||||||
r_reflection.specialization_constants.write[existing].stages.set_flag(stage_flag);
|
|
||||||
} else {
|
|
||||||
r_reflection.specialization_constants.push_back(sconst);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
r_reflection.specialization_constants.sort();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (stage == SHADER_STAGE_VERTEX || stage == SHADER_STAGE_FRAGMENT) {
|
|
||||||
uint32_t iv_count = 0;
|
|
||||||
result = spvReflectEnumerateInputVariables(&module, &iv_count, nullptr);
|
|
||||||
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
|
||||||
"Reflection of SPIR-V shader stage '" + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed enumerating input variables.");
|
|
||||||
|
|
||||||
if (iv_count) {
|
|
||||||
Vector<SpvReflectInterfaceVariable *> input_vars;
|
|
||||||
input_vars.resize(iv_count);
|
|
||||||
|
|
||||||
result = spvReflectEnumerateInputVariables(&module, &iv_count, input_vars.ptrw());
|
|
||||||
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
|
||||||
"Reflection of SPIR-V shader stage '" + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed obtaining input variables.");
|
|
||||||
|
|
||||||
for (const SpvReflectInterfaceVariable *v : input_vars) {
|
|
||||||
if (!v) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if (stage == SHADER_STAGE_VERTEX) {
|
|
||||||
if (v->decoration_flags == 0) { // Regular input.
|
|
||||||
r_reflection.vertex_input_mask |= (((uint64_t)1) << v->location);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (v->built_in == SpvBuiltInViewIndex) {
|
|
||||||
r_reflection.has_multiview = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (stage == SHADER_STAGE_FRAGMENT) {
|
|
||||||
uint32_t ov_count = 0;
|
|
||||||
result = spvReflectEnumerateOutputVariables(&module, &ov_count, nullptr);
|
|
||||||
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
|
||||||
"Reflection of SPIR-V shader stage '" + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed enumerating output variables.");
|
|
||||||
|
|
||||||
if (ov_count) {
|
|
||||||
Vector<SpvReflectInterfaceVariable *> output_vars;
|
|
||||||
output_vars.resize(ov_count);
|
|
||||||
|
|
||||||
result = spvReflectEnumerateOutputVariables(&module, &ov_count, output_vars.ptrw());
|
|
||||||
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
|
||||||
"Reflection of SPIR-V shader stage '" + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed obtaining output variables.");
|
|
||||||
|
|
||||||
for (const SpvReflectInterfaceVariable *refvar : output_vars) {
|
|
||||||
if (!refvar) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if (refvar->built_in != SpvBuiltInFragDepth) {
|
|
||||||
r_reflection.fragment_output_mask |= 1 << refvar->location;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
uint32_t pc_count = 0;
|
|
||||||
result = spvReflectEnumeratePushConstantBlocks(&module, &pc_count, nullptr);
|
|
||||||
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
|
||||||
"Reflection of SPIR-V shader stage '" + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed enumerating push constants.");
|
|
||||||
|
|
||||||
if (pc_count) {
|
|
||||||
ERR_FAIL_COND_V_MSG(pc_count > 1, FAILED,
|
|
||||||
"Reflection of SPIR-V shader stage '" + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "': Only one push constant is supported, which should be the same across shader stages.");
|
|
||||||
|
|
||||||
Vector<SpvReflectBlockVariable *> pconstants;
|
|
||||||
pconstants.resize(pc_count);
|
|
||||||
result = spvReflectEnumeratePushConstantBlocks(&module, &pc_count, pconstants.ptrw());
|
|
||||||
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
|
||||||
"Reflection of SPIR-V shader stage '" + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed obtaining push constants.");
|
|
||||||
#if 0
|
|
||||||
if (pconstants[0] == nullptr) {
|
|
||||||
Ref<FileAccess> f = FileAccess::open("res://popo.spv", FileAccess::WRITE);
|
|
||||||
f->store_buffer((const uint8_t *)&SpirV[0], SpirV.size() * sizeof(uint32_t));
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
ERR_FAIL_COND_V_MSG(r_reflection.push_constant_size && r_reflection.push_constant_size != pconstants[0]->size, FAILED,
|
|
||||||
"Reflection of SPIR-V shader stage '" + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "': Push constant block must be the same across shader stages.");
|
|
||||||
|
|
||||||
r_reflection.push_constant_size = pconstants[0]->size;
|
|
||||||
r_reflection.push_constant_stages.set_flag(stage_flag);
|
|
||||||
|
|
||||||
//print_line("Stage: " + String(SHADER_STAGE_NAMES[stage]) + " push constant of size=" + itos(push_constant.push_constant_size));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Destroy the reflection data when no longer required.
|
|
||||||
spvReflectDestroyShaderModule(&module);
|
|
||||||
}
|
|
||||||
|
|
||||||
r_reflection.stages_bits.set_flag(stage_flag);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort all uniform_sets by binding.
|
|
||||||
for (uint32_t i = 0; i < r_reflection.uniform_sets.size(); i++) {
|
|
||||||
r_reflection.uniform_sets.write[i].sort();
|
|
||||||
}
|
|
||||||
|
|
||||||
return OK;
|
|
||||||
}
|
|
||||||
|
|
|
@@ -1073,6 +1073,4 @@ public:
     BitField<ShaderStage> stages_bits = {};
     BitField<ShaderStage> push_constant_stages = {};
 };
-
-    static Error reflect_spirv(VectorView<ShaderStageSPIRVData> p_spirv, ShaderReflection &r_reflection);
 };
@@ -32,6 +32,8 @@
 
 #include "core/io/compression.h"
 
+#include "thirdparty/spirv-reflect/spirv_reflect.h"
+
 static inline uint32_t aligned_to(uint32_t p_size, uint32_t p_alignment) {
     if (p_size % p_alignment) {
         return p_size + (p_alignment - (p_size % p_alignment));
@@ -40,6 +42,23 @@ static inline uint32_t aligned_to(uint32_t p_size, uint32_t p_alignment) {
     }
 }
 
+RenderingShaderContainer::ReflectedShaderStage::ReflectedShaderStage() :
+        _module(memnew(SpvReflectShaderModule)) {
+}
+
+RenderingShaderContainer::ReflectedShaderStage::~ReflectedShaderStage() {
+    spvReflectDestroyShaderModule(_module);
+    memdelete(_module);
+}
+
+const SpvReflectShaderModule &RenderingShaderContainer::ReflectedShaderStage::module() const {
+    return *_module;
+}
+
+const Span<uint32_t> RenderingShaderContainer::ReflectedShaderStage::spirv() const {
+    return _spirv_data.span().reinterpret<uint32_t>();
+}
+
 uint32_t RenderingShaderContainer::_from_bytes_header_extra_data(const uint8_t *p_bytes) {
     return 0;
 }
@@ -100,18 +119,366 @@ uint32_t RenderingShaderContainer::_to_bytes_footer_extra_data(uint8_t *) const
     return 0;
 }
 
-void RenderingShaderContainer::_set_from_shader_reflection_post(const String &p_shader_name, const RenderingDeviceCommons::ShaderReflection &p_reflection) {
+void RenderingShaderContainer::_set_from_shader_reflection_post(const RenderingDeviceCommons::ShaderReflection &p_reflection) {
     // Do nothing.
 }
 
-void RenderingShaderContainer::set_from_shader_reflection(const String &p_shader_name, const RenderingDeviceCommons::ShaderReflection &p_reflection) {
+Error RenderingShaderContainer::reflect_spirv(const String &p_shader_name, Span<RenderingDeviceCommons::ShaderStageSPIRVData> p_spirv, LocalVector<ReflectedShaderStage> &r_refl) {
using RDC = RenderingDeviceCommons;
|
||||||
|
RDC::ShaderReflection reflection;
|
||||||
|
|
||||||
|
const uint32_t spirv_size = p_spirv.size() + 0;
|
||||||
|
r_refl.resize(spirv_size);
|
||||||
|
|
||||||
|
for (uint32_t i = 0; i < spirv_size; i++) {
|
||||||
|
RDC::ShaderStage stage = p_spirv[i].shader_stage;
|
||||||
|
RDC::ShaderStage stage_flag = (RDC::ShaderStage)(1 << p_spirv[i].shader_stage);
|
||||||
|
r_refl[i].shader_stage = p_spirv[i].shader_stage;
|
||||||
|
r_refl[i]._spirv_data = p_spirv[i].spirv;
|
||||||
|
|
||||||
|
if (p_spirv[i].shader_stage == RDC::SHADER_STAGE_COMPUTE) {
|
||||||
|
reflection.is_compute = true;
|
||||||
|
ERR_FAIL_COND_V_MSG(spirv_size != 1, FAILED,
|
||||||
|
"Compute shaders can only receive one stage, dedicated to compute.");
|
||||||
|
}
|
||||||
|
ERR_FAIL_COND_V_MSG(reflection.stages_bits.has_flag(stage_flag), FAILED,
|
||||||
|
"Stage " + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + " submitted more than once.");
|
||||||
|
|
||||||
|
{
|
||||||
|
SpvReflectShaderModule &module = *r_refl.ptr()[i]._module;
|
||||||
|
const uint8_t *spirv = p_spirv[i].spirv.ptr();
|
||||||
|
SpvReflectResult result = spvReflectCreateShaderModule2(SPV_REFLECT_MODULE_FLAG_NO_COPY, p_spirv[i].spirv.size(), spirv, &module);
|
||||||
|
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
||||||
|
"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed parsing shader.");
|
||||||
|
|
||||||
|
for (uint32_t j = 0; j < module.capability_count; j++) {
|
||||||
|
if (module.capabilities[j].value == SpvCapabilityMultiView) {
|
||||||
|
reflection.has_multiview = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (reflection.is_compute) {
|
||||||
|
reflection.compute_local_size[0] = module.entry_points->local_size.x;
|
||||||
|
reflection.compute_local_size[1] = module.entry_points->local_size.y;
|
||||||
|
reflection.compute_local_size[2] = module.entry_points->local_size.z;
|
||||||
|
}
|
||||||
|
uint32_t binding_count = 0;
|
||||||
|
result = spvReflectEnumerateDescriptorBindings(&module, &binding_count, nullptr);
|
||||||
|
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
||||||
|
"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed enumerating descriptor bindings.");
|
||||||
|
|
||||||
|
if (binding_count > 0) {
|
||||||
|
// Parse bindings.
|
||||||
|
|
||||||
|
Vector<SpvReflectDescriptorBinding *> bindings;
|
||||||
|
bindings.resize(binding_count);
|
||||||
|
result = spvReflectEnumerateDescriptorBindings(&module, &binding_count, bindings.ptrw());
|
||||||
|
|
||||||
|
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
||||||
|
"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed getting descriptor bindings.");
|
||||||
|
|
||||||
|
for (uint32_t j = 0; j < binding_count; j++) {
|
||||||
|
const SpvReflectDescriptorBinding &binding = *bindings[j];
|
||||||
|
|
||||||
|
RDC::ShaderUniform uniform;
|
||||||
|
|
||||||
|
bool need_array_dimensions = false;
|
||||||
|
bool need_block_size = false;
|
||||||
|
bool may_be_writable = false;
|
||||||
|
|
||||||
|
switch (binding.descriptor_type) {
|
||||||
|
case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER: {
|
||||||
|
uniform.type = RDC::UNIFORM_TYPE_SAMPLER;
|
||||||
|
need_array_dimensions = true;
|
||||||
|
} break;
|
||||||
|
case SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
|
||||||
|
uniform.type = RDC::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE;
|
||||||
|
need_array_dimensions = true;
|
||||||
|
} break;
|
||||||
|
case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
|
||||||
|
uniform.type = RDC::UNIFORM_TYPE_TEXTURE;
|
||||||
|
need_array_dimensions = true;
|
||||||
|
} break;
|
||||||
|
case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
|
||||||
|
uniform.type = RDC::UNIFORM_TYPE_IMAGE;
|
||||||
|
need_array_dimensions = true;
|
||||||
|
may_be_writable = true;
|
||||||
|
} break;
|
||||||
|
case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
|
||||||
|
uniform.type = RDC::UNIFORM_TYPE_TEXTURE_BUFFER;
|
||||||
|
need_array_dimensions = true;
|
||||||
|
} break;
|
||||||
|
case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
|
||||||
|
uniform.type = RDC::UNIFORM_TYPE_IMAGE_BUFFER;
|
||||||
|
need_array_dimensions = true;
|
||||||
|
may_be_writable = true;
|
||||||
|
} break;
|
||||||
|
case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
|
||||||
|
uniform.type = RDC::UNIFORM_TYPE_UNIFORM_BUFFER;
|
||||||
|
need_block_size = true;
|
||||||
|
} break;
|
||||||
|
case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
|
||||||
|
uniform.type = RDC::UNIFORM_TYPE_STORAGE_BUFFER;
|
||||||
|
need_block_size = true;
|
||||||
|
may_be_writable = true;
|
||||||
|
} break;
|
||||||
|
case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: {
|
||||||
|
ERR_PRINT("Dynamic uniform buffer not supported.");
|
||||||
|
continue;
|
||||||
|
} break;
|
||||||
|
case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
|
||||||
|
ERR_PRINT("Dynamic storage buffer not supported.");
|
||||||
|
continue;
|
||||||
|
} break;
|
||||||
|
case SPV_REFLECT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
|
||||||
|
uniform.type = RDC::UNIFORM_TYPE_INPUT_ATTACHMENT;
|
||||||
|
need_array_dimensions = true;
|
||||||
|
} break;
|
||||||
|
case SPV_REFLECT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: {
|
||||||
|
ERR_PRINT("Acceleration structure not supported.");
|
||||||
|
continue;
|
||||||
|
} break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (need_array_dimensions) {
|
||||||
|
if (binding.array.dims_count == 0) {
|
||||||
|
uniform.length = 1;
|
||||||
|
} else {
|
||||||
|
for (uint32_t k = 0; k < binding.array.dims_count; k++) {
|
||||||
|
if (k == 0) {
|
||||||
|
uniform.length = binding.array.dims[0];
|
||||||
|
} else {
|
||||||
|
uniform.length *= binding.array.dims[k];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
} else if (need_block_size) {
|
||||||
|
uniform.length = binding.block.size;
|
||||||
|
} else {
|
||||||
|
uniform.length = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (may_be_writable) {
|
||||||
|
if (binding.descriptor_type == SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
|
||||||
|
uniform.writable = !(binding.decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE);
|
||||||
|
} else {
|
||||||
|
uniform.writable = !(binding.decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE) && !(binding.block.decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
uniform.writable = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
uniform.binding = binding.binding;
|
||||||
|
uint32_t set = binding.set;
|
||||||
|
|
||||||
|
ERR_FAIL_COND_V_MSG(set >= RDC::MAX_UNIFORM_SETS, FAILED,
|
||||||
|
"On shader stage '" + String(RDC::SHADER_STAGE_NAMES[stage]) + "', uniform '" + binding.name + "' uses a set (" + itos(set) + ") index larger than what is supported (" + itos(RDC::MAX_UNIFORM_SETS) + ").");
|
||||||
|
|
||||||
|
if (set < (uint32_t)reflection.uniform_sets.size()) {
|
||||||
|
// Check if this already exists.
|
||||||
|
bool exists = false;
|
||||||
|
for (int k = 0; k < reflection.uniform_sets[set].size(); k++) {
|
||||||
|
if (reflection.uniform_sets[set][k].binding == uniform.binding) {
|
||||||
|
// Already exists, verify that it's the same type.
|
||||||
|
ERR_FAIL_COND_V_MSG(reflection.uniform_sets[set][k].type != uniform.type, FAILED,
|
||||||
|
"On shader stage '" + String(RDC::SHADER_STAGE_NAMES[stage]) + "', uniform '" + binding.name + "' trying to reuse location for set=" + itos(set) + ", binding=" + itos(uniform.binding) + " with different uniform type.");
|
||||||
|
|
||||||
|
// Also, verify that it's the same size.
|
||||||
|
ERR_FAIL_COND_V_MSG(reflection.uniform_sets[set][k].length != uniform.length, FAILED,
|
||||||
|
"On shader stage '" + String(RDC::SHADER_STAGE_NAMES[stage]) + "', uniform '" + binding.name + "' trying to reuse location for set=" + itos(set) + ", binding=" + itos(uniform.binding) + " with different uniform size.");
|
||||||
|
|
||||||
|
// Also, verify that it has the same writability.
|
||||||
|
ERR_FAIL_COND_V_MSG(reflection.uniform_sets[set][k].writable != uniform.writable, FAILED,
|
||||||
|
"On shader stage '" + String(RDC::SHADER_STAGE_NAMES[stage]) + "', uniform '" + binding.name + "' trying to reuse location for set=" + itos(set) + ", binding=" + itos(uniform.binding) + " with different writability.");
|
||||||
|
|
||||||
|
// Just append stage mask and return.
|
||||||
|
reflection.uniform_sets.write[set].write[k].stages.set_flag(stage_flag);
|
||||||
|
exists = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (exists) {
|
||||||
|
continue; // Merged.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
uniform.stages.set_flag(stage_flag);
|
||||||
|
|
||||||
|
if (set >= (uint32_t)reflection.uniform_sets.size()) {
|
||||||
|
reflection.uniform_sets.resize(set + 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
reflection.uniform_sets.write[set].push_back(uniform);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Specialization constants.
|
||||||
|
|
||||||
|
uint32_t sc_count = 0;
|
||||||
|
result = spvReflectEnumerateSpecializationConstants(&module, &sc_count, nullptr);
|
||||||
|
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
||||||
|
"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed enumerating specialization constants.");
|
||||||
|
|
||||||
|
if (sc_count) {
|
||||||
|
Vector<SpvReflectSpecializationConstant *> spec_constants;
|
||||||
|
spec_constants.resize(sc_count);
|
||||||
|
|
||||||
|
result = spvReflectEnumerateSpecializationConstants(&module, &sc_count, spec_constants.ptrw());
|
||||||
|
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
||||||
|
"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed obtaining specialization constants.");
|
||||||
|
|
||||||
|
for (uint32_t j = 0; j < sc_count; j++) {
|
||||||
|
int32_t existing = -1;
|
||||||
|
RDC::ShaderSpecializationConstant sconst;
|
||||||
|
SpvReflectSpecializationConstant *spc = spec_constants[j];
|
||||||
|
|
||||||
|
sconst.constant_id = spc->constant_id;
|
||||||
|
sconst.int_value = 0; // Clear previous value JIC.
|
||||||
|
switch (spc->constant_type) {
|
||||||
|
case SPV_REFLECT_SPECIALIZATION_CONSTANT_BOOL: {
|
||||||
|
sconst.type = RDC::PIPELINE_SPECIALIZATION_CONSTANT_TYPE_BOOL;
|
||||||
|
sconst.bool_value = spc->default_value.int_bool_value != 0;
|
||||||
|
} break;
|
||||||
|
case SPV_REFLECT_SPECIALIZATION_CONSTANT_INT: {
|
||||||
|
sconst.type = RDC::PIPELINE_SPECIALIZATION_CONSTANT_TYPE_INT;
|
||||||
|
sconst.int_value = spc->default_value.int_bool_value;
|
||||||
|
} break;
|
||||||
|
case SPV_REFLECT_SPECIALIZATION_CONSTANT_FLOAT: {
|
||||||
|
sconst.type = RDC::PIPELINE_SPECIALIZATION_CONSTANT_TYPE_FLOAT;
|
||||||
|
sconst.float_value = spc->default_value.float_value;
|
||||||
|
} break;
|
||||||
|
}
|
||||||
|
sconst.stages.set_flag(stage_flag);
|
||||||
|
|
||||||
|
for (int k = 0; k < reflection.specialization_constants.size(); k++) {
|
||||||
|
if (reflection.specialization_constants[k].constant_id == sconst.constant_id) {
|
||||||
|
ERR_FAIL_COND_V_MSG(reflection.specialization_constants[k].type != sconst.type, FAILED, "More than one specialization constant used for id (" + itos(sconst.constant_id) + "), but their types differ.");
|
||||||
|
ERR_FAIL_COND_V_MSG(reflection.specialization_constants[k].int_value != sconst.int_value, FAILED, "More than one specialization constant used for id (" + itos(sconst.constant_id) + "), but their default values differ.");
|
||||||
|
existing = k;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (existing >= 0) {
|
||||||
|
reflection.specialization_constants.write[existing].stages.set_flag(stage_flag);
|
||||||
|
} else {
|
||||||
|
reflection.specialization_constants.push_back(sconst);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
reflection.specialization_constants.sort();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (stage == RDC::SHADER_STAGE_VERTEX || stage == RDC::SHADER_STAGE_FRAGMENT) {
|
||||||
|
uint32_t iv_count = 0;
|
||||||
|
result = spvReflectEnumerateInputVariables(&module, &iv_count, nullptr);
|
||||||
|
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
||||||
|
"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed enumerating input variables.");
|
||||||
|
|
||||||
|
if (iv_count) {
|
||||||
|
Vector<SpvReflectInterfaceVariable *> input_vars;
|
||||||
|
input_vars.resize(iv_count);
|
||||||
|
|
||||||
|
result = spvReflectEnumerateInputVariables(&module, &iv_count, input_vars.ptrw());
|
||||||
|
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
||||||
|
"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed obtaining input variables.");
|
||||||
|
|
||||||
|
for (const SpvReflectInterfaceVariable *v : input_vars) {
|
||||||
|
if (!v) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (stage == RDC::SHADER_STAGE_VERTEX) {
|
||||||
|
if (v->decoration_flags == 0) { // Regular input.
|
||||||
|
reflection.vertex_input_mask |= (((uint64_t)1) << v->location);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (v->built_in == SpvBuiltInViewIndex) {
|
||||||
|
reflection.has_multiview = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (stage == RDC::SHADER_STAGE_FRAGMENT) {
|
||||||
|
uint32_t ov_count = 0;
|
||||||
|
result = spvReflectEnumerateOutputVariables(&module, &ov_count, nullptr);
|
||||||
|
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
||||||
|
"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed enumerating output variables.");
|
||||||
|
|
||||||
|
if (ov_count) {
|
||||||
|
Vector<SpvReflectInterfaceVariable *> output_vars;
|
||||||
|
output_vars.resize(ov_count);
|
||||||
|
|
||||||
|
result = spvReflectEnumerateOutputVariables(&module, &ov_count, output_vars.ptrw());
|
||||||
|
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
||||||
|
"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed obtaining output variables.");
|
||||||
|
|
||||||
|
for (const SpvReflectInterfaceVariable *refvar : output_vars) {
|
||||||
|
if (!refvar) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (refvar->built_in != SpvBuiltInFragDepth) {
|
||||||
|
reflection.fragment_output_mask |= 1 << refvar->location;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t pc_count = 0;
|
||||||
|
result = spvReflectEnumeratePushConstantBlocks(&module, &pc_count, nullptr);
|
||||||
|
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
||||||
|
"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed enumerating push constants.");
|
||||||
|
|
||||||
|
if (pc_count) {
|
||||||
|
ERR_FAIL_COND_V_MSG(pc_count > 1, FAILED,
|
||||||
|
"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "': Only one push constant is supported, which should be the same across shader stages.");
|
||||||
|
|
||||||
|
Vector<SpvReflectBlockVariable *> pconstants;
|
||||||
|
pconstants.resize(pc_count);
|
||||||
|
result = spvReflectEnumeratePushConstantBlocks(&module, &pc_count, pconstants.ptrw());
|
||||||
|
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, FAILED,
|
||||||
|
"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "' failed obtaining push constants.");
|
||||||
|
#if 0
|
||||||
|
if (pconstants[0] == nullptr) {
|
||||||
|
Ref<FileAccess> f = FileAccess::open("res://popo.spv", FileAccess::WRITE);
|
||||||
|
f->store_buffer((const uint8_t *)&SpirV[0], SpirV.size() * sizeof(uint32_t));
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
ERR_FAIL_COND_V_MSG(reflection.push_constant_size && reflection.push_constant_size != pconstants[0]->size, FAILED,
|
||||||
|
"Reflection of SPIR-V shader stage '" + String(RDC::SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "': Push constant block must be the same across shader stages.");
|
||||||
|
|
||||||
|
reflection.push_constant_size = pconstants[0]->size;
|
||||||
|
reflection.push_constant_stages.set_flag(stage_flag);
|
||||||
|
|
||||||
|
//print_line("Stage: " + String(RDC::SHADER_STAGE_NAMES[stage]) + " push constant of size=" + itos(push_constant.push_constant_size));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
reflection.stages_bits.set_flag(stage_flag);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort all uniform_sets by binding.
|
||||||
|
for (uint32_t i = 0; i < reflection.uniform_sets.size(); i++) {
|
||||||
|
reflection.uniform_sets.write[i].sort();
|
||||||
|
}
|
||||||
|
|
||||||
|
set_from_shader_reflection(reflection);
|
||||||
|
|
||||||
|
return OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
void RenderingShaderContainer::set_from_shader_reflection(const RenderingDeviceCommons::ShaderReflection &p_reflection) {
|
||||||
reflection_binding_set_uniforms_count.clear();
|
reflection_binding_set_uniforms_count.clear();
|
||||||
reflection_binding_set_uniforms_data.clear();
|
reflection_binding_set_uniforms_data.clear();
|
||||||
reflection_specialization_data.clear();
|
reflection_specialization_data.clear();
|
||||||
reflection_shader_stages.clear();
|
reflection_shader_stages.clear();
|
||||||
|
|
||||||
shader_name = p_shader_name.utf8();
|
|
||||||
|
|
||||||
reflection_data.vertex_input_mask = p_reflection.vertex_input_mask;
|
reflection_data.vertex_input_mask = p_reflection.vertex_input_mask;
|
||||||
reflection_data.fragment_output_mask = p_reflection.fragment_output_mask;
|
reflection_data.fragment_output_mask = p_reflection.fragment_output_mask;
|
||||||
reflection_data.specialization_constants_count = p_reflection.specialization_constants.size();
|
reflection_data.specialization_constants_count = p_reflection.specialization_constants.size();
|
||||||
|
@@ -156,11 +523,13 @@ void RenderingShaderContainer::set_from_shader_reflection(const String &p_shader
 
     reflection_data.stage_count = reflection_shader_stages.size();
 
-    _set_from_shader_reflection_post(p_shader_name, p_reflection);
+    _set_from_shader_reflection_post(p_reflection);
 }
 
-bool RenderingShaderContainer::set_code_from_spirv(const Vector<RenderingDeviceCommons::ShaderStageSPIRVData> &p_spirv) {
-    return _set_code_from_spirv(p_spirv);
+bool RenderingShaderContainer::set_code_from_spirv(const String &p_shader_name, Span<RenderingDeviceCommons::ShaderStageSPIRVData> p_spirv) {
+    LocalVector<ReflectedShaderStage> spirv;
+    ERR_FAIL_COND_V(reflect_spirv(p_shader_name, p_spirv, spirv) != OK, false);
+    return _set_code_from_spirv(spirv.span());
 }
 
 RenderingDeviceCommons::ShaderReflection RenderingShaderContainer::get_shader_reflection() const {
@@ -33,6 +33,8 @@
 #include "core/object/ref_counted.h"
 #include "servers/rendering/rendering_device_commons.h"
 
+struct SpvReflectShaderModule;
+
 class RenderingShaderContainer : public RefCounted {
     GDSOFTCLASS(RenderingShaderContainer, RefCounted);
@@ -118,10 +120,29 @@ protected:
     virtual uint32_t _to_bytes_footer_extra_data(uint8_t *p_bytes) const;
 
     // This method will be called when set_from_shader_reflection() is finished. Used to update internal structures to match the reflection if necessary.
-    virtual void _set_from_shader_reflection_post(const String &p_shader_name, const RenderingDeviceCommons::ShaderReflection &p_reflection);
+    virtual void _set_from_shader_reflection_post(const RenderingDeviceCommons::ShaderReflection &p_reflection);
+
+    class ReflectedShaderStage {
+        friend class RenderingShaderContainer;
+
+        Vector<uint8_t> _spirv_data;
+        SpvReflectShaderModule *_module = nullptr;
+
+    public:
+        RenderingDeviceCommons::ShaderStage shader_stage = RenderingDeviceCommons::SHADER_STAGE_MAX;
+        const SpvReflectShaderModule &module() const;
+        const Span<uint32_t> spirv() const;
+        const Vector<uint8_t> spirv_data() const { return _spirv_data; }
+
+        ReflectedShaderStage();
+        ~ReflectedShaderStage();
+    };
 
     // This method will be called when set_code_from_spirv() is called.
-    virtual bool _set_code_from_spirv(const Vector<RenderingDeviceCommons::ShaderStageSPIRVData> &p_spirv) = 0;
+    virtual bool _set_code_from_spirv(Span<ReflectedShaderStage> p_spirv) = 0;
+
+    void set_from_shader_reflection(const RenderingDeviceCommons::ShaderReflection &p_reflection);
+    Error reflect_spirv(const String &p_shader_name, Span<RenderingDeviceCommons::ShaderStageSPIRVData> p_spirv, LocalVector<ReflectedShaderStage> &r_refl);
 
 public:
     enum CompressionFlags {
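A derived backend implements the new pure virtual by walking the reflected stages. The sketch below only illustrates the general shape: the class name is hypothetical, other required overrides such as `_format()` are omitted, and the actual translation step is left as a comment.

class RenderingShaderContainerExampleBackend : public RenderingShaderContainer {
protected:
    virtual bool _set_code_from_spirv(Span<ReflectedShaderStage> p_spirv) override {
        for (uint64_t i = 0; i < p_spirv.size(); i++) {
            const ReflectedShaderStage &stage = p_spirv[i];
            Span<uint32_t> words = stage.spirv(); // SPIR-V words for this stage.
            const SpvReflectShaderModule &module = stage.module(); // Reflection created once by the base class.
            // Translate 'words' with the platform compiler and use 'module' for
            // binding remapping, then store the result in 'shaders'.
            (void)words;
            (void)module;
        }
        return true;
    }
};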
@@ -138,8 +159,7 @@ public:
     CharString shader_name;
     Vector<Shader> shaders;
 
-    void set_from_shader_reflection(const String &p_shader_name, const RenderingDeviceCommons::ShaderReflection &p_reflection);
-    bool set_code_from_spirv(const Vector<RenderingDeviceCommons::ShaderStageSPIRVData> &p_spirv);
+    bool set_code_from_spirv(const String &p_shader_name, Span<RenderingDeviceCommons::ShaderStageSPIRVData> p_spirv);
     RenderingDeviceCommons::ShaderReflection get_shader_reflection() const;
     bool from_bytes(const PackedByteArray &p_bytes);
     PackedByteArray to_bytes() const;