/*
 * Copyright © 2025 Lucas Francisco Fryzek
 * SPDX-License-Identifier: MIT
 */

#include "fgvk_shader.h"

#include "fgvk_physical_device.h"
#include "fgvk_device.h"
#include "fgvk_cmd_buffer.h"

/* Return the backend compiler's NIR options for this physical device.
 * The options are owned by the fgcc compiler context; stage-specific
 * options are not needed yet.
 */
static const nir_shader_compiler_options *
fgvk_get_nir_options(struct vk_physical_device *vk_pdev,
                     UNUSED mesa_shader_stage stage,
                     UNUSED const struct vk_pipeline_robustness_state *rs)
{
   const struct fgvk_physical_device *pdev =
      container_of(vk_pdev, struct fgvk_physical_device, vk);

   return fgcc_nir_options(pdev->fgcc);
}

/* Return the SPIR-V -> NIR translation options.  No driver-specific
 * options (robustness handling, capabilities, etc.) are wired up yet,
 * so this is just the all-defaults struct.
 */
static struct spirv_to_nir_options
fgvk_get_spirv_options(UNUSED struct vk_physical_device *vk_pdev,
                       UNUSED mesa_shader_stage stage,
                       UNUSED const struct vk_pipeline_robustness_state *rs)
{
   return (struct spirv_to_nir_options) {};
}

/* Run device-independent NIR lowering passes before the shader is
 * handed to the cache / compile path.
 */
static void
fgvk_preprocess_nir(struct vk_physical_device *vk_pdev,
                    nir_shader *nir,
                    UNUSED const struct vk_pipeline_robustness_state *rs)
{
   const struct fgvk_physical_device *pdev =
      container_of(vk_pdev, struct fgvk_physical_device, vk);

   fgcc_preprocess_nir(nir, pdev->fgcc);
}

/* Hash the graphics state that affects compiled shader code.
 *
 * Nothing is folded into the hash yet, so all shaders with identical
 * SPIR-V currently hash the same regardless of pipeline state.
 */
static void
fgvk_hash_state(UNUSED struct vk_physical_device *device,
                UNUSED const struct vk_graphics_pipeline_state *state,
                UNUSED const struct vk_features *enabled_features,
                UNUSED VkShaderStageFlags stages,
                blake3_hash blake3_out)
{
   // TODO need to figure out what state should get hashed into shaders
   struct mesa_blake3 blake3_ctx;
   _mesa_blake3_init(&blake3_ctx);
   _mesa_blake3_final(&blake3_ctx, blake3_out);
}

static const struct vk_shader_ops fgvk_shader_ops;

/* Free a shader object.  There is no backend payload yet, so this is
 * just the common vk_shader teardown.
 */
static void
fgvk_shader_destroy(struct vk_device *vk_dev,
                    struct vk_shader *vk_shader,
                    const VkAllocationCallbacks *pAllocator)
{
   struct fgvk_device *dev = container_of(vk_dev, struct fgvk_device, vk);
   struct fgvk_shader *shader =
      container_of(vk_shader, struct fgvk_shader, vk);

   vk_shader_free(&dev->vk, pAllocator, &shader->vk);
}

/* Compile a single shader from NIR.
 *
 * Ownership note: the NIR in `info` is always consumed (freed) by this
 * function, on both success and failure, matching the contract of
 * vk_device_shader_ops::compile.
 */
static VkResult
fgvk_compile_shader(struct fgvk_device *dev,
                    struct vk_shader_compile_info *info,
                    UNUSED const struct vk_graphics_pipeline_state *state,
                    const VkAllocationCallbacks *pAllocator,
                    struct vk_shader **shader_out)
{
   struct fgvk_shader *shader;
   VkResult result = VK_SUCCESS;

   /* We consume the NIR, regardless of success or failure */
   nir_shader *nir = info->nir;

   shader = vk_shader_zalloc(&dev->vk, &fgvk_shader_ops, info->stage,
                             pAllocator, sizeof(*shader));
   if (shader == NULL) {
      ralloc_free(nir);
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   // TODO some shader compliation should happen here

   ralloc_free(nir);

   if (result != VK_SUCCESS) {
      fgvk_shader_destroy(&dev->vk, &shader->vk, pAllocator);
      return result;
   }

   *shader_out = &shader->vk;

   return VK_SUCCESS;
}

/* vk_device_shader_ops::compile — compile a group of shaders.
 *
 * On failure the contract requires that every shader already created is
 * destroyed, every not-yet-consumed NIR is freed, and the output array
 * is zeroed before returning the error.
 */
static VkResult
fgvk_compile_shaders(struct vk_device *vk_dev,
                     uint32_t shader_count,
                     struct vk_shader_compile_info *infos,
                     const struct vk_graphics_pipeline_state *state,
                     UNUSED const struct vk_features *enabled_features,
                     const VkAllocationCallbacks *pAllocator,
                     struct vk_shader **shaders_out)
{
   struct fgvk_device *dev = container_of(vk_dev, struct fgvk_device, vk);

   for (uint32_t i = 0; i < shader_count; i++) {
      VkResult result = fgvk_compile_shader(dev, &infos[i], state,
                                            pAllocator, &shaders_out[i]);
      if (result != VK_SUCCESS) {
         /* Clean up all the shaders before this point */
         for (uint32_t j = 0; j < i; j++)
            fgvk_shader_destroy(&dev->vk, shaders_out[j], pAllocator);

         /* Clean up all the NIR after this point.  infos[i] was already
          * consumed by fgvk_compile_shader.
          */
         for (uint32_t j = i + 1; j < shader_count; j++)
            ralloc_free(infos[j].nir);

         /* Memset the output array */
         memset(shaders_out, 0, shader_count * sizeof(*shaders_out));

         return result;
      }
   }

   return VK_SUCCESS;
}

/* Serialize a compiled shader for the pipeline/shader-object cache.
 * Not implemented yet.
 */
static bool
fgvk_shader_serialize(UNUSED struct vk_device *vk_dev,
                      UNUSED const struct vk_shader *vk_shader,
                      UNUSED struct blob *blob)
{
   UNREACHABLE("TODO");
}

/* Recreate a shader object from a serialized blob.  Not implemented yet. */
static VkResult
fgvk_deserialize_shader(UNUSED struct vk_device *vk_dev,
                        UNUSED struct blob_reader *blob,
                        UNUSED uint32_t binary_version,
                        UNUSED const VkAllocationCallbacks *pAllocator,
                        UNUSED struct vk_shader **shader_out)
{
   UNREACHABLE("TODO");
}

/* VK_KHR_pipeline_executable_properties support — not implemented yet. */
static VkResult
fgvk_shader_get_executable_properties(
   UNUSED struct vk_device *device,
   UNUSED const struct vk_shader *vk_shader,
   UNUSED uint32_t *executable_count,
   UNUSED VkPipelineExecutablePropertiesKHR *properties)
{
   UNREACHABLE("TODO");
}

static VkResult
fgvk_shader_get_executable_statistics(
   UNUSED struct vk_device *device,
   UNUSED const struct vk_shader *vk_shader,
   UNUSED uint32_t executable_index,
   UNUSED uint32_t *statistic_count,
   UNUSED VkPipelineExecutableStatisticKHR *statistics)
{
   UNREACHABLE("TODO");
}

static VkResult
fgvk_shader_get_executable_internal_representations(
   UNUSED struct vk_device *device,
   UNUSED const struct vk_shader *vk_shader,
   UNUSED uint32_t executable_index,
   UNUSED uint32_t *internal_representation_count,
   UNUSED VkPipelineExecutableInternalRepresentationKHR *internal_representations)
{
   UNREACHABLE("TODO");
}

/* Per-shader vtable used by the common vk_shader code. */
static const struct vk_shader_ops fgvk_shader_ops = {
   .destroy = fgvk_shader_destroy,
   .serialize = fgvk_shader_serialize,
   .get_executable_properties = fgvk_shader_get_executable_properties,
   .get_executable_statistics = fgvk_shader_get_executable_statistics,
   .get_executable_internal_representations =
      fgvk_shader_get_executable_internal_representations,
};

/* Device-level shader-object entry points plugged into the common
 * Vulkan runtime (ESO / shader object path).
 */
const struct vk_device_shader_ops fgvk_device_shader_ops = {
   .get_nir_options = fgvk_get_nir_options,
   .get_spirv_options = fgvk_get_spirv_options,
   .preprocess_nir = fgvk_preprocess_nir,
   .hash_state = fgvk_hash_state,
   .compile = fgvk_compile_shaders,
   .deserialize = fgvk_deserialize_shader,
   .cmd_set_dynamic_graphics_state = vk_cmd_set_dynamic_graphics_state,
   .cmd_bind_shaders = fgvk_cmd_bind_shaders,
};