path: root/src/frygon/vulkan/fgvk_shader.c
/*
 * Copyright © 2025 Lucas Francisco Fryzek
 * SPDX-License-Identifier: MIT
 */
#include "fgvk_shader.h"

#include "fgvk_physical_device.h"
#include "fgvk_device.h"
#include "fgvk_cmd_buffer.h"

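/* NIR compiler options come straight from the fgcc backend compiler. */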
static const nir_shader_compiler_options *
fgvk_get_nir_options(struct vk_physical_device *vk_pdev,
                     mesa_shader_stage stage,
                     UNUSED const struct vk_pipeline_robustness_state *rs)
{
   const struct fgvk_physical_device *pdev =
      container_of(vk_pdev, struct fgvk_physical_device, vk);
   return fgcc_nir_options(pdev->fgcc);
}

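/* SPIR-V to NIR translation options; nothing driver-specific is set yet. */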
static struct spirv_to_nir_options
fgvk_get_spirv_options(struct vk_physical_device *vk_pdev,
                       UNUSED mesa_shader_stage stage,
                       const struct vk_pipeline_robustness_state *rs)
{
   const struct fgvk_physical_device *pdev =
      container_of(vk_pdev, struct fgvk_physical_device, vk);

   return (struct spirv_to_nir_options) {};
}

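/* Hand freshly translated NIR to the fgcc backend for common preprocessing. */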
static void
fgvk_preprocess_nir(struct vk_physical_device *vk_pdev,
                    nir_shader *nir,
                    UNUSED const struct vk_pipeline_robustness_state *rs)
{
   const struct fgvk_physical_device *pdev =
      container_of(vk_pdev, struct fgvk_physical_device, vk);

   fgcc_preprocess_nir(nir, pdev->fgcc);
}

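/*
 * Hash the graphics state that influences shader compilation. Nothing is
 * hashed yet, so every state produces the same (empty) digest.
 */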
static void
fgvk_hash_state(struct vk_physical_device *device,
                const struct vk_graphics_pipeline_state *state,
                const struct vk_features *enabled_features,
                VkShaderStageFlags stages,
                blake3_hash blake3_out)
{
   // TODO: figure out what state should be hashed into shaders
   struct mesa_blake3 blake3_ctx;
   _mesa_blake3_init(&blake3_ctx);
   _mesa_blake3_final(&blake3_ctx, blake3_out);
}

static const struct vk_shader_ops fgvk_shader_ops;

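/* Release a shader allocated in fgvk_compile_shader(). */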
static void
fgvk_shader_destroy(struct vk_device *vk_dev,
                    struct vk_shader *vk_shader,
                    const VkAllocationCallbacks* pAllocator)
{
   struct fgvk_device *dev = container_of(vk_dev, struct fgvk_device, vk);
   struct fgvk_shader *shader = container_of(vk_shader, struct fgvk_shader, vk);

   vk_shader_free(&dev->vk, pAllocator, &shader->vk);
}

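/*
 * Compile a single stage into a struct fgvk_shader. Ownership of the NIR
 * transfers here: it is freed on both the success and failure paths.
 */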
static VkResult
fgvk_compile_shader(struct fgvk_device *dev,
                    struct vk_shader_compile_info *info,
                    const struct vk_graphics_pipeline_state *state,
                    const VkAllocationCallbacks* pAllocator,
                    struct vk_shader **shader_out)
{
   struct fgvk_shader *shader;
   VkResult result = VK_SUCCESS;

   /* We consume the NIR, regardless of success or failure */
   nir_shader *nir = info->nir;

   shader = vk_shader_zalloc(&dev->vk, &fgvk_shader_ops, info->stage,
                             pAllocator, sizeof(*shader));

   if (shader == NULL) {
      ralloc_free(nir);
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   // TODO: some shader compilation should happen here

   ralloc_free(nir);
   if (result != VK_SUCCESS) {
      fgvk_shader_destroy(&dev->vk, &shader->vk, pAllocator);
      return result;
   }

   *shader_out = &shader->vk;

   return VK_SUCCESS;
}

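/*
 * vk_device_shader_ops::compile entry point. On failure, shaders built so
 * far are destroyed, the remaining NIR is freed and the output array is
 * zeroed so the caller never sees partially initialized results.
 */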
static VkResult
fgvk_compile_shaders(struct vk_device *vk_dev,
                     uint32_t shader_count,
                     struct vk_shader_compile_info *infos,
                     const struct vk_graphics_pipeline_state *state,
                     const struct vk_features *enabled_features,
                     const VkAllocationCallbacks* pAllocator,
                     struct vk_shader **shaders_out)
{
   struct fgvk_device *dev = container_of(vk_dev, struct fgvk_device, vk);

   for (uint32_t i = 0; i < shader_count; i++) {
      VkResult result = fgvk_compile_shader(dev, &infos[i], state,
                                            pAllocator, &shaders_out[i]);
      if (result != VK_SUCCESS) {
         /* Clean up all the shaders before this point */
         for (uint32_t j = 0; j < i; j++)
            fgvk_shader_destroy(&dev->vk, shaders_out[j], pAllocator);

         /* Clean up all the NIR after this point */
         for (uint32_t j = i + 1; j < shader_count; j++)
            ralloc_free(infos[j].nir);

         /* Memset the output array */
         memset(shaders_out, 0, shader_count * sizeof(*shaders_out));

         return result;
      }
   }

   return VK_SUCCESS;
}

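/*
 * Shader serialization and the VK_KHR_pipeline_executable_properties
 * queries are still TODO; the stubs below are not expected to be reached.
 */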
static bool
fgvk_shader_serialize(struct vk_device *vk_dev,
                      const struct vk_shader *vk_shader,
                      struct blob *blob)
{
   UNREACHABLE("TODO");
}

static VkResult
fgvk_deserialize_shader(struct vk_device *vk_dev,
                        struct blob_reader *blob,
                        uint32_t binary_version,
                        const VkAllocationCallbacks* pAllocator,
                        struct vk_shader **shader_out)
{
   UNREACHABLE("TODO");
}

static VkResult
fgvk_shader_get_executable_properties(
   UNUSED struct vk_device *device,
   const struct vk_shader *vk_shader,
   uint32_t *executable_count,
   VkPipelineExecutablePropertiesKHR *properties)
{
   UNREACHABLE("TODO");
}

static VkResult
fgvk_shader_get_executable_statistics(
   UNUSED struct vk_device *device,
   const struct vk_shader *vk_shader,
   uint32_t executable_index,
   uint32_t *statistic_count,
   VkPipelineExecutableStatisticKHR *statistics)
{
   UNREACHABLE("TODO");
}

static VkResult
fgvk_shader_get_executable_internal_representations(
   UNUSED struct vk_device *device,
   const struct vk_shader *vk_shader,
   uint32_t executable_index,
   uint32_t *internal_representation_count,
   VkPipelineExecutableInternalRepresentationKHR *internal_representations)
{
   UNREACHABLE("TODO");
}

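/* Per-shader vtable, handed to vk_shader_zalloc() when a shader is created. */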
static const struct vk_shader_ops fgvk_shader_ops = {
   .destroy = fgvk_shader_destroy,
   .serialize = fgvk_shader_serialize,
   .get_executable_properties = fgvk_shader_get_executable_properties,
   .get_executable_statistics = fgvk_shader_get_executable_statistics,
   .get_executable_internal_representations =
      fgvk_shader_get_executable_internal_representations,
};

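/*
 * Device-level shader callbacks, exported so the rest of the driver can
 * register them with the common Vulkan runtime.
 */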
const struct vk_device_shader_ops fgvk_device_shader_ops = {
   .get_nir_options = fgvk_get_nir_options,
   .get_spirv_options = fgvk_get_spirv_options,
   .preprocess_nir = fgvk_preprocess_nir,
   .hash_state = fgvk_hash_state,
   .compile = fgvk_compile_shaders,
   .deserialize = fgvk_deserialize_shader,
   .cmd_set_dynamic_graphics_state = vk_cmd_set_dynamic_graphics_state,
   .cmd_bind_shaders = fgvk_cmd_bind_shaders,
};