diff --git a/src/gallium/frontends/lavapipe/lvp_descriptor_set.c b/src/gallium/frontends/lavapipe/lvp_descriptor_set.c
index 4482f25c196..b70d0a1c940 100644
--- a/src/gallium/frontends/lavapipe/lvp_descriptor_set.c
+++ b/src/gallium/frontends/lavapipe/lvp_descriptor_set.c
@@ -98,7 +98,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateDescriptorSetLayout(
       else
          set_layout->size += binding->descriptorCount;
 
-      for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < MESA_SHADER_STAGES; stage++) {
+      for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < LVP_SHADER_STAGES; stage++) {
          set_layout->binding[b].stage[stage].const_buffer_index = -1;
          set_layout->binding[b].stage[stage].shader_buffer_index = -1;
          set_layout->binding[b].stage[stage].sampler_index = -1;
@@ -237,7 +237,7 @@ lvp_pipeline_layout_create(struct lvp_device *device,
       const struct lvp_descriptor_set_layout *set_layout =
          vk_to_lvp_descriptor_set_layout(layout->vk.set_layouts[set]);
 
-      for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+      for (unsigned i = 0; i < LVP_SHADER_STAGES; i++) {
         layout->stage[i].uniform_block_size += set_layout->stage[i].uniform_block_size;
         for (unsigned j = 0; j < set_layout->stage[i].uniform_block_count; j++) {
            assert(layout->stage[i].uniform_block_count + j < MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BLOCKS * MAX_SETS);
@@ -291,7 +291,7 @@ lvp_pipeline_layout_create(struct lvp_device *device,
       const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
       layout->push_constant_size = MAX2(layout->push_constant_size,
                                         range->offset + range->size);
-      layout->push_constant_stages |= (range->stageFlags & BITFIELD_MASK(MESA_SHADER_STAGES));
+      layout->push_constant_stages |= (range->stageFlags & BITFIELD_MASK(LVP_SHADER_STAGES));
    }
    layout->push_constant_size = align(layout->push_constant_size, 16);
    return layout;
@@ -318,7 +318,7 @@ lvp_descriptor_set_create(struct lvp_device *device,
    struct lvp_descriptor_set *set;
    size_t base_size = sizeof(*set) + layout->size * sizeof(set->descriptors[0]);
    size_t size = base_size;
-   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++)
+   for (unsigned i = 0; i < LVP_SHADER_STAGES; i++)
       size += layout->stage[i].uniform_block_size;
    set = vk_alloc(&device->vk.alloc /* XXX: Use the pool */, size, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
diff --git a/src/gallium/frontends/lavapipe/lvp_execute.c b/src/gallium/frontends/lavapipe/lvp_execute.c
index 9781b72622d..a382a8098cb 100644
--- a/src/gallium/frontends/lavapipe/lvp_execute.c
+++ b/src/gallium/frontends/lavapipe/lvp_execute.c
@@ -85,10 +85,10 @@ struct rendering_state {
    bool blend_color_dirty;
    bool ve_dirty;
    bool vb_dirty;
-   bool constbuf_dirty[MESA_SHADER_STAGES];
-   bool pcbuf_dirty[MESA_SHADER_STAGES];
-   bool has_pcbuf[MESA_SHADER_STAGES];
-   bool inlines_dirty[MESA_SHADER_STAGES];
+   bool constbuf_dirty[LVP_SHADER_STAGES];
+   bool pcbuf_dirty[LVP_SHADER_STAGES];
+   bool has_pcbuf[LVP_SHADER_STAGES];
+   bool inlines_dirty[LVP_SHADER_STAGES];
    bool vp_dirty;
    bool scissor_dirty;
    bool ib_dirty;
@@ -129,29 +129,29 @@ struct rendering_state {
    ubyte index_size;
    unsigned index_offset;
    struct pipe_resource *index_buffer;
-   struct pipe_constant_buffer const_buffer[MESA_SHADER_STAGES][16];
-   int num_const_bufs[MESA_SHADER_STAGES];
+   struct pipe_constant_buffer const_buffer[LVP_SHADER_STAGES][16];
+   int num_const_bufs[LVP_SHADER_STAGES];
    int num_vb;
    unsigned start_vb;
    struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
    struct cso_velems_state velem;
 
-   struct lvp_access_info access[MESA_SHADER_STAGES];
-   struct pipe_sampler_view *sv[MESA_SHADER_STAGES][PIPE_MAX_SHADER_SAMPLER_VIEWS];
-   int num_sampler_views[MESA_SHADER_STAGES];
-   struct pipe_sampler_state ss[MESA_SHADER_STAGES][PIPE_MAX_SAMPLERS];
+   struct lvp_access_info access[LVP_SHADER_STAGES];
+   struct pipe_sampler_view *sv[LVP_SHADER_STAGES][PIPE_MAX_SHADER_SAMPLER_VIEWS];
+   int num_sampler_views[LVP_SHADER_STAGES];
+   struct pipe_sampler_state ss[LVP_SHADER_STAGES][PIPE_MAX_SAMPLERS];
    /* cso_context api is stupid */
-   const struct pipe_sampler_state *cso_ss_ptr[MESA_SHADER_STAGES][PIPE_MAX_SAMPLERS];
-   int num_sampler_states[MESA_SHADER_STAGES];
-   bool sv_dirty[MESA_SHADER_STAGES];
-   bool ss_dirty[MESA_SHADER_STAGES];
+   const struct pipe_sampler_state *cso_ss_ptr[LVP_SHADER_STAGES][PIPE_MAX_SAMPLERS];
+   int num_sampler_states[LVP_SHADER_STAGES];
+   bool sv_dirty[LVP_SHADER_STAGES];
+   bool ss_dirty[LVP_SHADER_STAGES];
 
-   struct pipe_image_view iv[MESA_SHADER_STAGES][PIPE_MAX_SHADER_IMAGES];
-   int num_shader_images[MESA_SHADER_STAGES];
-   struct pipe_shader_buffer sb[MESA_SHADER_STAGES][PIPE_MAX_SHADER_BUFFERS];
-   int num_shader_buffers[MESA_SHADER_STAGES];
-   bool iv_dirty[MESA_SHADER_STAGES];
-   bool sb_dirty[MESA_SHADER_STAGES];
+   struct pipe_image_view iv[LVP_SHADER_STAGES][PIPE_MAX_SHADER_IMAGES];
+   int num_shader_images[LVP_SHADER_STAGES];
+   struct pipe_shader_buffer sb[LVP_SHADER_STAGES][PIPE_MAX_SHADER_BUFFERS];
+   int num_shader_buffers[LVP_SHADER_STAGES];
+   bool iv_dirty[LVP_SHADER_STAGES];
+   bool sb_dirty[LVP_SHADER_STAGES];
 
    bool disable_multisample;
    enum gs_output gs_output_lines : 2;
@@ -167,7 +167,7 @@ struct rendering_state {
       void *block[MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BLOCKS * MAX_SETS];
       uint16_t size[MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BLOCKS * MAX_SETS];
       uint16_t count;
-   } uniform_blocks[MESA_SHADER_STAGES];
+   } uniform_blocks[LVP_SHADER_STAGES];
 
    VkRect2D render_area;
    bool suspending;
@@ -194,7 +194,7 @@ struct rendering_state {
    struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];
    uint32_t so_offsets[PIPE_MAX_SO_BUFFERS];
 
-   struct lvp_shader *shaders[MESA_SHADER_STAGES];
+   struct lvp_shader *shaders[LVP_SHADER_STAGES];
 
    bool tess_ccw;
    void *tess_states[2];
@@ -524,8 +524,8 @@ static void emit_state(struct rendering_state *state)
       state->ve_dirty = false;
    }
 
-   bool constbuf_dirty[MESA_SHADER_STAGES] = {false};
-   bool pcbuf_dirty[MESA_SHADER_STAGES] = {false};
+   bool constbuf_dirty[LVP_SHADER_STAGES] = {false};
+   bool pcbuf_dirty[LVP_SHADER_STAGES] = {false};
 
    for (unsigned sh = 0; sh < MESA_SHADER_COMPUTE; sh++) {
      constbuf_dirty[sh] = state->constbuf_dirty[sh];
@@ -1165,7 +1165,7 @@ struct dyn_info {
       uint16_t sampler_view_count;
       uint16_t image_count;
       uint16_t uniform_block_count;
-   } stage[MESA_SHADER_STAGES];
+   } stage[LVP_SHADER_STAGES];
 
    uint32_t dyn_index;
    const uint32_t *dynamic_offsets;
@@ -1350,7 +1350,7 @@ static void increment_dyn_info(struct dyn_info *dyn_info,
    const struct lvp_descriptor_set_layout *layout =
      vk_to_lvp_descriptor_set_layout(vk_layout);
 
-   for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < MESA_SHADER_STAGES; stage++) {
+   for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < LVP_SHADER_STAGES; stage++) {
      dyn_info->stage[stage].const_buffer_count += layout->stage[stage].const_buffer_count;
      dyn_info->stage[stage].shader_buffer_count += layout->stage[stage].shader_buffer_count;
      dyn_info->stage[stage].sampler_count += layout->stage[stage].sampler_count;
@@ -4567,7 +4567,7 @@ VkResult lvp_execute_cmds(struct lvp_device *device,
    state->rs_state.scissor = true;
    state->rs_state.no_ms_sample_mask_out = true;
 
-   for (enum pipe_shader_type s = MESA_SHADER_VERTEX; s < MESA_SHADER_STAGES; s++) {
+   for (enum pipe_shader_type s = MESA_SHADER_VERTEX; s < LVP_SHADER_STAGES; s++) {
      for (unsigned i = 0; i < ARRAY_SIZE(state->cso_ss_ptr[s]); i++)
        state->cso_ss_ptr[s][i] = &state->ss[s][i];
    }
diff --git a/src/gallium/frontends/lavapipe/lvp_pipeline.c b/src/gallium/frontends/lavapipe/lvp_pipeline.c
index 00c64a28625..249103affd3 100644
--- a/src/gallium/frontends/lavapipe/lvp_pipeline.c
+++ b/src/gallium/frontends/lavapipe/lvp_pipeline.c
@@ -74,7 +74,7 @@ shader_destroy(struct lvp_device *device, struct lvp_shader *shader)
 void
 lvp_pipeline_destroy(struct lvp_device *device, struct lvp_pipeline *pipeline)
 {
-   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++)
+   for (unsigned i = 0; i < LVP_SHADER_STAGES; i++)
      shader_destroy(device, &pipeline->shaders[i]);
 
    if (pipeline->layout)
diff --git a/src/gallium/frontends/lavapipe/lvp_private.h b/src/gallium/frontends/lavapipe/lvp_private.h
index 613e78a47b1..d59c077d33d 100644
--- a/src/gallium/frontends/lavapipe/lvp_private.h
+++ b/src/gallium/frontends/lavapipe/lvp_private.h
@@ -113,7 +113,8 @@ void __lvp_finishme(const char *file, int line, const char *format, ...)
       return; \
    } while (0)
 
-#define LVP_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
+#define LVP_SHADER_STAGES MESA_SHADER_STAGES
+#define LVP_STAGE_MASK ((1 << LVP_SHADER_STAGES) - 1)
 
 #define lvp_foreach_stage(stage, stage_bits) \
    for (gl_shader_stage stage, \
@@ -126,7 +127,7 @@ struct lvp_physical_device {
 
    struct pipe_loader_device *pld;
    struct pipe_screen *pscreen;
-   const nir_shader_compiler_options *drv_options[MESA_SHADER_STAGES];
+   const nir_shader_compiler_options *drv_options[LVP_SHADER_STAGES];
    uint32_t max_images;
 
    struct vk_sync_timeline_type sync_timeline_type;
@@ -268,7 +269,7 @@ struct lvp_descriptor_set_binding_layout {
       int16_t image_index;
       int16_t uniform_block_index;
       int16_t uniform_block_offset;
-   } stage[MESA_SHADER_STAGES];
+   } stage[LVP_SHADER_STAGES];
 
    /* Immutable samplers (or NULL if no immutable samplers) */
    struct pipe_sampler_state **immutable_samplers;
@@ -299,7 +300,7 @@ struct lvp_descriptor_set_layout {
       uint16_t uniform_block_count;
       uint16_t uniform_block_size;
       uint16_t uniform_block_sizes[MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BLOCKS]; //zero-indexed
-   } stage[MESA_SHADER_STAGES];
+   } stage[LVP_SHADER_STAGES];
 
    /* Number of dynamic offsets used by this descriptor set */
    uint16_t dynamic_offset_count;
@@ -396,7 +397,7 @@ struct lvp_pipeline_layout {
      uint16_t uniform_block_size;
      uint16_t uniform_block_count;
      uint16_t uniform_block_sizes[MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BLOCKS * MAX_SETS];
-   } stage[MESA_SHADER_STAGES];
+   } stage[LVP_SHADER_STAGES];
 };
 
 
@@ -465,7 +466,7 @@ struct lvp_pipeline {
    void *state_data;
    bool is_compute_pipeline;
    bool force_min_sample;
-   struct lvp_shader shaders[MESA_SHADER_STAGES];
+   struct lvp_shader shaders[LVP_SHADER_STAGES];
    gl_shader_stage last_vertex;
    struct vk_graphics_pipeline_state graphics_state;
    VkGraphicsPipelineLibraryFlagsEXT stages;
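
Note (not part of the patch): the change above is mechanical. It adds #define LVP_SHADER_STAGES MESA_SHADER_STAGES in lvp_private.h and switches lavapipe's per-stage arrays, loop bounds, and the push-constant stage mask from the core Mesa constant to the driver-local macro, so behaviour is unchanged. The following is a minimal standalone C sketch of the pattern only; the names (enum stage, DRV_SHADER_STAGES, struct per_stage_state) are made up for illustration and stand in for gl_shader_stage, LVP_SHADER_STAGES, and lavapipe's per-stage state.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stage enum; the real code uses gl_shader_stage. */
enum stage { STAGE_VERTEX, STAGE_FRAGMENT, STAGE_COMPUTE, STAGE_COUNT };

/* Driver-local alias for the stage count, mirroring
 * "#define LVP_SHADER_STAGES MESA_SHADER_STAGES" in the patch. */
#define DRV_SHADER_STAGES STAGE_COUNT

/* Per-stage state sized with the driver macro, like the arrays in
 * struct rendering_state. */
struct per_stage_state {
   bool constbuf_dirty[DRV_SHADER_STAGES];
   int num_const_bufs[DRV_SHADER_STAGES];
};

int main(void)
{
   struct per_stage_state s = {0};

   /* Every per-stage loop is bounded by the same driver macro, so the
    * stage count is defined in exactly one place. */
   for (unsigned i = 0; i < DRV_SHADER_STAGES; i++) {
      s.constbuf_dirty[i] = true;
      s.num_const_bufs[i] = 0;
   }

   printf("%u stages tracked\n", (unsigned)DRV_SHADER_STAGES);
   return 0;
}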