From 99a00e224757664412b338bebb846e9024f038c7 Mon Sep 17 00:00:00 2001
From: Alyssa Rosenzweig
Date: Wed, 31 May 2023 21:08:47 -0400
Subject: [PATCH] treewide: Use nir_trim_vector more

Via Coccinelle patches

@@
expression a, b, c;
@@
-nir_channels(b, a, (1 << c) - 1)
+nir_trim_vector(b, a, c)

@@
expression a, b, c;
@@
-nir_channels(b, a, BITFIELD_MASK(c))
+nir_trim_vector(b, a, c)

@@
expression a, b;
@@
-nir_channels(b, a, 3)
+nir_trim_vector(b, a, 2)

@@
expression a, b;
@@
-nir_channels(b, a, 7)
+nir_trim_vector(b, a, 3)

Plus a fixup to avoid pointlessly trimming an immediate in RADV and radeonsi.

Signed-off-by: Alyssa Rosenzweig
Reviewed-by: Rhys Perry
Part-of:
---
 src/amd/vulkan/meta/radv_meta.c | 2 +-
 src/amd/vulkan/meta/radv_meta_blit2d.c | 6 +++---
 src/amd/vulkan/meta/radv_meta_clear.c | 2 +-
 src/amd/vulkan/meta/radv_meta_fmask_copy.c | 2 +-
 src/amd/vulkan/meta/radv_meta_resolve_cs.c | 4 +++-
 src/amd/vulkan/meta/radv_meta_resolve_fs.c | 10 ++++++----
 src/amd/vulkan/radv_device_generated_commands.c | 6 ++++--
 src/amd/vulkan/radv_rt_common.c | 3 ++-
 src/amd/vulkan/radv_rt_shader.c | 2 +-
 src/asahi/lib/agx_meta.c | 2 +-
 .../compiler/v3d_nir_lower_image_load_store.c | 6 +++---
 src/compiler/nir/nir_deref.c | 2 +-
 src/compiler/nir/nir_lower_bitmap.c | 2 +-
 src/compiler/nir/nir_lower_drawpixels.c | 5 ++---
 src/compiler/nir/nir_lower_io.c | 8 ++++----
 src/compiler/nir/nir_lower_tex.c | 8 ++++----
 src/compiler/nir/nir_lower_vec3_to_vec4.c | 2 +-
 src/compiler/nir/nir_lower_viewport_transform.c | 2 +-
 src/compiler/nir/nir_split_64bit_vec3_and_vec4.c | 2 +-
 src/compiler/spirv/vtn_amd.c | 2 +-
 .../ir3/ir3_nir_lower_load_barycentric_at_offset.c | 3 ++-
 src/freedreno/vulkan/tu_shader.cc | 2 +-
 src/gallium/auxiliary/nir/nir_draw_helpers.c | 2 +-
 src/gallium/auxiliary/nir/tgsi_to_nir.c | 2 +-
 src/gallium/drivers/d3d12/d3d12_blit.cpp | 2 +-
 .../drivers/d3d12/d3d12_lower_image_casts.c | 6 ++++--
 .../drivers/r600/sfn/sfn_nir_lower_64bit.cpp | 14 +++++++-------
 src/gallium/drivers/r600/sfn/sfn_nir_lower_tex.cpp | 7 ++++---
 src/gallium/drivers/radeonsi/si_shaderlib_nir.c | 8 ++++----
 src/gallium/drivers/zink/zink_compiler.c | 5 +++--
 .../drivers/zink/zink_lower_cubemap_to_array.c | 9 +++++----
 src/intel/blorp/blorp_blit.c | 8 ++++----
 src/intel/blorp/blorp_nir_builder.h | 2 +-
 src/intel/compiler/brw_nir_lower_storage_image.c | 6 +++---
 src/intel/compiler/brw_nir_rt_builder.h | 4 ++--
 src/intel/vulkan/anv_nir_apply_pipeline_layout.c | 6 +++---
 .../vulkan_hasvk/anv_nir_apply_pipeline_layout.c | 2 +-
 src/mesa/main/ffvertex_prog.c | 5 ++---
 src/mesa/state_tracker/st_atifs_to_nir.c | 4 ++--
 src/mesa/state_tracker/st_pbo_compute.c | 11 +++++++----
 .../compiler/dxil_nir_lower_int_cubemaps.c | 8 ++++----
 .../compiler/dxil_nir_lower_int_samplers.c | 4 ++--
 src/microsoft/spirv_to_dxil/dxil_spirv_nir.c | 5 +++--
 src/microsoft/vulkan/dzn_nir.c | 4 ++--
 src/panfrost/util/pan_lower_framebuffer.c | 4 ++--
 src/panfrost/vulkan/panvk_vX_meta_copy.c | 2 +-
 46 files changed, 114 insertions(+), 99 deletions(-)

diff --git a/src/amd/vulkan/meta/radv_meta.c b/src/amd/vulkan/meta/radv_meta.c
index edcfe001499..777aab7d558 100644
--- a/src/amd/vulkan/meta/radv_meta.c
+++ b/src/amd/vulkan/meta/radv_meta.c
@@ -678,7 +678,7 @@ radv_meta_load_descriptor(nir_builder *b, unsigned desc_set, unsigned binding)
 {
    nir_ssa_def *rsrc = nir_vulkan_resource_index(b, 3, 32, nir_imm_int(b, 0),
                                                  .desc_set = desc_set, .binding = binding);
-   return nir_channels(b, rsrc, 0x3);
+   return nir_trim_vector(b, rsrc, 2);
 }
 
 nir_ssa_def *
diff --git
a/src/amd/vulkan/meta/radv_meta_blit2d.c b/src/amd/vulkan/meta/radv_meta_blit2d.c index 75d1b7de8c7..ea1a92f92d2 100644 --- a/src/amd/vulkan/meta/radv_meta_blit2d.c +++ b/src/amd/vulkan/meta/radv_meta_blit2d.c @@ -505,7 +505,7 @@ build_nir_copy_fragment_shader(struct radv_device *device, texel_fetch_build_fun color_out->data.location = FRAG_RESULT_DATA0; nir_ssa_def *pos_int = nir_f2i32(&b, nir_load_var(&b, tex_pos_in)); - nir_ssa_def *tex_pos = nir_channels(&b, pos_int, 0x3); + nir_ssa_def *tex_pos = nir_trim_vector(&b, pos_int, 2); nir_ssa_def *color = txf_func(&b, device, tex_pos, is_3d, is_multisampled); nir_store_var(&b, color_out, color, 0xf); @@ -530,7 +530,7 @@ build_nir_copy_fragment_shader_depth(struct radv_device *device, texel_fetch_bui color_out->data.location = FRAG_RESULT_DEPTH; nir_ssa_def *pos_int = nir_f2i32(&b, nir_load_var(&b, tex_pos_in)); - nir_ssa_def *tex_pos = nir_channels(&b, pos_int, 0x3); + nir_ssa_def *tex_pos = nir_trim_vector(&b, pos_int, 2); nir_ssa_def *color = txf_func(&b, device, tex_pos, is_3d, is_multisampled); nir_store_var(&b, color_out, color, 0x1); @@ -555,7 +555,7 @@ build_nir_copy_fragment_shader_stencil(struct radv_device *device, texel_fetch_b color_out->data.location = FRAG_RESULT_STENCIL; nir_ssa_def *pos_int = nir_f2i32(&b, nir_load_var(&b, tex_pos_in)); - nir_ssa_def *tex_pos = nir_channels(&b, pos_int, 0x3); + nir_ssa_def *tex_pos = nir_trim_vector(&b, pos_int, 2); nir_ssa_def *color = txf_func(&b, device, tex_pos, is_3d, is_multisampled); nir_store_var(&b, color_out, color, 0x1); diff --git a/src/amd/vulkan/meta/radv_meta_clear.c b/src/amd/vulkan/meta/radv_meta_clear.c index 9fe4bbc8d6f..8f34b8a2086 100644 --- a/src/amd/vulkan/meta/radv_meta_clear.c +++ b/src/amd/vulkan/meta/radv_meta_clear.c @@ -983,7 +983,7 @@ build_clear_dcc_comp_to_single_shader(struct radv_device *dev, bool is_msaa) nir_ssa_def *dcc_block_size = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8); /* Compute the coordinates. */ - nir_ssa_def *coord = nir_channels(&b, global_id, 0x3); + nir_ssa_def *coord = nir_trim_vector(&b, global_id, 2); coord = nir_imul(&b, coord, dcc_block_size); coord = nir_vec4(&b, nir_channel(&b, coord, 0), nir_channel(&b, coord, 1), diff --git a/src/amd/vulkan/meta/radv_meta_fmask_copy.c b/src/amd/vulkan/meta/radv_meta_fmask_copy.c index 25782bd29ba..3c6e652ae72 100644 --- a/src/amd/vulkan/meta/radv_meta_fmask_copy.c +++ b/src/amd/vulkan/meta/radv_meta_fmask_copy.c @@ -52,7 +52,7 @@ build_fmask_copy_compute_shader(struct radv_device *dev, int samples) nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id); /* Get coordinates. 
*/ - nir_ssa_def *src_coord = nir_channels(&b, global_id, 0x3); + nir_ssa_def *src_coord = nir_trim_vector(&b, global_id, 2); nir_ssa_def *dst_coord = nir_vec4(&b, nir_channel(&b, src_coord, 0), nir_channel(&b, src_coord, 1), nir_ssa_undef(&b, 1, 32), diff --git a/src/amd/vulkan/meta/radv_meta_resolve_cs.c b/src/amd/vulkan/meta/radv_meta_resolve_cs.c index 86658700697..ab1377e2093 100644 --- a/src/amd/vulkan/meta/radv_meta_resolve_cs.c +++ b/src/amd/vulkan/meta/radv_meta_resolve_cs.c @@ -139,7 +139,9 @@ build_depth_stencil_resolve_compute_shader(struct radv_device *dev, int samples, nir_ssa_def *offset = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8); - nir_ssa_def *resolve_coord = nir_iadd(&b, nir_channels(&b, global_id, 0x3), offset); + nir_ssa_def *resolve_coord = nir_iadd(&b, + nir_trim_vector(&b, global_id, 2), + offset); nir_ssa_def *img_coord = nir_vec3(&b, nir_channel(&b, resolve_coord, 0), nir_channel(&b, resolve_coord, 1), diff --git a/src/amd/vulkan/meta/radv_meta_resolve_fs.c b/src/amd/vulkan/meta/radv_meta_resolve_fs.c index ef5aabdb353..8e0d52bfe3f 100644 --- a/src/amd/vulkan/meta/radv_meta_resolve_fs.c +++ b/src/amd/vulkan/meta/radv_meta_resolve_fs.c @@ -48,12 +48,14 @@ build_resolve_fragment_shader(struct radv_device *dev, bool is_integer, int samp nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out, vec4, "f_color"); color_out->data.location = FRAG_RESULT_DATA0; - nir_ssa_def *pos_in = nir_channels(&b, nir_load_frag_coord(&b), 0x3); + nir_ssa_def *pos_in = nir_trim_vector(&b, nir_load_frag_coord(&b), 2); nir_ssa_def *src_offset = nir_load_push_constant(&b, 2, 32, nir_imm_int(&b, 0), .range = 8); nir_ssa_def *pos_int = nir_f2i32(&b, pos_in); - nir_ssa_def *img_coord = nir_channels(&b, nir_iadd(&b, pos_int, src_offset), 0x3); + nir_ssa_def *img_coord = nir_trim_vector(&b, + nir_iadd(&b, pos_int, src_offset), + 2); nir_variable *color = nir_local_variable_create(b.impl, glsl_vec4_type(), "color"); radv_meta_build_resolve_shader_core(dev, &b, is_integer, samples, input_img, color, img_coord); @@ -267,11 +269,11 @@ build_depth_stencil_resolve_fragment_shader(struct radv_device *dev, int samples nir_variable *fs_out = nir_variable_create(b.shader, nir_var_shader_out, vec4, "f_out"); fs_out->data.location = index == DEPTH_RESOLVE ? 
FRAG_RESULT_DEPTH : FRAG_RESULT_STENCIL; - nir_ssa_def *pos_in = nir_channels(&b, nir_load_frag_coord(&b), 0x3); + nir_ssa_def *pos_in = nir_trim_vector(&b, nir_load_frag_coord(&b), 2); nir_ssa_def *pos_int = nir_f2i32(&b, pos_in); - nir_ssa_def *img_coord = nir_channels(&b, pos_int, 0x3); + nir_ssa_def *img_coord = nir_trim_vector(&b, pos_int, 2); nir_deref_instr *input_img_deref = nir_build_deref_var(&b, input_img); nir_ssa_def *outval = nir_txf_ms_deref(&b, input_img_deref, img_coord, nir_imm_int(&b, 0)); diff --git a/src/amd/vulkan/radv_device_generated_commands.c b/src/amd/vulkan/radv_device_generated_commands.c index 937437dbfc5..2d58554979b 100644 --- a/src/amd/vulkan/radv_device_generated_commands.c +++ b/src/amd/vulkan/radv_device_generated_commands.c @@ -430,7 +430,8 @@ build_dgc_prepare_shader(struct radv_device *dev) nir_ssa_def *stream_data = nir_load_ssbo(&b, 4, 32, stream_buf, stream_offset); - nir_ssa_def *va = nir_pack_64_2x32(&b, nir_channels(&b, stream_data, 0x3)); + nir_ssa_def *va = nir_pack_64_2x32(&b, + nir_trim_vector(&b, stream_data, 2)); nir_ssa_def *size = nir_channel(&b, stream_data, 2); nir_ssa_def *stride = nir_channel(&b, stream_data, 3); @@ -529,7 +530,8 @@ build_dgc_prepare_shader(struct radv_device *dev) */ nir_ssa_def *num_records = nir_channel(&b, nir_load_var(&b, vbo_data), 2); nir_ssa_def *buf_va = nir_iand_imm( - &b, nir_pack_64_2x32(&b, nir_channels(&b, nir_load_var(&b, vbo_data), 0x3)), + &b, + nir_pack_64_2x32(&b, nir_trim_vector(&b, nir_load_var(&b, vbo_data), 2)), (1ull << 48) - 1ull); nir_push_if(&b, nir_ior(&b, nir_ieq_imm(&b, num_records, 0), nir_ieq_imm(&b, buf_va, 0))); diff --git a/src/amd/vulkan/radv_rt_common.c b/src/amd/vulkan/radv_rt_common.c index 478a0773627..adaa727af02 100644 --- a/src/amd/vulkan/radv_rt_common.c +++ b/src/amd/vulkan/radv_rt_common.c @@ -682,7 +682,8 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b, nir_store_deref(b, args->vars.top_stack, nir_load_deref(b, args->vars.stack), 1); nir_store_deref(b, args->vars.bvh_base, - nir_pack_64_2x32(b, nir_channels(b, instance_data, 0x3)), 1); + nir_pack_64_2x32(b, nir_trim_vector(b, instance_data, 2)), + 1); /* Push the instance root node onto the stack */ nir_store_deref(b, args->vars.current_node, nir_imm_int(b, RADV_BVH_ROOT_NODE), 0x1); diff --git a/src/amd/vulkan/radv_rt_shader.c b/src/amd/vulkan/radv_rt_shader.c index b10ee3375f7..23abd745a39 100644 --- a/src/amd/vulkan/radv_rt_shader.c +++ b/src/amd/vulkan/radv_rt_shader.c @@ -1470,7 +1470,7 @@ build_traversal_shader(struct radv_device *device, struct radv_ray_tracing_stage nir_store_var(&b, trav_vars.bvh_base, root_bvh_base, 1); - nir_ssa_def *vec3ones = nir_channels(&b, nir_imm_vec4(&b, 1.0, 1.0, 1.0, 1.0), 0x7); + nir_ssa_def *vec3ones = nir_imm_vec3(&b, 1.0, 1.0, 1.0); nir_store_var(&b, trav_vars.origin, nir_load_var(&b, vars.origin), 7); nir_store_var(&b, trav_vars.dir, nir_load_var(&b, vars.direction), 7); diff --git a/src/asahi/lib/agx_meta.c b/src/asahi/lib/agx_meta.c index f83c5609980..cd7d37e945e 100644 --- a/src/asahi/lib/agx_meta.c +++ b/src/asahi/lib/agx_meta.c @@ -37,7 +37,7 @@ build_background_op(nir_builder *b, enum agx_meta_op op, unsigned rt, { if (op == AGX_META_OP_LOAD) { nir_ssa_def *fragcoord = nir_load_frag_coord(b); - nir_ssa_def *coord = nir_channels(b, fragcoord, 0x3); + nir_ssa_def *coord = nir_trim_vector(b, fragcoord, 2); nir_tex_instr *tex = nir_tex_instr_create(b->shader, msaa ? 
2 : 1); /* The type doesn't matter as long as it matches the store */ diff --git a/src/broadcom/compiler/v3d_nir_lower_image_load_store.c b/src/broadcom/compiler/v3d_nir_lower_image_load_store.c index d080b0b3f7c..2900a29817f 100644 --- a/src/broadcom/compiler/v3d_nir_lower_image_load_store.c +++ b/src/broadcom/compiler/v3d_nir_lower_image_load_store.c @@ -103,9 +103,9 @@ v3d_nir_lower_image_store(nir_builder *b, nir_intrinsic_instr *instr) b->cursor = nir_before_instr(&instr->instr); - nir_ssa_def *color = nir_channels(b, - nir_ssa_for_src(b, instr->src[3], 4), - (1 << num_components) - 1); + nir_ssa_def *color = nir_trim_vector(b, + nir_ssa_for_src(b, instr->src[3], 4), + num_components); nir_ssa_def *formatted = NULL; if (format == PIPE_FORMAT_R11G11B10_FLOAT) { diff --git a/src/compiler/nir/nir_deref.c b/src/compiler/nir/nir_deref.c index 27b8cab4bbb..f22a268b648 100644 --- a/src/compiler/nir/nir_deref.c +++ b/src/compiler/nir/nir_deref.c @@ -1379,7 +1379,7 @@ opt_store_vec_deref(nir_builder *b, nir_intrinsic_instr *store) nir_src_for_ssa(&parent->dest.ssa)); /* Restrict things down as needed so the bitcast doesn't fail */ - data = nir_channels(b, data, (1 << util_last_bit(write_mask)) - 1); + data = nir_trim_vector(b, data, util_last_bit(write_mask)); if (old_bit_size != new_bit_size) data = nir_bitcast_vector(b, data, new_bit_size); data = resize_vector(b, data, new_num_comps); diff --git a/src/compiler/nir/nir_lower_bitmap.c b/src/compiler/nir/nir_lower_bitmap.c index 3033ac1c0ab..81d764a73d3 100644 --- a/src/compiler/nir/nir_lower_bitmap.c +++ b/src/compiler/nir/nir_lower_bitmap.c @@ -84,7 +84,7 @@ lower_bitmap(nir_shader *shader, nir_builder *b, tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref, &tex_deref->dest.ssa); tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord, - nir_channels(b, texcoord, (1 << tex->coord_components) - 1)); + nir_trim_vector(b, texcoord, tex->coord_components)); nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32); nir_builder_instr_insert(b, &tex->instr); diff --git a/src/compiler/nir/nir_lower_drawpixels.c b/src/compiler/nir/nir_lower_drawpixels.c index 9c88d69bacd..796d7586b3f 100644 --- a/src/compiler/nir/nir_lower_drawpixels.c +++ b/src/compiler/nir/nir_lower_drawpixels.c @@ -118,8 +118,7 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr * &tex_deref->dest.ssa); tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord, - nir_channels(b, texcoord, - (1 << tex->coord_components) - 1)); + nir_trim_vector(b, texcoord, tex->coord_components)); nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32); nir_builder_instr_insert(b, &tex->instr); @@ -159,7 +158,7 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr * tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref, &pixelmap_deref->dest.ssa); tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord, - nir_channels(b, def, 0x3)); + nir_trim_vector(b, def, 2)); nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32); nir_builder_instr_insert(b, &tex->instr); diff --git a/src/compiler/nir/nir_lower_io.c b/src/compiler/nir/nir_lower_io.c index 4e8e49ba695..ad83e9e0138 100644 --- a/src/compiler/nir/nir_lower_io.c +++ b/src/compiler/nir/nir_lower_io.c @@ -1039,7 +1039,7 @@ addr_to_index(nir_builder *b, nir_ssa_def *addr, return nir_unpack_64_2x32_split_y(b, addr); case nir_address_format_vec2_index_32bit_offset: assert(addr->num_components == 3); - return nir_channels(b, addr, 0x3); + return nir_trim_vector(b, addr, 2); default: unreachable("Invalid address 
format"); } } @@ -1111,7 +1111,7 @@ addr_to_global(nir_builder *b, nir_ssa_def *addr, case nir_address_format_64bit_global_32bit_offset: case nir_address_format_64bit_bounded_global: assert(addr->num_components == 4); - return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)), + return nir_iadd(b, nir_pack_64_2x32(b, nir_trim_vector(b, addr, 2)), nir_u2u64(b, nir_channel(b, addr, 3))); case nir_address_format_32bit_index_offset: @@ -1429,12 +1429,12 @@ build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin, if (op == nir_intrinsic_load_global_constant_offset) { assert(addr_format == nir_address_format_64bit_global_32bit_offset); load->src[0] = nir_src_for_ssa( - nir_pack_64_2x32(b, nir_channels(b, addr, 0x3))); + nir_pack_64_2x32(b, nir_trim_vector(b, addr, 2))); load->src[1] = nir_src_for_ssa(nir_channel(b, addr, 3)); } else if (op == nir_intrinsic_load_global_constant_bounded) { assert(addr_format == nir_address_format_64bit_bounded_global); load->src[0] = nir_src_for_ssa( - nir_pack_64_2x32(b, nir_channels(b, addr, 0x3))); + nir_pack_64_2x32(b, nir_trim_vector(b, addr, 2))); load->src[1] = nir_src_for_ssa(nir_channel(b, addr, 3)); load->src[2] = nir_src_for_ssa(nir_channel(b, addr, 2)); } else if (addr_format_is_global(addr_format, mode)) { diff --git a/src/compiler/nir/nir_lower_tex.c b/src/compiler/nir/nir_lower_tex.c index d5a75edebf0..3835372e0ba 100644 --- a/src/compiler/nir/nir_lower_tex.c +++ b/src/compiler/nir/nir_lower_tex.c @@ -703,15 +703,15 @@ lower_gradient_cube_map(nir_builder *b, nir_tex_instr *tex) */ nir_ssa_def *rcp_Q_z = nir_frcp(b, nir_channel(b, Q, 2)); - nir_ssa_def *Q_xy = nir_channels(b, Q, 0x3); + nir_ssa_def *Q_xy = nir_trim_vector(b, Q, 2); nir_ssa_def *tmp = nir_fmul(b, Q_xy, rcp_Q_z); - nir_ssa_def *dQdx_xy = nir_channels(b, dQdx, 0x3); + nir_ssa_def *dQdx_xy = nir_trim_vector(b, dQdx, 2); nir_ssa_def *dQdx_z = nir_channel(b, dQdx, 2); nir_ssa_def *dx = nir_fmul(b, rcp_Q_z, nir_fsub(b, dQdx_xy, nir_fmul(b, tmp, dQdx_z))); - nir_ssa_def *dQdy_xy = nir_channels(b, dQdy, 0x3); + nir_ssa_def *dQdy_xy = nir_trim_vector(b, dQdy, 2); nir_ssa_def *dQdy_z = nir_channel(b, dQdy, 2); nir_ssa_def *dy = nir_fmul(b, rcp_Q_z, nir_fsub(b, dQdy_xy, nir_fmul(b, tmp, dQdy_z))); @@ -1009,7 +1009,7 @@ linearize_srgb_result(nir_builder *b, nir_tex_instr *tex) b->cursor = nir_after_instr(&tex->instr); nir_ssa_def *rgb = - nir_format_srgb_to_linear(b, nir_channels(b, &tex->dest.ssa, 0x7)); + nir_format_srgb_to_linear(b, nir_trim_vector(b, &tex->dest.ssa, 3)); /* alpha is untouched: */ nir_ssa_def *result = nir_vec4(b, diff --git a/src/compiler/nir/nir_lower_vec3_to_vec4.c b/src/compiler/nir/nir_lower_vec3_to_vec4.c index a0ab162df37..34caef87c9a 100644 --- a/src/compiler/nir/nir_lower_vec3_to_vec4.c +++ b/src/compiler/nir/nir_lower_vec3_to_vec4.c @@ -59,7 +59,7 @@ lower_vec3_to_vec4_instr(nir_builder *b, nir_instr *instr, void *data) intrin->dest.ssa.num_components = 4; b->cursor = nir_after_instr(&intrin->instr); - nir_ssa_def *vec3 = nir_channels(b, &intrin->dest.ssa, 0x7); + nir_ssa_def *vec3 = nir_trim_vector(b, &intrin->dest.ssa, 3); nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa, vec3, vec3->parent_instr); diff --git a/src/compiler/nir/nir_lower_viewport_transform.c b/src/compiler/nir/nir_lower_viewport_transform.c index 923972f9f06..dbf0f6aad4a 100644 --- a/src/compiler/nir/nir_lower_viewport_transform.c +++ b/src/compiler/nir/nir_lower_viewport_transform.c @@ -67,7 +67,7 @@ lower_viewport_transform_instr(nir_builder *b, nir_instr *instr, nir_ssa_def 
*w_recip = nir_frcp(b, nir_channel(b, input_point, 3)); - nir_ssa_def *ndc_point = nir_fmul(b, nir_channels(b, input_point, 0x7), + nir_ssa_def *ndc_point = nir_fmul(b, nir_trim_vector(b, input_point, 3), w_recip); nir_ssa_def *screen = nir_fadd(b, nir_fmul(b, ndc_point, scale), diff --git a/src/compiler/nir/nir_split_64bit_vec3_and_vec4.c b/src/compiler/nir/nir_split_64bit_vec3_and_vec4.c index ac6c7c1edc8..c87fe22a932 100644 --- a/src/compiler/nir/nir_split_64bit_vec3_and_vec4.c +++ b/src/compiler/nir/nir_split_64bit_vec3_and_vec4.c @@ -208,7 +208,7 @@ split_store_deref(nir_builder *b, nir_intrinsic_instr *intr, int write_mask_xy = nir_intrinsic_write_mask(intr) & 3; if (write_mask_xy) { - nir_ssa_def *src_xy = nir_channels(b, intr->src[1].ssa, 3); + nir_ssa_def *src_xy = nir_trim_vector(b, intr->src[1].ssa, 2); nir_build_store_deref(b, &deref_xy->dest.ssa, src_xy, write_mask_xy); } diff --git a/src/compiler/spirv/vtn_amd.c b/src/compiler/spirv/vtn_amd.c index d96806f4183..f76b5544401 100644 --- a/src/compiler/spirv/vtn_amd.c +++ b/src/compiler/spirv/vtn_amd.c @@ -37,7 +37,7 @@ vtn_handle_amd_gcn_shader_instruction(struct vtn_builder *b, SpvOp ext_opcode, break; case CubeFaceCoordAMD: { def = nir_cube_face_coord_amd(&b->nb, vtn_get_nir_ssa(b, w[5])); - nir_ssa_def *st = nir_channels(&b->nb, def, 0x3); + nir_ssa_def *st = nir_trim_vector(&b->nb, def, 2); nir_ssa_def *invma = nir_frcp(&b->nb, nir_channel(&b->nb, def, 2)); def = nir_ffma_imm2(&b->nb, st, invma, 0.5); break; diff --git a/src/freedreno/ir3/ir3_nir_lower_load_barycentric_at_offset.c b/src/freedreno/ir3/ir3_nir_lower_load_barycentric_at_offset.c index 3591df9fce5..d82d9673854 100644 --- a/src/freedreno/ir3/ir3_nir_lower_load_barycentric_at_offset.c +++ b/src/freedreno/ir3/ir3_nir_lower_load_barycentric_at_offset.c @@ -74,7 +74,8 @@ ir3_nir_lower_load_barycentric_at_offset_instr(nir_builder *b, nir_instr *instr, pos = nir_ffma(b, chan(off, 1), nir_fddy(b, sij), pos); /* convert back into screen space, dividing by the offset 1/w */ - return nir_fmul(b, nir_channels(b, pos, 0x3), nir_frcp(b, chan(pos, 2))); + return nir_fmul(b, nir_trim_vector(b, pos, 2), + nir_frcp(b, chan(pos, 2))); } } diff --git a/src/freedreno/vulkan/tu_shader.cc b/src/freedreno/vulkan/tu_shader.cc index d21b1f2ca55..b52601189e2 100644 --- a/src/freedreno/vulkan/tu_shader.cc +++ b/src/freedreno/vulkan/tu_shader.cc @@ -881,7 +881,7 @@ lower_fdm_instr(struct nir_builder *b, nir_instr *instr, void *data) nir_ssa_def *frag_offset = nir_load_frag_offset_ir3(b, view, .range = options->num_views); nir_ssa_def *unscaled_coord = nir_load_frag_coord_unscaled_ir3(b); - nir_ssa_def *xy = nir_channels(b, unscaled_coord, 0x3); + nir_ssa_def *xy = nir_trim_vector(b, unscaled_coord, 2); xy = nir_fmul(b, nir_fsub(b, xy, frag_offset), nir_i2f32(b, frag_size)); return nir_vec4(b, nir_channel(b, xy, 0), diff --git a/src/gallium/auxiliary/nir/nir_draw_helpers.c b/src/gallium/auxiliary/nir/nir_draw_helpers.c index ddf1eb735b5..fd0a0916011 100644 --- a/src/gallium/auxiliary/nir/nir_draw_helpers.c +++ b/src/gallium/auxiliary/nir/nir_draw_helpers.c @@ -67,7 +67,7 @@ nir_lower_pstipple_block(nir_block *block, nir_ssa_def *frag_coord = state->fs_pos_is_sysval ? 
nir_load_frag_coord(b) : load_frag_coord(b); - texcoord = nir_fmul(b, nir_channels(b, frag_coord, 0x3), + texcoord = nir_fmul(b, nir_trim_vector(b, frag_coord, 2), nir_imm_vec2(b, 1.0/32.0, 1.0/32.0)); nir_tex_instr *tex = nir_tex_instr_create(b->shader, 1); diff --git a/src/gallium/auxiliary/nir/tgsi_to_nir.c b/src/gallium/auxiliary/nir/tgsi_to_nir.c index 44a8a8f4193..609adb96c15 100644 --- a/src/gallium/auxiliary/nir/tgsi_to_nir.c +++ b/src/gallium/auxiliary/nir/tgsi_to_nir.c @@ -966,7 +966,7 @@ ttn_alu(nir_builder *b, nir_op op, nir_alu_dest dest, unsigned dest_bitsize, * two components, and we need to truncate here to avoid creating a * vec8 after bitcasting the destination. */ - def = nir_channels(b, def, 0x3); + def = nir_trim_vector(b, def, 2); } def = nir_bitcast_vector(b, def, 32); } diff --git a/src/gallium/drivers/d3d12/d3d12_blit.cpp b/src/gallium/drivers/d3d12/d3d12_blit.cpp index 19d3eb02786..6d491165719 100644 --- a/src/gallium/drivers/d3d12/d3d12_blit.cpp +++ b/src/gallium/drivers/d3d12/d3d12_blit.cpp @@ -699,7 +699,7 @@ get_stencil_resolve_fs(struct d3d12_context *ctx, bool no_flip) tex->sampler_dim = GLSL_SAMPLER_DIM_MS; tex->op = nir_texop_txf_ms; tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord, - nir_channels(&b, nir_f2i32(&b, pos_src), 0x3)); + nir_trim_vector(&b, nir_f2i32(&b, pos_src), 2)); tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_ms_index, nir_imm_int(&b, 0)); /* just use first sample */ tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_texture_deref, tex_deref); tex->dest_type = nir_type_uint32; diff --git a/src/gallium/drivers/d3d12/d3d12_lower_image_casts.c b/src/gallium/drivers/d3d12/d3d12_lower_image_casts.c index feb203bd8c9..26e37f756a1 100644 --- a/src/gallium/drivers/d3d12/d3d12_lower_image_casts.c +++ b/src/gallium/drivers/d3d12/d3d12_lower_image_casts.c @@ -73,7 +73,8 @@ convert_value(nir_builder *b, nir_ssa_def *value, src_as_vec = nir_pack_unorm_4x8(b, value); else { nir_ssa_def *packed_channels[2]; - packed_channels[0] = nir_pack_unorm_2x16(b, nir_channels(b, value, 0x3)); + packed_channels[0] = nir_pack_unorm_2x16(b, + nir_trim_vector(b, value, 2)); packed_channels[1] = nir_pack_unorm_2x16(b, nir_channels(b, value, 0x3 << 2)); src_as_vec = nir_vec(b, packed_channels, 2); } @@ -82,7 +83,8 @@ convert_value(nir_builder *b, nir_ssa_def *value, src_as_vec = nir_pack_snorm_4x8(b, value); else { nir_ssa_def *packed_channels[2]; - packed_channels[0] = nir_pack_snorm_2x16(b, nir_channels(b, value, 0x3)); + packed_channels[0] = nir_pack_snorm_2x16(b, + nir_trim_vector(b, value, 2)); packed_channels[1] = nir_pack_snorm_2x16(b, nir_channels(b, value, 0x3 << 2)); src_as_vec = nir_vec(b, packed_channels, 2); } diff --git a/src/gallium/drivers/r600/sfn/sfn_nir_lower_64bit.cpp b/src/gallium/drivers/r600/sfn/sfn_nir_lower_64bit.cpp index 69e3f41e2ef..54ecd94ac3d 100644 --- a/src/gallium/drivers/r600/sfn/sfn_nir_lower_64bit.cpp +++ b/src/gallium/drivers/r600/sfn/sfn_nir_lower_64bit.cpp @@ -421,7 +421,7 @@ LowerSplit64BitVar::split_store_deref_array(nir_intrinsic_instr *intr, assert(old_components > 2 && old_components <= 4); - auto src_xy = nir_channels(b, intr->src[1].ssa, 3); + auto src_xy = nir_trim_vector(b, intr->src[1].ssa, 2); auto vars = get_var_pair(old_var); @@ -458,7 +458,7 @@ LowerSplit64BitVar::split_store_deref_var(nir_intrinsic_instr *intr, assert(old_components > 2 && old_components <= 4); - auto src_xy = nir_channels(b, intr->src[1].ssa, 3); + auto src_xy = nir_trim_vector(b, intr->src[1].ssa, 2); auto vars = get_var_pair(old_var); @@ -558,7 
+558,7 @@ LowerSplit64BitVar::split_store_output(nir_intrinsic_instr *store1) nir_io_semantics sem = nir_intrinsic_io_semantics(store1); auto store2 = nir_instr_as_intrinsic(nir_instr_clone(b->shader, &store1->instr)); - auto src1 = nir_channels(b, src.ssa, 3); + auto src1 = nir_trim_vector(b, src.ssa, 2); auto src2 = nir_channels(b, src.ssa, old_components == 3 ? 4 : 0xc); nir_instr_rewrite_src(&store1->instr, &src, nir_src_for_ssa(src1)); @@ -671,8 +671,8 @@ LowerSplit64BitVar::split_reduction3(nir_alu_instr *alu, { nir_ssa_def *src[2][2]; - src[0][0] = nir_channels(b, nir_ssa_for_src(b, alu->src[0].src, 2), 3); - src[0][1] = nir_channels(b, nir_ssa_for_src(b, alu->src[1].src, 2), 3); + src[0][0] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[0].src, 2), 2); + src[0][1] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[1].src, 2), 2); src[1][0] = nir_channel(b, nir_ssa_for_src(b, alu->src[0].src, 3), 2); src[1][1] = nir_channel(b, nir_ssa_for_src(b, alu->src[1].src, 3), 2); @@ -688,8 +688,8 @@ LowerSplit64BitVar::split_reduction4(nir_alu_instr *alu, { nir_ssa_def *src[2][2]; - src[0][0] = nir_channels(b, nir_ssa_for_src(b, alu->src[0].src, 2), 3); - src[0][1] = nir_channels(b, nir_ssa_for_src(b, alu->src[1].src, 2), 3); + src[0][0] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[0].src, 2), 2); + src[0][1] = nir_trim_vector(b, nir_ssa_for_src(b, alu->src[1].src, 2), 2); src[1][0] = nir_channels(b, nir_ssa_for_src(b, alu->src[0].src, 4), 0xc); src[1][1] = nir_channels(b, nir_ssa_for_src(b, alu->src[1].src, 4), 0xc); diff --git a/src/gallium/drivers/r600/sfn/sfn_nir_lower_tex.cpp b/src/gallium/drivers/r600/sfn/sfn_nir_lower_tex.cpp index cff20fddf02..b9229752402 100644 --- a/src/gallium/drivers/r600/sfn/sfn_nir_lower_tex.cpp +++ b/src/gallium/drivers/r600/sfn/sfn_nir_lower_tex.cpp @@ -42,7 +42,7 @@ lower_coord_shift_normalized(nir_builder *b, nir_tex_instr *tex) nir_ssa_def *corr = nullptr; if (unlikely(tex->array_is_lowered_cube)) { auto corr2 = nir_fadd(b, - nir_channels(b, tex->src[coord_index].src.ssa, 3), + nir_trim_vector(b, tex->src[coord_index].src.ssa, 2), nir_fmul_imm(b, scale, -0.5f)); corr = nir_vec3(b, nir_channel(b, corr2, 0), @@ -66,7 +66,7 @@ lower_coord_shift_unnormalized(nir_builder *b, nir_tex_instr *tex) nir_ssa_def *corr = nullptr; if (unlikely(tex->array_is_lowered_cube)) { auto corr2 = nir_fadd_imm(b, - nir_channels(b, tex->src[coord_index].src.ssa, 3), + nir_trim_vector(b, tex->src[coord_index].src.ssa, 2), -0.5f); corr = nir_vec3(b, nir_channel(b, corr2, 0), @@ -267,7 +267,8 @@ r600_nir_lower_cube_to_2darray_impl(nir_builder *b, nir_instr *instr, void *_opt int coord_idx = nir_tex_instr_src_index(tex, nir_tex_src_coord); assert(coord_idx >= 0); - auto cubed = nir_cube_r600(b, nir_channels(b, tex->src[coord_idx].src.ssa, 0x7)); + auto cubed = nir_cube_r600(b, + nir_trim_vector(b, tex->src[coord_idx].src.ssa, 3)); auto xy = nir_fmad(b, nir_vec2(b, nir_channel(b, cubed, 1), nir_channel(b, cubed, 0)), nir_frcp(b, nir_fabs(b, nir_channel(b, cubed, 2))), diff --git a/src/gallium/drivers/radeonsi/si_shaderlib_nir.c b/src/gallium/drivers/radeonsi/si_shaderlib_nir.c index bee8d28d479..c9e9547aa16 100644 --- a/src/gallium/drivers/radeonsi/si_shaderlib_nir.c +++ b/src/gallium/drivers/radeonsi/si_shaderlib_nir.c @@ -199,9 +199,9 @@ void *gfx9_create_clear_dcc_msaa_cs(struct si_context *sctx, struct si_texture * /* Multiply the coordinates by the DCC block size (they are DCC block coordinates). 
*/ coord = nir_imul(&b, coord, - nir_channels(&b, nir_imm_ivec4(&b, tex->surface.u.gfx9.color.dcc_block_width, - tex->surface.u.gfx9.color.dcc_block_height, - tex->surface.u.gfx9.color.dcc_block_depth, 0), 0x7)); + nir_imm_ivec3(&b, tex->surface.u.gfx9.color.dcc_block_width, + tex->surface.u.gfx9.color.dcc_block_height, + tex->surface.u.gfx9.color.dcc_block_depth)); nir_ssa_def *offset = ac_nir_dcc_addr_from_coord(&b, &sctx->screen->info, tex->surface.bpe, @@ -474,7 +474,7 @@ void *si_create_blit_cs(struct si_context *sctx, const union si_compute_blit_sha /* Add box.xyz. */ nir_ssa_def *coord_src = NULL, *coord_dst = NULL; - unpack_2x16_signed(&b, nir_channels(&b, nir_load_user_data_amd(&b), 0x7), + unpack_2x16_signed(&b, nir_trim_vector(&b, nir_load_user_data_amd(&b), 3), &coord_src, &coord_dst); coord_dst = nir_iadd(&b, coord_dst, dst_xyz); coord_src = nir_iadd(&b, coord_src, src_xyz); diff --git a/src/gallium/drivers/zink/zink_compiler.c b/src/gallium/drivers/zink/zink_compiler.c index a4384a1bbc8..466b9688296 100644 --- a/src/gallium/drivers/zink/zink_compiler.c +++ b/src/gallium/drivers/zink/zink_compiler.c @@ -701,7 +701,7 @@ viewport_map(nir_builder *b, nir_ssa_def *vert, nir_ssa_def *scale) { nir_ssa_def *w_recip = nir_frcp(b, nir_channel(b, vert, 3)); - nir_ssa_def *ndc_point = nir_fmul(b, nir_channels(b, vert, 0x3), + nir_ssa_def *ndc_point = nir_fmul(b, nir_trim_vector(b, vert, 2), w_recip); return nir_fmul(b, ndc_point, scale); } @@ -3007,7 +3007,8 @@ lower_64bit_vars_function(nir_shader *shader, nir_function *function, nir_variab for (unsigned i = 0; i < 2; i++, num_components -= 4) { nir_deref_instr *strct = nir_build_deref_struct(&b, deref, i); nir_ssa_def *load = nir_load_deref(&b, strct); - comp[i * 2] = nir_pack_64_2x32(&b, nir_channels(&b, load, BITFIELD_MASK(2))); + comp[i * 2] = nir_pack_64_2x32(&b, + nir_trim_vector(&b, load, 2)); if (num_components > 2) comp[i * 2 + 1] = nir_pack_64_2x32(&b, nir_channels(&b, load, BITFIELD_RANGE(2, 2))); } diff --git a/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c b/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c index f09679186f4..55dd965ba96 100644 --- a/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c +++ b/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c @@ -168,7 +168,8 @@ create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_ssa_def * array_tex->src[s].src_type = tex->src[i].src_type; if (psrc->ssa->num_components != nir_tex_instr_src_size(array_tex, s)) { - nir_ssa_def *c = nir_channels(b, psrc->ssa, BITFIELD_MASK(nir_tex_instr_src_size(array_tex, s))); + nir_ssa_def *c = nir_trim_vector(b, psrc->ssa, + nir_tex_instr_src_size(array_tex, s)); array_tex->src[s].src = nir_src_for_ssa(c); } else nir_src_copy(&array_tex->src[s].src, psrc, &array_tex->instr); @@ -310,8 +311,8 @@ handle_cube_gather(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *coord) b->cursor = nir_after_instr(coord->parent_instr); nir_ssa_def *const_05 = nir_imm_float(b, 0.5f); - nir_ssa_def *texel_coords = nir_fmul(b, nir_channels(b, coord, 3), - nir_i2f32(b, nir_channels(b, tex_size, 3))); + nir_ssa_def *texel_coords = nir_fmul(b, nir_trim_vector(b, coord, 2), + nir_i2f32(b, nir_trim_vector(b, tex_size, 2))); nir_ssa_def *x_orig = nir_channel(b, texel_coords, 0); nir_ssa_def *y_orig = nir_channel(b, texel_coords, 1); @@ -490,7 +491,7 @@ lower_cube_txs(nir_builder *b, nir_tex_instr *tex) nir_ssa_def *size = nir_vec3(b, nir_channel(b, &tex->dest.ssa, 0), nir_channel(b, &tex->dest.ssa, 1), cube_array_dim); - return 
nir_channels(b, size, BITFIELD_MASK(num_components)); + return nir_trim_vector(b, size, num_components); } static nir_ssa_def * diff --git a/src/intel/blorp/blorp_blit.c b/src/intel/blorp/blorp_blit.c index 46ed6ad7a47..d7e7a42dd48 100644 --- a/src/intel/blorp/blorp_blit.c +++ b/src/intel/blorp/blorp_blit.c @@ -111,7 +111,7 @@ blorp_blit_get_cs_dst_coords(nir_builder *b, coord = nir_isub(b, coord, nir_load_var(b, v->v_dst_offset)); assert(!key->persample_msaa_dispatch); - return nir_channels(b, coord, 0x3); + return nir_trim_vector(b, coord, 2); } /** @@ -718,7 +718,7 @@ blorp_nir_manual_blend_bilinear(nir_builder *b, nir_ssa_def *pos, const struct brw_blorp_blit_prog_key *key, struct brw_blorp_blit_vars *v) { - nir_ssa_def *pos_xy = nir_channels(b, pos, 0x3); + nir_ssa_def *pos_xy = nir_trim_vector(b, pos, 2); nir_ssa_def *rect_grid = nir_load_var(b, v->v_rect_grid); nir_ssa_def *scale = nir_imm_vec2(b, key->x_scale, key->y_scale); @@ -1286,7 +1286,7 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp, * number 0, because that's the only sample there is. */ if (key->src_samples == 1) - src_pos = nir_channels(&b, src_pos, 0x3); + src_pos = nir_trim_vector(&b, src_pos, 2); /* X, Y, and S are now the coordinates of the pixel in the source image * that we want to texture from. Exception: if we are blending, then S is @@ -1373,7 +1373,7 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp, /* Resolves (effecively) use texelFetch, so we need integers and we * don't care about the sample index if we got one. */ - src_pos = nir_f2i32(&b, nir_channels(&b, src_pos, 0x3)); + src_pos = nir_f2i32(&b, nir_trim_vector(&b, src_pos, 2)); if (devinfo->ver == 6) { /* Because gfx6 only supports 4x interleved MSAA, we can do all the diff --git a/src/intel/blorp/blorp_nir_builder.h b/src/intel/blorp/blorp_nir_builder.h index 7f8446dabdd..545e9b5d14b 100644 --- a/src/intel/blorp/blorp_nir_builder.h +++ b/src/intel/blorp/blorp_nir_builder.h @@ -53,7 +53,7 @@ blorp_nir_txf_ms_mcs(nir_builder *b, nir_ssa_def *xy_pos, nir_ssa_def *layer) } else { tex->is_array = false; tex->coord_components = 2; - coord = nir_channels(b, xy_pos, 0x3); + coord = nir_trim_vector(b, xy_pos, 2); } tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord, coord); diff --git a/src/intel/compiler/brw_nir_lower_storage_image.c b/src/intel/compiler/brw_nir_lower_storage_image.c index eb44fdbf009..fb55c091dae 100644 --- a/src/intel/compiler/brw_nir_lower_storage_image.c +++ b/src/intel/compiler/brw_nir_lower_storage_image.c @@ -116,7 +116,7 @@ image_address(nir_builder *b, const struct intel_device_info *devinfo, */ nir_ssa_def *xypos = (coord->num_components == 1) ? nir_vec2(b, coord, nir_imm_int(b, 0)) : - nir_channels(b, coord, 0x3); + nir_trim_vector(b, coord, 2); xypos = nir_iadd(b, xypos, offset); /* The layout of 3-D textures in memory is sort-of like a tiling @@ -174,8 +174,8 @@ image_address(nir_builder *b, const struct intel_device_info *devinfo, /* Calculate the minor x and y indices. */ nir_ssa_def *minor = nir_ubfe(b, xypos, nir_imm_int(b, 0), - nir_channels(b, tiling, 0x3)); - nir_ssa_def *major = nir_ushr(b, xypos, nir_channels(b, tiling, 0x3)); + nir_trim_vector(b, tiling, 2)); + nir_ssa_def *major = nir_ushr(b, xypos, nir_trim_vector(b, tiling, 2)); /* Calculate the texel index from the start of the tile row and the * vertical coordinate of the row. 
diff --git a/src/intel/compiler/brw_nir_rt_builder.h b/src/intel/compiler/brw_nir_rt_builder.h index d8d604b3be0..23c1220363d 100644 --- a/src/intel/compiler/brw_nir_rt_builder.h +++ b/src/intel/compiler/brw_nir_rt_builder.h @@ -313,7 +313,7 @@ brw_nir_rt_load_globals_addr(nir_builder *b, { nir_ssa_def *data; data = brw_nir_rt_load_const(b, 16, addr, nir_imm_true(b)); - defs->base_mem_addr = nir_pack_64_2x32(b, nir_channels(b, data, 0x3)); + defs->base_mem_addr = nir_pack_64_2x32(b, nir_trim_vector(b, data, 2)); defs->call_stack_handler_addr = nir_pack_64_2x32(b, nir_channels(b, data, 0x3 << 2)); @@ -781,7 +781,7 @@ brw_nir_rt_load_mem_ray_from_addr(nir_builder *b, brw_nir_rt_load(b, nir_iadd_imm(b, ray_addr, 48), 16, 4, 32), }; - defs->orig = nir_channels(b, data[0], 0x7); + defs->orig = nir_trim_vector(b, data[0], 3); defs->dir = nir_vec3(b, nir_channel(b, data[0], 3), nir_channel(b, data[1], 0), nir_channel(b, data[1], 1)); diff --git a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c index 40318dfa7d4..e76520f7468 100644 --- a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c +++ b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c @@ -316,7 +316,7 @@ build_load_descriptor_mem(nir_builder *b, switch (state->desc_addr_format) { case nir_address_format_64bit_global_32bit_offset: { nir_ssa_def *base_addr = - nir_pack_64_2x32(b, nir_channels(b, desc_addr, 0x3)); + nir_pack_64_2x32(b, nir_trim_vector(b, desc_addr, 2)); nir_ssa_def *offset32 = nir_iadd_imm(b, nir_channel(b, desc_addr, 3), desc_offset); @@ -916,7 +916,7 @@ build_indirect_buffer_addr_for_res_index(nir_builder *b, * have a sliding window range. */ nir_ssa_def *base_ptr = - nir_pack_64_2x32(b, nir_channels(b, desc, 0x3)); + nir_pack_64_2x32(b, nir_trim_vector(b, desc, 2)); base_ptr = nir_iadd(b, base_ptr, nir_u2u64(b, dynamic_offset)); desc = nir_vec4(b, nir_unpack_64_2x32_split_x(b, base_ptr), nir_unpack_64_2x32_split_y(b, base_ptr), @@ -975,7 +975,7 @@ build_direct_buffer_addr_for_res_index(nir_builder *b, * have a sliding window range. */ nir_ssa_def *base_ptr = - nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)); + nir_pack_64_2x32(b, nir_trim_vector(b, addr, 2)); base_ptr = nir_iadd(b, base_ptr, nir_u2u64(b, dynamic_offset)); addr = nir_vec4(b, nir_unpack_64_2x32_split_x(b, base_ptr), nir_unpack_64_2x32_split_y(b, base_ptr), diff --git a/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c b/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c index b5fd65cfd9e..e3e149690b4 100644 --- a/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c +++ b/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c @@ -485,7 +485,7 @@ build_buffer_addr_for_res_index(nir_builder *b, * have a sliding window range. 
*/ nir_ssa_def *base_ptr = - nir_pack_64_2x32(b, nir_channels(b, desc, 0x3)); + nir_pack_64_2x32(b, nir_trim_vector(b, desc, 2)); base_ptr = nir_iadd(b, base_ptr, nir_u2u64(b, dynamic_offset)); desc = nir_vec4(b, nir_unpack_64_2x32_split_x(b, base_ptr), nir_unpack_64_2x32_split_y(b, base_ptr), diff --git a/src/mesa/main/ffvertex_prog.c b/src/mesa/main/ffvertex_prog.c index 0a0cbcbc4e1..96ce1759a39 100644 --- a/src/mesa/main/ffvertex_prog.c +++ b/src/mesa/main/ffvertex_prog.c @@ -1052,9 +1052,8 @@ static void build_fog( struct tnl_program *p ) switch (p->state->fog_distance_mode) { case FDM_EYE_RADIAL: /* Z = sqrt(Xe*Xe + Ye*Ye + Ze*Ze) */ - fog = nir_fast_length(p->b, nir_channels(p->b, - get_eye_position(p), - 0x7)); + fog = nir_fast_length(p->b, + nir_trim_vector(p->b, get_eye_position(p), 3)); break; case FDM_EYE_PLANE: /* Z = Ze */ fog = get_eye_position_z(p); diff --git a/src/mesa/state_tracker/st_atifs_to_nir.c b/src/mesa/state_tracker/st_atifs_to_nir.c index 18802a8689c..249d634ba13 100644 --- a/src/mesa/state_tracker/st_atifs_to_nir.c +++ b/src/mesa/state_tracker/st_atifs_to_nir.c @@ -100,7 +100,7 @@ apply_swizzle(struct st_translate *t, nir_ssa_def *rcp = nir_frcp(t->b, nir_channel(t->b, src, swizzle == GL_SWIZZLE_STR_DR_ATI ? 2 : 3)); - nir_ssa_def *st_mul = nir_fmul(t->b, nir_channels(t->b, src, 0x3), rcp); + nir_ssa_def *st_mul = nir_fmul(t->b, nir_trim_vector(t->b, src, 2), rcp); return nir_vec4(t->b, nir_channel(t->b, st_mul, 0), @@ -364,7 +364,7 @@ compile_setupinst(struct st_translate *t, tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref, &tex_deref->dest.ssa); tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord, - nir_channels(t->b, coord, (1 << tex->coord_components) - 1)); + nir_trim_vector(t->b, coord, tex->coord_components)); nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32); nir_builder_instr_insert(t->b, &tex->instr); diff --git a/src/mesa/state_tracker/st_pbo_compute.c b/src/mesa/state_tracker/st_pbo_compute.c index 82aa82f1bed..2a4ec9fd2aa 100644 --- a/src/mesa/state_tracker/st_pbo_compute.c +++ b/src/mesa/state_tracker/st_pbo_compute.c @@ -433,7 +433,7 @@ grab_components(nir_builder *b, nir_ssa_def *pixel, nir_ssa_def *buffer_offset, { if (weird_packed) { nir_push_if(b, nir_ieq_imm(b, sd->bits1, 32)); - write_conversion(b, nir_channels(b, pixel, 3), buffer_offset, sd); + write_conversion(b, nir_trim_vector(b, pixel, 2), buffer_offset, sd); nir_push_else(b, NULL); write_conversion(b, nir_channel(b, pixel, 0), buffer_offset, sd); nir_pop_if(b, NULL); @@ -442,12 +442,15 @@ grab_components(nir_builder *b, nir_ssa_def *pixel, nir_ssa_def *buffer_offset, write_conversion(b, nir_channel(b, pixel, 0), buffer_offset, sd); nir_push_else(b, NULL); nir_push_if(b, nir_ieq_imm(b, sd->channels, 2)); - write_conversion(b, nir_channels(b, pixel, (1 << 2) - 1), buffer_offset, sd); + write_conversion(b, nir_trim_vector(b, pixel, 2), buffer_offset, + sd); nir_push_else(b, NULL); nir_push_if(b, nir_ieq_imm(b, sd->channels, 3)); - write_conversion(b, nir_channels(b, pixel, (1 << 3) - 1), buffer_offset, sd); + write_conversion(b, nir_trim_vector(b, pixel, 3), + buffer_offset, sd); nir_push_else(b, NULL); - write_conversion(b, nir_channels(b, pixel, (1 << 4) - 1), buffer_offset, sd); + write_conversion(b, nir_trim_vector(b, pixel, 4), + buffer_offset, sd); nir_pop_if(b, NULL); nir_pop_if(b, NULL); nir_pop_if(b, NULL); diff --git a/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c b/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c index 09840ecd1dd..b4d55a51ae5 100644 --- 
a/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c +++ b/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c @@ -321,8 +321,8 @@ handle_cube_gather(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *coord) b->cursor = nir_after_instr(coord->parent_instr); nir_ssa_def *const_05 = nir_imm_float(b, 0.5f); - nir_ssa_def *texel_coords = nir_fmul(b, nir_channels(b, coord, 3), - nir_i2f32(b, nir_channels(b, tex_size, 3))); + nir_ssa_def *texel_coords = nir_fmul(b, nir_trim_vector(b, coord, 2), + nir_i2f32(b, nir_trim_vector(b, tex_size, 2))); nir_ssa_def *x_orig = nir_channel(b, texel_coords, 0); nir_ssa_def *y_orig = nir_channel(b, texel_coords, 1); @@ -426,7 +426,7 @@ lower_cube_txs(nir_builder *b, nir_tex_instr *tex) { b->cursor = nir_after_instr(&tex->instr); if (!tex->is_array) - return nir_channels(b, &tex->dest.ssa, 3); + return nir_trim_vector(b, &tex->dest.ssa, 2); nir_ssa_def *array_dim = nir_channel(b, &tex->dest.ssa, 2); nir_ssa_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6)); @@ -440,7 +440,7 @@ lower_cube_image_size(nir_builder *b, nir_intrinsic_instr *intr) { b->cursor = nir_after_instr(&intr->instr); if (!nir_intrinsic_image_array(intr)) - return nir_channels(b, &intr->dest.ssa, 3); + return nir_trim_vector(b, &intr->dest.ssa, 2); nir_ssa_def *array_dim = nir_channel(b, &intr->dest.ssa, 2); nir_ssa_def *cube_array_dim = nir_idiv(b, array_dim, nir_imm_int(b, 6)); diff --git a/src/microsoft/compiler/dxil_nir_lower_int_samplers.c b/src/microsoft/compiler/dxil_nir_lower_int_samplers.c index 35894ad9f03..e0015ffb21e 100644 --- a/src/microsoft/compiler/dxil_nir_lower_int_samplers.c +++ b/src/microsoft/compiler/dxil_nir_lower_int_samplers.c @@ -85,8 +85,8 @@ dx_get_texture_lod(nir_builder *b, nir_tex_instr *tex) * check for is_array though, in the worst case we create an additional * move the the optimization will remove later again. 
*/ int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord); - nir_ssa_def *ssa_src = nir_channels(b, tex->src[coord_index].src.ssa, - (1 << coord_components) - 1); + nir_ssa_def *ssa_src = nir_trim_vector(b, tex->src[coord_index].src.ssa, + coord_components); nir_src src = nir_src_for_ssa(ssa_src); nir_src_copy(&tql->src[0].src, &src, &tql->instr); tql->src[0].src_type = nir_tex_src_coord; diff --git a/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c b/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c index 013ac4d98fc..0f9beaa672d 100644 --- a/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c +++ b/src/microsoft/spirv_to_dxil/dxil_spirv_nir.c @@ -732,7 +732,8 @@ write_pntc_with_pos(nir_builder *b, nir_instr *instr, void *_data) nir_channel(b, load_desc, 0), nir_imm_int(b, offset), 4, 32, 16), 0x6); - nir_ssa_def *point_center_in_clip = nir_fmul(b, nir_channels(b, pos, 0x3), nir_frcp(b, nir_channel(b, pos, 3))); + nir_ssa_def *point_center_in_clip = nir_fmul(b, nir_trim_vector(b, pos, 2), + nir_frcp(b, nir_channel(b, pos, 3))); nir_ssa_def *point_center = nir_fmul(b, nir_fadd_imm(b, nir_fmul(b, point_center_in_clip, @@ -796,7 +797,7 @@ lower_pntc_read(nir_builder *b, nir_instr *instr, void *data) nir_replicate(b, nir_imm_float(b, 0), 2)); nir_ssa_def *pntc = nir_fadd_imm(b, - nir_fsub(b, nir_channels(b, pos, 0x3), nir_channels(b, point_center, 0x3)), + nir_fsub(b, nir_trim_vector(b, pos, 2), nir_trim_vector(b, point_center, 2)), 0.5); nir_ssa_def_rewrite_uses_after(point_center, pntc, pntc->parent_instr); return true; diff --git a/src/microsoft/vulkan/dzn_nir.c b/src/microsoft/vulkan/dzn_nir.c index cf13545219f..cc73b64c3c6 100644 --- a/src/microsoft/vulkan/dzn_nir.c +++ b/src/microsoft/vulkan/dzn_nir.c @@ -646,7 +646,7 @@ dzn_nir_blit_fs(const struct dzn_nir_blit_info *info) coord_var->data.location = VARYING_SLOT_TEX0; coord_var->data.driver_location = 1; nir_ssa_def *coord = - nir_channels(&b, nir_load_var(&b, coord_var), (1 << coord_comps) - 1); + nir_trim_vector(&b, nir_load_var(&b, coord_var), coord_comps); uint32_t out_comps = (info->loc == FRAG_RESULT_DEPTH || info->loc == FRAG_RESULT_STENCIL) ? 
1 : 4; @@ -771,7 +771,7 @@ dzn_nir_blit_fs(const struct dzn_nir_blit_info *info) res = &tex->dest.ssa; } - nir_store_var(&b, out, nir_channels(&b, res, (1 << out_comps) - 1), 0xf); + nir_store_var(&b, out, nir_trim_vector(&b, res, out_comps), 0xf); return b.shader; } diff --git a/src/panfrost/util/pan_lower_framebuffer.c b/src/panfrost/util/pan_lower_framebuffer.c index 0d5ee5a9a97..9317613f8c7 100644 --- a/src/panfrost/util/pan_lower_framebuffer.c +++ b/src/panfrost/util/pan_lower_framebuffer.c @@ -195,7 +195,7 @@ static nir_ssa_def * pan_unpack_pure_8(nir_builder *b, nir_ssa_def *pack, unsigned num_components) { nir_ssa_def *unpacked = nir_unpack_32_4x8(b, nir_channel(b, pack, 0)); - return nir_channels(b, unpacked, (1 << num_components) - 1); + return nir_trim_vector(b, unpacked, num_components); } static nir_ssa_def * @@ -347,7 +347,7 @@ pan_unpack_r11g11b10(nir_builder *b, nir_ssa_def *v) static nir_ssa_def * pan_linear_to_srgb(nir_builder *b, nir_ssa_def *linear) { - nir_ssa_def *rgb = nir_channels(b, linear, 0x7); + nir_ssa_def *rgb = nir_trim_vector(b, linear, 3); /* TODO: fp16 native conversion */ nir_ssa_def *srgb = diff --git a/src/panfrost/vulkan/panvk_vX_meta_copy.c b/src/panfrost/vulkan/panvk_vX_meta_copy.c index 7337efd5ee7..befcc2cbbab 100644 --- a/src/panfrost/vulkan/panvk_vX_meta_copy.c +++ b/src/panfrost/vulkan/panvk_vX_meta_copy.c @@ -390,7 +390,7 @@ panvk_meta_copy_img2img_shader(struct panfrost_device *pdev, if (dstcompsz == 16) texel = nir_u2u16(&b, texel); - texel = nir_channels(&b, texel, (1 << ndstcomps) - 1); + texel = nir_trim_vector(&b, texel, ndstcomps); outtype = glsl_vector_type(basetype, ndstcomps); }
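
As background for the mechanical rewrite above: the Coccinelle rules are valid because nir_trim_vector(b, def, n) keeps exactly the low n channels of a vector, which is what nir_channels(b, def, mask) does whenever the mask is a contiguous run of low bits (0x3, 0x7, BITFIELD_MASK(n), ...). Below is a minimal sketch of that relationship; it is an illustration written against NIR's public helpers (nir_channels, nir_component_mask), not the in-tree definition of nir_trim_vector, and the name trim_vector_sketch is made up for this example.

#include "nir_builder.h"

/* Illustration only: how a "trim to the first n components" helper can be
 * expressed in terms of nir_channels(). The real nir_trim_vector lives in
 * NIR's builder headers and may differ in detail. */
static inline nir_ssa_def *
trim_vector_sketch(nir_builder *b, nir_ssa_def *src, unsigned num_components)
{
   assert(src->num_components >= num_components);

   /* Trimming to the existing width is a no-op. */
   if (src->num_components == num_components)
      return src;

   /* nir_component_mask(n) is the contiguous low mask (1 << n) - 1 that the
    * Coccinelle rules in the commit message match against. */
   return nir_channels(b, src, nir_component_mask(num_components));
}

For example, trim_vector_sketch(b, frag_coord, 2) yields the xy pair that callers such as the radv_meta shaders previously requested with nir_channels(..., 0x3).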