nir: Rename nir_mem_access_size_align::align_mul to align

It's a simple alignment, so calling it align_mul is a bit misleading: in
NIR, align_mul normally pairs with align_offset to describe addresses of
the form align_mul * k + align_offset, and no such pairing exists here.

Suggested-by: M Henning <drawoc@darkrefraction.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21524>
Author:    Faith Ekstrand
Date:      2023-03-01 09:10:20 -06:00
Committed: Marge Bot
Commit:    eb9a56b6ca (parent 802bf1d9a6)

3 changed files with 19 additions and 19 deletions

@@ -4949,7 +4949,7 @@ bool nir_lower_explicit_io(nir_shader *shader,
 typedef struct {
    uint8_t num_components;
    uint8_t bit_size;
-   uint16_t align_mul;
+   uint16_t align;
 } nir_mem_access_size_align;
 
 typedef nir_mem_access_size_align

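For drivers that fill in this struct from their size/alignment callback, only the initializer changes. A minimal sketch of such a callback, with a hypothetical name and a deliberately abbreviated parameter list (the real callback receives the full set of arguments that get_mem_access_size_align takes below):

/* Hypothetical example: split every access into 32-bit units and require
 * 4-byte alignment.  Only the field name changes with this commit.
 */
static nir_mem_access_size_align
demo_size_align(nir_intrinsic_op intrin, uint8_t bytes)
{
   return (nir_mem_access_size_align) {
      .bit_size = 32,
      .num_components = DIV_ROUND_UP(bytes, 4),
      .align = 4,   /* previously .align_mul */
   };
}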

@@ -94,10 +94,10 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
                                  offset_is_const, cb_data);
    assert(util_is_power_of_two_nonzero(align_mul));
-   assert(util_is_power_of_two_nonzero(requested.align_mul));
+   assert(util_is_power_of_two_nonzero(requested.align));
 
    if (requested.num_components == num_components &&
        requested.bit_size == bit_size &&
-       requested.align_mul <= whole_align)
+       requested.align <= whole_align)
       return false;
 
    /* Otherwise, we have to break it into chunks. We could end up with as
@@ -115,22 +115,22 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
                                  offset_is_const, cb_data);
 
       unsigned chunk_bytes;
-      assert(util_is_power_of_two_nonzero(requested.align_mul));
-      if (align_mul < requested.align_mul) {
+      assert(util_is_power_of_two_nonzero(requested.align));
+      if (align_mul < requested.align) {
          /* For this case, we need to be able to shift the value so we assume
          * there's at most one component.
          */
         assert(requested.num_components == 1);
-        assert(requested.bit_size >= requested.align_mul * 8);
+        assert(requested.bit_size >= requested.align * 8);
 
-        uint64_t align_mask = requested.align_mul - 1;
+        uint64_t align_mask = requested.align - 1;
         nir_ssa_def *chunk_offset = nir_iadd_imm(b, offset, chunk_start);
         nir_ssa_def *pad = nir_iand_imm(b, chunk_offset, align_mask);
         chunk_offset = nir_iand_imm(b, chunk_offset, ~align_mask);
 
         nir_intrinsic_instr *load =
            dup_mem_intrinsic(b, intrin, chunk_offset,
-                             requested.align_mul, 0, NULL,
+                             requested.align, 0, NULL,
                              requested.num_components, requested.bit_size);
 
         nir_ssa_def *shifted =
@@ -139,9 +139,9 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
         chunk_bytes = MIN2(bytes_left, align_mul);
         assert(num_chunks < ARRAY_SIZE(chunks));
         chunks[num_chunks++] = nir_u2uN(b, shifted, chunk_bytes * 8);
-      } else if (chunk_align_offset % requested.align_mul) {
+      } else if (chunk_align_offset % requested.align) {
         /* In this case, we know how much to adjust the offset */
-        uint32_t delta = chunk_align_offset % requested.align_mul;
+        uint32_t delta = chunk_align_offset % requested.align;
 
         nir_ssa_def *chunk_offset =
            nir_iadd_imm(b, offset, chunk_start - (int)delta);
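The realign-and-shift path above is easier to follow with concrete numbers. A short worked example with hypothetical values, tracing the masking arithmetic from these hunks:

/* Hypothetical chunk: requested.align = 4, requested.bit_size = 32, and the
 * chunk's byte address works out to offset + 5.
 */
uint64_t align_mask = 4 - 1;        /* requested.align - 1           */
uint32_t addr = 5;                  /* unaligned chunk address       */
uint32_t pad  = addr & align_mask;  /* = 1 byte of misalignment      */
addr &= ~align_mask;                /* = 4, the aligned load address */
/* The pass loads 32 bits at address 4, shifts right by pad * 8 = 8 bits so
 * the requested byte lands at bit 0, and truncates the result to
 * chunk_bytes * 8 bits with nir_u2uN, as above.
 */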
@@ -224,10 +224,10 @@ lower_mem_store(nir_builder *b, nir_intrinsic_instr *intrin,
                                  offset_is_const, cb_data);
    assert(util_is_power_of_two_nonzero(align_mul));
-   assert(util_is_power_of_two_nonzero(requested.align_mul));
+   assert(util_is_power_of_two_nonzero(requested.align));
 
    if (requested.num_components == num_components &&
        requested.bit_size == bit_size &&
-       requested.align_mul <= whole_align &&
+       requested.align <= whole_align &&
        writemask == BITFIELD_MASK(num_components))
       return false;
@@ -263,9 +263,9 @@ lower_mem_store(nir_builder *b, nir_intrinsic_instr *intrin,
          requested.num_components * (requested.bit_size / 8);
       assert(chunk_bytes <= max_chunk_bytes);
-      assert(util_is_power_of_two_nonzero(requested.align_mul));
-      assert(requested.align_mul <= align_mul);
-      assert((chunk_align_offset % requested.align_mul) == 0);
+      assert(util_is_power_of_two_nonzero(requested.align));
+      assert(requested.align <= align_mul);
+      assert((chunk_align_offset % requested.align) == 0);
 
       nir_ssa_def *packed = nir_extract_bits(b, &value, 1, chunk_start * 8,
                                              requested.num_components,

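The store path is stricter than the load path: a store cannot be realigned by shifting, since widening it would clobber the bytes next to the chunk, so the requested alignment has to hold already. Restated with the reasoning spelled out (same names as the hunk above; a gloss on the existing asserts, not new code):

/* Stores may not request a stronger alignment than the one we can prove. */
assert(requested.align <= align_mul);
/* ...and every chunk must start on a requested.align boundary. */
assert((chunk_align_offset % requested.align) == 0);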

@@ -1283,7 +1283,7 @@ get_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
          return (nir_mem_access_size_align) {
             .bit_size = 32,
             .num_components = comps32,
-            .align_mul = 4,
+            .align = 4,
          };
       }
       break;
@@ -1293,7 +1293,7 @@ get_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
          return (nir_mem_access_size_align) {
             .bit_size = 32,
             .num_components = 1,
-            .align_mul = 4,
+            .align = 4,
          };
       }
       break;
@@ -1328,7 +1328,7 @@ get_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
          return (nir_mem_access_size_align) {
            .bit_size = bytes * 8,
            .num_components = 1,
-           .align_mul = 1,
+           .align = 1,
         };
      } else {
         bytes = MIN2(bytes, 16);
@@ -1336,7 +1336,7 @@ get_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
            .bit_size = 32,
            .num_components = is_scratch ? 1 :
                              is_load ? DIV_ROUND_UP(bytes, 4) : bytes / 4,
-           .align_mul = 4,
+           .align = 4,
         };
      }
   }
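
End to end, and assuming comps32 counts the 32-bit words covering bytes (i.e. DIV_ROUND_UP(bytes, 4) — its definition is outside these hunks), a hypothetical 12-byte access hitting the first case above comes back as:

/* bytes = 12  =>  comps32 = 3 under the assumption above. */
nir_mem_access_size_align r = {
   .bit_size = 32,
   .num_components = 3,
   .align = 4,   /* the renamed field */
};
/* lower_mem_load/lower_mem_store emit a single 3 x 32-bit access when the
 * known alignment satisfies r.align, and otherwise fall back to the
 * chunking paths shown earlier.
 */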