intel/compiler: Prevent warnings in the following patch
The next patch replaces an unsigned bitfield with a plain unsigned, which triggers gcc to begin warning on signed/unsigned comparisons. Keeping this patch separate from the actual move preserves bisectability and temporarily generates no additional warnings. Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
This commit is contained in:
@@ -2874,8 +2874,8 @@ fs_visitor::opt_register_renaming()
|
|||||||
bool progress = false;
|
bool progress = false;
|
||||||
int depth = 0;
|
int depth = 0;
|
||||||
|
|
||||||
int remap[alloc.count];
|
unsigned remap[alloc.count];
|
||||||
memset(remap, -1, sizeof(int) * alloc.count);
|
memset(remap, ~0u, sizeof(unsigned) * alloc.count);
|
||||||
|
|
||||||
foreach_block_and_inst(block, fs_inst, inst, cfg) {
|
foreach_block_and_inst(block, fs_inst, inst, cfg) {
|
||||||
if (inst->opcode == BRW_OPCODE_IF || inst->opcode == BRW_OPCODE_DO) {
|
if (inst->opcode == BRW_OPCODE_IF || inst->opcode == BRW_OPCODE_DO) {
|
||||||
@@ -2888,20 +2888,20 @@ fs_visitor::opt_register_renaming()
|
|||||||
/* Rewrite instruction sources. */
|
/* Rewrite instruction sources. */
|
||||||
for (int i = 0; i < inst->sources; i++) {
|
for (int i = 0; i < inst->sources; i++) {
|
||||||
if (inst->src[i].file == VGRF &&
|
if (inst->src[i].file == VGRF &&
|
||||||
remap[inst->src[i].nr] != -1 &&
|
remap[inst->src[i].nr] != ~0u &&
|
||||||
remap[inst->src[i].nr] != inst->src[i].nr) {
|
remap[inst->src[i].nr] != inst->src[i].nr) {
|
||||||
inst->src[i].nr = remap[inst->src[i].nr];
|
inst->src[i].nr = remap[inst->src[i].nr];
|
||||||
progress = true;
|
progress = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const int dst = inst->dst.nr;
|
const unsigned dst = inst->dst.nr;
|
||||||
|
|
||||||
if (depth == 0 &&
|
if (depth == 0 &&
|
||||||
inst->dst.file == VGRF &&
|
inst->dst.file == VGRF &&
|
||||||
alloc.sizes[inst->dst.nr] * REG_SIZE == inst->size_written &&
|
alloc.sizes[inst->dst.nr] * REG_SIZE == inst->size_written &&
|
||||||
!inst->is_partial_write()) {
|
!inst->is_partial_write()) {
|
||||||
if (remap[dst] == -1) {
|
if (remap[dst] == ~0u) {
|
||||||
remap[dst] = dst;
|
remap[dst] = dst;
|
||||||
} else {
|
} else {
|
||||||
remap[dst] = alloc.allocate(regs_written(inst));
|
remap[dst] = alloc.allocate(regs_written(inst));
|
||||||
@@ -2909,7 +2909,7 @@ fs_visitor::opt_register_renaming()
|
|||||||
progress = true;
|
progress = true;
|
||||||
}
|
}
|
||||||
} else if (inst->dst.file == VGRF &&
|
} else if (inst->dst.file == VGRF &&
|
||||||
remap[dst] != -1 &&
|
remap[dst] != ~0u &&
|
||||||
remap[dst] != dst) {
|
remap[dst] != dst) {
|
||||||
inst->dst.nr = remap[dst];
|
inst->dst.nr = remap[dst];
|
||||||
progress = true;
|
progress = true;
|
||||||
@@ -2920,7 +2920,7 @@ fs_visitor::opt_register_renaming()
|
|||||||
invalidate_live_intervals();
|
invalidate_live_intervals();
|
||||||
|
|
||||||
for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
|
for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
|
||||||
if (delta_xy[i].file == VGRF && remap[delta_xy[i].nr] != -1) {
|
if (delta_xy[i].file == VGRF && remap[delta_xy[i].nr] != ~0u) {
|
||||||
delta_xy[i].nr = remap[delta_xy[i].nr];
|
delta_xy[i].nr = remap[delta_xy[i].nr];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -3608,7 +3608,7 @@ void
|
|||||||
fs_visitor::insert_gen4_post_send_dependency_workarounds(bblock_t *block, fs_inst *inst)
|
fs_visitor::insert_gen4_post_send_dependency_workarounds(bblock_t *block, fs_inst *inst)
|
||||||
{
|
{
|
||||||
int write_len = regs_written(inst);
|
int write_len = regs_written(inst);
|
||||||
int first_write_grf = inst->dst.nr;
|
unsigned first_write_grf = inst->dst.nr;
|
||||||
bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
|
bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
|
||||||
assert(write_len < (int)sizeof(needs_dep) - 1);
|
assert(write_len < (int)sizeof(needs_dep) - 1);
|
||||||
|
|
||||||
@@ -4783,7 +4783,7 @@ lower_sampler_logical_send_gen7(const fs_builder &bld, fs_inst *inst, opcode op,
|
|||||||
bld.MOV(sources[length++], min_lod);
|
bld.MOV(sources[length++], min_lod);
|
||||||
}
|
}
|
||||||
|
|
||||||
int mlen;
|
unsigned mlen;
|
||||||
if (reg_width == 2)
|
if (reg_width == 2)
|
||||||
mlen = length * reg_width - header_size;
|
mlen = length * reg_width - header_size;
|
||||||
else
|
else
|
||||||
|
@@ -119,7 +119,7 @@ public:
|
|||||||
void setup_payload_interference(struct ra_graph *g, int payload_reg_count,
|
void setup_payload_interference(struct ra_graph *g, int payload_reg_count,
|
||||||
int first_payload_node);
|
int first_payload_node);
|
||||||
int choose_spill_reg(struct ra_graph *g);
|
int choose_spill_reg(struct ra_graph *g);
|
||||||
void spill_reg(int spill_reg);
|
void spill_reg(unsigned spill_reg);
|
||||||
void split_virtual_grfs();
|
void split_virtual_grfs();
|
||||||
bool compact_virtual_grfs();
|
bool compact_virtual_grfs();
|
||||||
void assign_constant_locations();
|
void assign_constant_locations();
|
||||||
|
@@ -1840,7 +1840,7 @@ fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Store the control data bits in the message payload and send it. */
|
/* Store the control data bits in the message payload and send it. */
|
||||||
int mlen = 2;
|
unsigned mlen = 2;
|
||||||
if (channel_mask.file != BAD_FILE)
|
if (channel_mask.file != BAD_FILE)
|
||||||
mlen += 4; /* channel masks, plus 3 extra copies of the data */
|
mlen += 4; /* channel masks, plus 3 extra copies of the data */
|
||||||
if (per_slot_offset.file != BAD_FILE)
|
if (per_slot_offset.file != BAD_FILE)
|
||||||
@@ -1848,7 +1848,7 @@ fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
|
|||||||
|
|
||||||
fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
|
fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
|
||||||
fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
|
fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
|
||||||
int i = 0;
|
unsigned i = 0;
|
||||||
sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
|
sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
|
||||||
if (per_slot_offset.file != BAD_FILE)
|
if (per_slot_offset.file != BAD_FILE)
|
||||||
sources[i++] = per_slot_offset;
|
sources[i++] = per_slot_offset;
|
||||||
|
@@ -912,7 +912,7 @@ fs_visitor::choose_spill_reg(struct ra_graph *g)
|
|||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
fs_visitor::spill_reg(int spill_reg)
|
fs_visitor::spill_reg(unsigned spill_reg)
|
||||||
{
|
{
|
||||||
int size = alloc.sizes[spill_reg];
|
int size = alloc.sizes[spill_reg];
|
||||||
unsigned int spill_offset = last_scratch;
|
unsigned int spill_offset = last_scratch;
|
||||||
|
@@ -158,7 +158,7 @@ fs_visitor::register_coalesce()
|
|||||||
|
|
||||||
int src_size = 0;
|
int src_size = 0;
|
||||||
int channels_remaining = 0;
|
int channels_remaining = 0;
|
||||||
int src_reg = -1, dst_reg = -1;
|
unsigned src_reg = ~0u, dst_reg = ~0u;
|
||||||
int dst_reg_offset[MAX_VGRF_SIZE];
|
int dst_reg_offset[MAX_VGRF_SIZE];
|
||||||
fs_inst *mov[MAX_VGRF_SIZE];
|
fs_inst *mov[MAX_VGRF_SIZE];
|
||||||
int dst_var[MAX_VGRF_SIZE];
|
int dst_var[MAX_VGRF_SIZE];
|
||||||
@@ -221,7 +221,7 @@ fs_visitor::register_coalesce()
|
|||||||
if (dst_reg_offset[i] != dst_reg_offset[0] + i) {
|
if (dst_reg_offset[i] != dst_reg_offset[0] + i) {
|
||||||
/* Registers are out-of-order. */
|
/* Registers are out-of-order. */
|
||||||
can_coalesce = false;
|
can_coalesce = false;
|
||||||
src_reg = -1;
|
src_reg = ~0u;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -231,7 +231,7 @@ fs_visitor::register_coalesce()
|
|||||||
if (!can_coalesce_vars(live_intervals, cfg, inst,
|
if (!can_coalesce_vars(live_intervals, cfg, inst,
|
||||||
dst_var[i], src_var[i])) {
|
dst_var[i], src_var[i])) {
|
||||||
can_coalesce = false;
|
can_coalesce = false;
|
||||||
src_reg = -1;
|
src_reg = ~0u;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -278,7 +278,7 @@ fs_visitor::register_coalesce()
|
|||||||
MAX2(live_intervals->end[dst_var[i]],
|
MAX2(live_intervals->end[dst_var[i]],
|
||||||
live_intervals->end[src_var[i]]);
|
live_intervals->end[src_var[i]]);
|
||||||
}
|
}
|
||||||
src_reg = -1;
|
src_reg = ~0u;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (progress) {
|
if (progress) {
|
||||||
|
@@ -430,7 +430,7 @@ schedule_node::set_latency_gen7(bool is_haswell)
|
|||||||
class instruction_scheduler {
|
class instruction_scheduler {
|
||||||
public:
|
public:
|
||||||
instruction_scheduler(backend_shader *s, int grf_count,
|
instruction_scheduler(backend_shader *s, int grf_count,
|
||||||
int hw_reg_count, int block_count,
|
unsigned hw_reg_count, int block_count,
|
||||||
instruction_scheduler_mode mode)
|
instruction_scheduler_mode mode)
|
||||||
{
|
{
|
||||||
this->bs = s;
|
this->bs = s;
|
||||||
@@ -511,7 +511,7 @@ public:
|
|||||||
bool post_reg_alloc;
|
bool post_reg_alloc;
|
||||||
int instructions_to_schedule;
|
int instructions_to_schedule;
|
||||||
int grf_count;
|
int grf_count;
|
||||||
int hw_reg_count;
|
unsigned hw_reg_count;
|
||||||
int reg_pressure;
|
int reg_pressure;
|
||||||
int block_idx;
|
int block_idx;
|
||||||
exec_list instructions;
|
exec_list instructions;
|
||||||
@@ -665,7 +665,7 @@ fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
|
|||||||
int payload_last_use_ip[hw_reg_count];
|
int payload_last_use_ip[hw_reg_count];
|
||||||
v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);
|
v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);
|
||||||
|
|
||||||
for (int i = 0; i < hw_reg_count; i++) {
|
for (unsigned i = 0; i < hw_reg_count; i++) {
|
||||||
if (payload_last_use_ip[i] == -1)
|
if (payload_last_use_ip[i] == -1)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
@@ -409,7 +409,7 @@ vec4_visitor::opt_vector_float()
|
|||||||
bool progress = false;
|
bool progress = false;
|
||||||
|
|
||||||
foreach_block(block, cfg) {
|
foreach_block(block, cfg) {
|
||||||
int last_reg = -1, last_offset = -1;
|
unsigned last_reg = ~0u, last_offset = ~0u;
|
||||||
enum brw_reg_file last_reg_file = BAD_FILE;
|
enum brw_reg_file last_reg_file = BAD_FILE;
|
||||||
|
|
||||||
uint8_t imm[4] = { 0 };
|
uint8_t imm[4] = { 0 };
|
||||||
@@ -442,7 +442,7 @@ vec4_visitor::opt_vector_float()
|
|||||||
need_type = BRW_REGISTER_TYPE_F;
|
need_type = BRW_REGISTER_TYPE_F;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
last_reg = -1;
|
last_reg = ~0u;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* If this wasn't a MOV, or the destination register doesn't match,
|
/* If this wasn't a MOV, or the destination register doesn't match,
|
||||||
@@ -470,7 +470,7 @@ vec4_visitor::opt_vector_float()
|
|||||||
}
|
}
|
||||||
|
|
||||||
inst_count = 0;
|
inst_count = 0;
|
||||||
last_reg = -1;
|
last_reg = ~0u;;
|
||||||
writemask = 0;
|
writemask = 0;
|
||||||
dest_type = BRW_REGISTER_TYPE_F;
|
dest_type = BRW_REGISTER_TYPE_F;
|
||||||
|
|
||||||
@@ -1397,8 +1397,10 @@ vec4_visitor::opt_register_coalesce()
|
|||||||
* in the register instead.
|
* in the register instead.
|
||||||
*/
|
*/
|
||||||
if (to_mrf && scan_inst->mlen > 0) {
|
if (to_mrf && scan_inst->mlen > 0) {
|
||||||
if (inst->dst.nr >= scan_inst->base_mrf &&
|
unsigned start = scan_inst->base_mrf;
|
||||||
inst->dst.nr < scan_inst->base_mrf + scan_inst->mlen) {
|
unsigned end = scan_inst->base_mrf + scan_inst->mlen;
|
||||||
|
|
||||||
|
if (inst->dst.nr >= start && inst->dst.nr < end) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@@ -132,7 +132,7 @@ public:
|
|||||||
bool reg_allocate();
|
bool reg_allocate();
|
||||||
void evaluate_spill_costs(float *spill_costs, bool *no_spill);
|
void evaluate_spill_costs(float *spill_costs, bool *no_spill);
|
||||||
int choose_spill_reg(struct ra_graph *g);
|
int choose_spill_reg(struct ra_graph *g);
|
||||||
void spill_reg(int spill_reg);
|
void spill_reg(unsigned spill_reg);
|
||||||
void move_grf_array_access_to_scratch();
|
void move_grf_array_access_to_scratch();
|
||||||
void move_uniform_array_access_to_pull_constants();
|
void move_uniform_array_access_to_pull_constants();
|
||||||
void move_push_constants_to_pull_constants();
|
void move_push_constants_to_pull_constants();
|
||||||
|
@@ -502,18 +502,18 @@ vec4_visitor::choose_spill_reg(struct ra_graph *g)
|
|||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
vec4_visitor::spill_reg(int spill_reg_nr)
|
vec4_visitor::spill_reg(unsigned spill_reg_nr)
|
||||||
{
|
{
|
||||||
assert(alloc.sizes[spill_reg_nr] == 1 || alloc.sizes[spill_reg_nr] == 2);
|
assert(alloc.sizes[spill_reg_nr] == 1 || alloc.sizes[spill_reg_nr] == 2);
|
||||||
unsigned int spill_offset = last_scratch;
|
unsigned spill_offset = last_scratch;
|
||||||
last_scratch += alloc.sizes[spill_reg_nr];
|
last_scratch += alloc.sizes[spill_reg_nr];
|
||||||
|
|
||||||
/* Generate spill/unspill instructions for the objects being spilled. */
|
/* Generate spill/unspill instructions for the objects being spilled. */
|
||||||
int scratch_reg = -1;
|
unsigned scratch_reg = ~0u;
|
||||||
foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
|
foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
|
||||||
for (unsigned int i = 0; i < 3; i++) {
|
for (unsigned i = 0; i < 3; i++) {
|
||||||
if (inst->src[i].file == VGRF && inst->src[i].nr == spill_reg_nr) {
|
if (inst->src[i].file == VGRF && inst->src[i].nr == spill_reg_nr) {
|
||||||
if (scratch_reg == -1 ||
|
if (scratch_reg == ~0u ||
|
||||||
!can_use_scratch_for_source(inst, i, scratch_reg)) {
|
!can_use_scratch_for_source(inst, i, scratch_reg)) {
|
||||||
/* We need to unspill anyway so make sure we read the full vec4
|
/* We need to unspill anyway so make sure we read the full vec4
|
||||||
* in any case. This way, the cached register can be reused
|
* in any case. This way, the cached register can be reused
|
||||||
@@ -529,7 +529,7 @@ vec4_visitor::spill_reg(int spill_reg_nr)
|
|||||||
dst_reg(temp), inst->src[i], spill_offset);
|
dst_reg(temp), inst->src[i], spill_offset);
|
||||||
temp.offset = inst->src[i].offset;
|
temp.offset = inst->src[i].offset;
|
||||||
}
|
}
|
||||||
assert(scratch_reg != -1);
|
assert(scratch_reg != ~0u);
|
||||||
inst->src[i].nr = scratch_reg;
|
inst->src[i].nr = scratch_reg;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -1337,8 +1337,8 @@ vec4_visitor::emit_urb_slot(dst_reg reg, int varying)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static unsigned
|
||||||
align_interleaved_urb_mlen(const struct gen_device_info *devinfo, int mlen)
|
align_interleaved_urb_mlen(const struct gen_device_info *devinfo, unsigned mlen)
|
||||||
{
|
{
|
||||||
if (devinfo->gen >= 6) {
|
if (devinfo->gen >= 6) {
|
||||||
/* URB data written (does not include the message header reg) must
|
/* URB data written (does not include the message header reg) must
|
||||||
|
@@ -274,8 +274,8 @@ gen6_gs_visitor::emit_urb_write_header(int mrf)
|
|||||||
emit(GS_OPCODE_SET_DWORD_2, dst_reg(MRF, mrf), flags_data);
|
emit(GS_OPCODE_SET_DWORD_2, dst_reg(MRF, mrf), flags_data);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static unsigned
|
||||||
align_interleaved_urb_mlen(int mlen)
|
align_interleaved_urb_mlen(unsigned mlen)
|
||||||
{
|
{
|
||||||
/* URB data written (does not include the message header reg) must
|
/* URB data written (does not include the message header reg) must
|
||||||
* be a multiple of 256 bits, or 2 VS registers. See vol5c.5,
|
* be a multiple of 256 bits, or 2 VS registers. See vol5c.5,
|
||||||
|
Reference in New Issue
Block a user