freedreno: Drop foreach_bit() macro
Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9191>
This commit is contained in:
@@ -133,7 +133,7 @@ fd5_launch_grid(struct fd_context *ctx, const struct pipe_grid_info *info)
|
|||||||
fd5_emit_cs_state(ctx, ring, v);
|
fd5_emit_cs_state(ctx, ring, v);
|
||||||
fd5_emit_cs_consts(v, ring, ctx, info);
|
fd5_emit_cs_consts(v, ring, ctx, info);
|
||||||
|
|
||||||
foreach_bit(i, ctx->global_bindings.enabled_mask)
|
u_foreach_bit(i, ctx->global_bindings.enabled_mask)
|
||||||
nglobal++;
|
nglobal++;
|
||||||
|
|
||||||
if (nglobal > 0) {
|
if (nglobal > 0) {
|
||||||
@@ -144,7 +144,7 @@ fd5_launch_grid(struct fd_context *ctx, const struct pipe_grid_info *info)
|
|||||||
* payload:
|
* payload:
|
||||||
*/
|
*/
|
||||||
OUT_PKT7(ring, CP_NOP, 2 * nglobal);
|
OUT_PKT7(ring, CP_NOP, 2 * nglobal);
|
||||||
foreach_bit(i, ctx->global_bindings.enabled_mask) {
|
u_foreach_bit(i, ctx->global_bindings.enabled_mask) {
|
||||||
struct pipe_resource *prsc = ctx->global_bindings.buf[i];
|
struct pipe_resource *prsc = ctx->global_bindings.buf[i];
|
||||||
OUT_RELOC(ring, fd_resource(prsc)->bo, 0, 0, 0);
|
OUT_RELOC(ring, fd_resource(prsc)->bo, 0, 0, 0);
|
||||||
}
|
}
|
||||||
|
@@ -120,7 +120,7 @@ fd6_launch_grid(struct fd_context *ctx, const struct pipe_grid_info *info)
|
|||||||
fd6_emit_cs_state(ctx, ring, v);
|
fd6_emit_cs_state(ctx, ring, v);
|
||||||
fd6_emit_cs_consts(v, ring, ctx, info);
|
fd6_emit_cs_consts(v, ring, ctx, info);
|
||||||
|
|
||||||
foreach_bit(i, ctx->global_bindings.enabled_mask)
|
u_foreach_bit(i, ctx->global_bindings.enabled_mask)
|
||||||
nglobal++;
|
nglobal++;
|
||||||
|
|
||||||
if (nglobal > 0) {
|
if (nglobal > 0) {
|
||||||
@@ -131,7 +131,7 @@ fd6_launch_grid(struct fd_context *ctx, const struct pipe_grid_info *info)
|
|||||||
* payload:
|
* payload:
|
||||||
*/
|
*/
|
||||||
OUT_PKT7(ring, CP_NOP, 2 * nglobal);
|
OUT_PKT7(ring, CP_NOP, 2 * nglobal);
|
||||||
foreach_bit(i, ctx->global_bindings.enabled_mask) {
|
u_foreach_bit(i, ctx->global_bindings.enabled_mask) {
|
||||||
struct pipe_resource *prsc = ctx->global_bindings.buf[i];
|
struct pipe_resource *prsc = ctx->global_bindings.buf[i];
|
||||||
OUT_RELOC(ring, fd_resource(prsc)->bo, 0, 0, 0);
|
OUT_RELOC(ring, fd_resource(prsc)->bo, 0, 0, 0);
|
||||||
}
|
}
|
||||||
|
@@ -531,7 +531,7 @@ fd6_clear(struct fd_context *ctx, unsigned buffers,
|
|||||||
if (ctx->batch->num_draws > 0)
|
if (ctx->batch->num_draws > 0)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
foreach_bit(i, color_buffers)
|
u_foreach_bit(i, color_buffers)
|
||||||
ctx->batch->clear_color[i] = *color;
|
ctx->batch->clear_color[i] = *color;
|
||||||
if (buffers & PIPE_CLEAR_DEPTH)
|
if (buffers & PIPE_CLEAR_DEPTH)
|
||||||
ctx->batch->clear_depth = depth;
|
ctx->batch->clear_depth = depth;
|
||||||
|
@@ -131,15 +131,15 @@ batch_draw_tracking_for_dirty_bits(struct fd_batch *batch)
|
|||||||
if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_SSBO) {
|
if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_SSBO) {
|
||||||
const struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[PIPE_SHADER_FRAGMENT];
|
const struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[PIPE_SHADER_FRAGMENT];
|
||||||
|
|
||||||
foreach_bit (i, so->enabled_mask & so->writable_mask)
|
u_foreach_bit (i, so->enabled_mask & so->writable_mask)
|
||||||
resource_written(batch, so->sb[i].buffer);
|
resource_written(batch, so->sb[i].buffer);
|
||||||
|
|
||||||
foreach_bit (i, so->enabled_mask & ~so->writable_mask)
|
u_foreach_bit (i, so->enabled_mask & ~so->writable_mask)
|
||||||
resource_read(batch, so->sb[i].buffer);
|
resource_read(batch, so->sb[i].buffer);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_IMAGE) {
|
if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_IMAGE) {
|
||||||
foreach_bit (i, ctx->shaderimg[PIPE_SHADER_FRAGMENT].enabled_mask) {
|
u_foreach_bit (i, ctx->shaderimg[PIPE_SHADER_FRAGMENT].enabled_mask) {
|
||||||
struct pipe_image_view *img =
|
struct pipe_image_view *img =
|
||||||
&ctx->shaderimg[PIPE_SHADER_FRAGMENT].si[i];
|
&ctx->shaderimg[PIPE_SHADER_FRAGMENT].si[i];
|
||||||
if (img->access & PIPE_IMAGE_ACCESS_WRITE)
|
if (img->access & PIPE_IMAGE_ACCESS_WRITE)
|
||||||
@@ -150,18 +150,18 @@ batch_draw_tracking_for_dirty_bits(struct fd_batch *batch)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_CONST) {
|
if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_CONST) {
|
||||||
foreach_bit (i, ctx->constbuf[PIPE_SHADER_VERTEX].enabled_mask)
|
u_foreach_bit (i, ctx->constbuf[PIPE_SHADER_VERTEX].enabled_mask)
|
||||||
resource_read(batch, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
|
resource_read(batch, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_CONST) {
|
if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_CONST) {
|
||||||
foreach_bit (i, ctx->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
|
u_foreach_bit (i, ctx->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
|
||||||
resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
|
resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Mark VBOs as being read */
|
/* Mark VBOs as being read */
|
||||||
if (ctx->dirty & FD_DIRTY_VTXBUF) {
|
if (ctx->dirty & FD_DIRTY_VTXBUF) {
|
||||||
foreach_bit (i, ctx->vtx.vertexbuf.enabled_mask) {
|
u_foreach_bit (i, ctx->vtx.vertexbuf.enabled_mask) {
|
||||||
assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
|
assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
|
||||||
resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
|
resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
|
||||||
}
|
}
|
||||||
@@ -169,12 +169,12 @@ batch_draw_tracking_for_dirty_bits(struct fd_batch *batch)
|
|||||||
|
|
||||||
/* Mark textures as being read */
|
/* Mark textures as being read */
|
||||||
if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_TEX) {
|
if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_TEX) {
|
||||||
foreach_bit (i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
|
u_foreach_bit (i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
|
||||||
resource_read(batch, ctx->tex[PIPE_SHADER_VERTEX].textures[i]->texture);
|
resource_read(batch, ctx->tex[PIPE_SHADER_VERTEX].textures[i]->texture);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_TEX) {
|
if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_TEX) {
|
||||||
foreach_bit (i, ctx->tex[PIPE_SHADER_FRAGMENT].valid_textures)
|
u_foreach_bit (i, ctx->tex[PIPE_SHADER_FRAGMENT].valid_textures)
|
||||||
resource_read(batch, ctx->tex[PIPE_SHADER_FRAGMENT].textures[i]->texture);
|
resource_read(batch, ctx->tex[PIPE_SHADER_FRAGMENT].textures[i]->texture);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -535,13 +535,13 @@ fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
|
|||||||
fd_screen_lock(ctx->screen);
|
fd_screen_lock(ctx->screen);
|
||||||
|
|
||||||
/* Mark SSBOs */
|
/* Mark SSBOs */
|
||||||
foreach_bit (i, so->enabled_mask & so->writable_mask)
|
u_foreach_bit (i, so->enabled_mask & so->writable_mask)
|
||||||
resource_written(batch, so->sb[i].buffer);
|
resource_written(batch, so->sb[i].buffer);
|
||||||
|
|
||||||
foreach_bit (i, so->enabled_mask & ~so->writable_mask)
|
u_foreach_bit (i, so->enabled_mask & ~so->writable_mask)
|
||||||
resource_read(batch, so->sb[i].buffer);
|
resource_read(batch, so->sb[i].buffer);
|
||||||
|
|
||||||
foreach_bit(i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
|
u_foreach_bit(i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
|
||||||
struct pipe_image_view *img =
|
struct pipe_image_view *img =
|
||||||
&ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i];
|
&ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i];
|
||||||
if (img->access & PIPE_IMAGE_ACCESS_WRITE)
|
if (img->access & PIPE_IMAGE_ACCESS_WRITE)
|
||||||
@@ -551,17 +551,17 @@ fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* UBO's are read */
|
/* UBO's are read */
|
||||||
foreach_bit(i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask)
|
u_foreach_bit(i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask)
|
||||||
resource_read(batch, ctx->constbuf[PIPE_SHADER_COMPUTE].cb[i].buffer);
|
resource_read(batch, ctx->constbuf[PIPE_SHADER_COMPUTE].cb[i].buffer);
|
||||||
|
|
||||||
/* Mark textures as being read */
|
/* Mark textures as being read */
|
||||||
foreach_bit(i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures)
|
u_foreach_bit(i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures)
|
||||||
resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture);
|
resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture);
|
||||||
|
|
||||||
/* For global buffers, we don't really know if read or written, so assume
|
/* For global buffers, we don't really know if read or written, so assume
|
||||||
* the worst:
|
* the worst:
|
||||||
*/
|
*/
|
||||||
foreach_bit(i, ctx->global_bindings.enabled_mask)
|
u_foreach_bit(i, ctx->global_bindings.enabled_mask)
|
||||||
resource_written(batch, ctx->global_bindings.buf[i]);
|
resource_written(batch, ctx->global_bindings.buf[i]);
|
||||||
|
|
||||||
if (info->indirect)
|
if (info->indirect)
|
||||||
|
@@ -378,10 +378,6 @@ pack_rgba(enum pipe_format format, const float *rgba)
|
|||||||
#define swap(a, b) \
|
#define swap(a, b) \
|
||||||
do { __typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
|
do { __typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
|
||||||
|
|
||||||
#define foreach_bit(b, mask) \
|
|
||||||
for (uint32_t _m = (mask), b; _m && ({(b) = u_bit_scan(&_m); (void)(b); 1;});)
|
|
||||||
|
|
||||||
|
|
||||||
#define BIT(bit) (1u << bit)
|
#define BIT(bit) (1u << bit)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
Reference in New Issue
Block a user