util+treewide: container_of() cleanup
Replace mesa's slightly different container_of() with one more aligned
to the linux kernel's version, which takes a type as the 2nd param.
This avoids warnings like:

  freedreno_context.c:396:44: warning: variable 'batch' is uninitialized when used within its own initialization [-Wuninitialized]

At the same time, we can add additional build-time type-checking asserts.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Acked-by: Erik Faye-Lund <erik.faye-lund@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/7941>
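For illustration, a minimal before/after sketch of the two calling conventions (simplified; not part of the diff). The old mesa macro took a sample pointer of the containing type as its second argument, so converting a bare member pointer meant initializing that sample with the macro's own result, which is exactly what trips -Wuninitialized above. The kernel-style macro names the containing type instead:

    /* old mesa convention: 2nd arg is a sample pointer of the containing type */
    struct fd_batch *batch = container_of(ut, batch, trace);  /* 'batch' used in its own initialization */

    /* new kernel-style convention: 2nd arg is the containing type itself */
    struct fd_batch *batch = container_of(ut, struct fd_batch, trace);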
@@ -96,8 +96,7 @@ struct radv_shader_output_values {
 static inline struct radv_shader_context *
 radv_shader_context_from_abi(struct ac_shader_abi *abi)
 {
-   struct radv_shader_context *ctx = NULL;
-   return container_of(abi, ctx, abi);
+   return container_of(abi, struct radv_shader_context, abi);
 }
 
 static LLVMValueRef get_rel_patch_id(struct radv_shader_context *ctx)
@@ -48,8 +48,7 @@ struct radv_shader_args {
 static inline struct radv_shader_args *
 radv_shader_args_from_ac(struct ac_shader_args *args)
 {
-   struct radv_shader_args *radv_args = NULL;
-   return (struct radv_shader_args *) container_of(args, radv_args, ac);
+   return container_of(args, struct radv_shader_args, ac);
 }
 
 void radv_declare_shader_args(struct radv_shader_args *args,
@@ -1490,7 +1490,7 @@ qpu_set_branch_targets(struct v3d_compile *c)
         struct list_head *entry = block->instructions.prev;
         for (int i = 0; i < 3; i++)
                 entry = entry->prev;
-        struct qinst *branch = container_of(entry, branch, link);
+        struct qinst *branch = container_of(entry, struct qinst, link);
         assert(branch->qpu.type == V3D_QPU_INSTR_TYPE_BRANCH);
 
         /* Make sure that the if-we-don't-jump
@@ -47,9 +47,6 @@
 #define U642VOID(x) ((void *)(uintptr_t)(x))
 #define VOID2U64(x) ((uint64_t)(uintptr_t)(x))
 
-#define container_of(ptr, type, field) \
-   (type*)((char*)ptr - offsetof(type, field))
-
 struct rbug_rbug
 {
    struct rbug_screen *rb_screen;
@@ -393,7 +393,7 @@ static void
 fd_trace_record_ts(struct u_trace *ut, struct pipe_resource *timestamps,
       unsigned idx)
 {
-   struct fd_batch *batch = container_of(ut, batch, trace);
+   struct fd_batch *batch = container_of(ut, struct fd_batch, trace);
    struct fd_ringbuffer *ring = batch->nondraw ? batch->draw : batch->gmem;
 
    if (ring->cur == batch->last_timestamp_cmd) {
@@ -411,7 +411,7 @@ static uint64_t
 fd_trace_read_ts(struct u_trace_context *utctx,
       struct pipe_resource *timestamps, unsigned idx)
 {
-   struct fd_context *ctx = container_of(utctx, ctx, trace_context);
+   struct fd_context *ctx = container_of(utctx, struct fd_context, trace_context);
    struct fd_bo *ts_bo = fd_resource(timestamps)->bo;
 
    /* Only need to stall on results for the first entry: */
@@ -81,12 +81,12 @@ iris_lost_context_state(struct iris_batch *batch)
    struct iris_context *ice = NULL;
 
    if (batch->name == IRIS_BATCH_RENDER) {
-      ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]);
+      ice = container_of(batch, struct iris_context, batches[IRIS_BATCH_RENDER]);
       assert(&ice->batches[IRIS_BATCH_RENDER] == batch);
 
       batch->screen->vtbl.init_render_context(batch);
    } else if (batch->name == IRIS_BATCH_COMPUTE) {
-      ice = container_of(batch, ice, batches[IRIS_BATCH_COMPUTE]);
+      ice = container_of(batch, struct iris_context, batches[IRIS_BATCH_COMPUTE]);
       assert(&ice->batches[IRIS_BATCH_COMPUTE] == batch);
 
       batch->screen->vtbl.init_compute_context(batch);
@@ -812,8 +812,7 @@ iris_upload_slice_hashing_state(struct iris_batch *batch)
    if (subslices_delta == 0)
       return;
 
-   struct iris_context *ice = NULL;
-   ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]);
+   struct iris_context *ice = container_of(batch, struct iris_context, batches[IRIS_BATCH_RENDER]);
    assert(&ice->batches[IRIS_BATCH_RENDER] == batch);
 
    unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
@@ -42,7 +42,7 @@ nv30_context_kick_notify(struct nouveau_pushbuf *push)
 
    if (!push->user_priv)
       return;
-   nv30 = container_of(push->user_priv, nv30, bufctx);
+   nv30 = container_of(push->user_priv, struct nv30_context, bufctx);
    screen = &nv30->screen->base;
 
    nouveau_fence_next(screen);
@@ -436,7 +436,7 @@ static void compute_memory_move_item(struct compute_memory_pool *pool,
 
    if (pool->item_list != item->link.prev) {
       ASSERTED struct compute_memory_item *prev;
-      prev = container_of(item->link.prev, item, link);
+      prev = container_of(item->link.prev, struct compute_memory_item, link);
       assert(prev->start_in_dw + prev->size_in_dw <= new_start_in_dw);
    }
 
@@ -634,8 +634,7 @@ void si_query_hw_destroy(struct si_context *sctx, struct si_query *squery)
 
 static bool si_query_hw_prepare_buffer(struct si_context *sctx, struct si_query_buffer *qbuf)
 {
-   static const struct si_query_hw si_query_hw_s;
-   struct si_query_hw *query = container_of(qbuf, &si_query_hw_s, buffer);
+   struct si_query_hw *query = container_of(qbuf, struct si_query_hw, buffer);
    struct si_screen *screen = sctx->screen;
 
    /* The caller ensures that the buffer is currently unused by the GPU. */
@@ -164,8 +164,7 @@ struct si_shader_context {
 
 static inline struct si_shader_context *si_shader_context_from_abi(struct ac_shader_abi *abi)
 {
-   struct si_shader_context *ctx = NULL;
-   return container_of(abi, ctx, abi);
+   return container_of(abi, struct si_shader_context, abi);
 }
 
 /* si_shader.c */
@@ -481,7 +481,7 @@ svga_buffer_upload_gb_command(struct svga_context *svga,
    if (!invalidate_cmd)
       return PIPE_ERROR_OUT_OF_MEMORY;
 
-   cicmd = container_of(invalidate_cmd, cicmd, body);
+   cicmd = container_of(invalidate_cmd, struct svga_3d_invalidate_gb_image, body);
    cicmd->header.size = sizeof(*invalidate_cmd);
    swc->surface_relocation(swc, &invalidate_cmd->image.sid, NULL,
                            sbuf->handle,
@@ -513,7 +513,7 @@ svga_buffer_upload_gb_command(struct svga_context *svga,
       /* The whole_update_command is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
-      whole_update_cmd = container_of(update_cmd, whole_update_cmd, body);
+      whole_update_cmd = container_of(update_cmd, struct svga_3d_update_gb_image, body);
    }
 
    /* Init the first UPDATE_GB_IMAGE command */
@@ -2421,8 +2421,7 @@ void vid_dec_av1_FrameDecoded(OMX_COMPONENTTYPE *comp,
 
    if (task->is_sef_task) {
       if (task->buf_ref_count == 0) {
-         struct dec_av1_task *t;
-         t = container_of(task->buf_ref, t, buf);
+         struct dec_av1_task *t = container_of(task->buf_ref, struct dec_av1_task, buf);
          list_del(&task->list);
          t->buf_ref_count--;
          list_del(&t->list);
@@ -625,8 +625,7 @@ bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
 
 bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
 {
-   struct amdgpu_winsys_bo *bo = NULL; /* fix container_of */
-   bo = container_of(entry, bo, u.slab.entry);
+   struct amdgpu_winsys_bo *bo = container_of(entry, struct amdgpu_winsys_bo, u.slab.entry);
 
    return amdgpu_bo_can_reclaim(&bo->base);
 }
@@ -1032,10 +1031,9 @@ static void amdgpu_bo_sparse_destroy(struct pb_buffer *_buf)
    }
 
    while (!list_is_empty(&bo->u.sparse.backing)) {
-      struct amdgpu_sparse_backing *dummy = NULL;
       sparse_free_backing_buffer(bo,
                                  container_of(bo->u.sparse.backing.next,
-                                              dummy, list));
+                                              struct amdgpu_sparse_backing, list));
    }
 
    amdgpu_va_range_free(bo->u.sparse.va_handle);
@@ -1332,8 +1330,7 @@ amdgpu_bo_create(struct amdgpu_winsys *ws,
       if (!entry)
         return NULL;
 
-      bo = NULL;
-      bo = container_of(entry, bo, u.slab.entry);
+      bo = container_of(entry, struct amdgpu_winsys_bo, u.slab.entry);
 
       pipe_reference_init(&bo->base.reference, 1);
 
@@ -296,7 +296,7 @@ static void radeon_bomgr_free_va(const struct radeon_info *info,
       heap->start = va;
       /* Delete uppermost hole if it reaches the new top */
       if (!list_is_empty(&heap->holes)) {
-         hole = container_of(heap->holes.next, hole, list);
+         hole = container_of(heap->holes.next, struct radeon_bo_va_hole, list);
          if ((hole->offset + hole->size) == va) {
            heap->start = hole->offset;
            list_del(&hole->list);
@@ -306,7 +306,7 @@ static void radeon_bomgr_free_va(const struct radeon_info *info,
    } else {
       struct radeon_bo_va_hole *next;
 
-      hole = container_of(&heap->holes, hole, list);
+      hole = container_of(&heap->holes, struct radeon_bo_va_hole, list);
       LIST_FOR_EACH_ENTRY(next, &heap->holes, list) {
          if (next->offset < va)
             break;
@@ -749,8 +749,7 @@ bool radeon_bo_can_reclaim(struct pb_buffer *_buf)
 
 bool radeon_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
 {
-   struct radeon_bo *bo = NULL; /* fix container_of */
-   bo = container_of(entry, bo, u.slab.entry);
+   struct radeon_bo *bo = container_of(entry, struct radeon_bo, u.slab.entry);
 
    return radeon_bo_can_reclaim(&bo->base);
 }
@@ -1044,8 +1043,7 @@ radeon_winsys_bo_create(struct radeon_winsys *rws,
       if (!entry)
         return NULL;
 
-      bo = NULL;
-      bo = container_of(entry, bo, u.slab.entry);
+      bo = container_of(entry, struct radeon_bo, u.slab.entry);
 
       pipe_reference_init(&bo->base.reference, 1);
 
@@ -58,24 +58,21 @@ vbo_context_const(const struct gl_context *ctx)
 static inline struct gl_context *
 gl_context_from_vbo_exec(struct vbo_exec_context *exec)
 {
-   struct gl_context *ctx = NULL;
-   return container_of(exec, ctx, vbo_context.exec);
+   return container_of(exec, struct gl_context, vbo_context.exec);
 }
 
 
 static inline const struct gl_context *
 gl_context_from_vbo_exec_const(const struct vbo_exec_context *exec)
 {
-   struct gl_context *ctx = NULL;
-   return container_of(exec, ctx, vbo_context.exec);
+   return container_of(exec, struct gl_context, vbo_context.exec);
 }
 
 
 static inline struct gl_context *
 gl_context_from_vbo_save(struct vbo_save_context *save)
 {
-   struct gl_context *ctx = NULL;
-   return container_of(save, ctx, vbo_context.save);
+   return container_of(save, struct gl_context, vbo_context.save);
 }
 
 
@@ -166,11 +166,9 @@ static inline void list_validate(const struct list_head *list)
  *
  * 'sample' MUST be initialized, or else the result is undefined!
  */
-#ifndef container_of
-#define container_of(ptr, sample, member) \
+#define list_container_of(ptr, sample, member) \
     (void *)((char *)(ptr) \
             - ((char *)&(sample)->member - (char *)(sample)))
-#endif
 
 #define list_first_entry(ptr, type, member) \
     LIST_ENTRY(type, (ptr)->next, member)
@@ -180,31 +178,31 @@ static inline void list_validate(const struct list_head *list)
 
 
 #define LIST_FOR_EACH_ENTRY(pos, head, member) \
-   for (pos = NULL, pos = container_of((head)->next, pos, member); \
+   for (pos = NULL, pos = list_container_of((head)->next, pos, member); \
        &pos->member != (head); \
-       pos = container_of(pos->member.next, pos, member))
+       pos = list_container_of(pos->member.next, pos, member))
 
 #define LIST_FOR_EACH_ENTRY_SAFE(pos, storage, head, member) \
-   for (pos = NULL, pos = container_of((head)->next, pos, member), \
-       storage = container_of(pos->member.next, pos, member); \
+   for (pos = NULL, pos = list_container_of((head)->next, pos, member), \
+       storage = list_container_of(pos->member.next, pos, member); \
        &pos->member != (head); \
-       pos = storage, storage = container_of(storage->member.next, storage, member))
+       pos = storage, storage = list_container_of(storage->member.next, storage, member))
 
 #define LIST_FOR_EACH_ENTRY_SAFE_REV(pos, storage, head, member) \
-   for (pos = NULL, pos = container_of((head)->prev, pos, member), \
-       storage = container_of(pos->member.prev, pos, member); \
+   for (pos = NULL, pos = list_container_of((head)->prev, pos, member), \
+       storage = list_container_of(pos->member.prev, pos, member); \
        &pos->member != (head); \
-       pos = storage, storage = container_of(storage->member.prev, storage, member))
+       pos = storage, storage = list_container_of(storage->member.prev, storage, member))
 
 #define LIST_FOR_EACH_ENTRY_FROM(pos, start, head, member) \
-   for (pos = NULL, pos = container_of((start), pos, member); \
+   for (pos = NULL, pos = list_container_of((start), pos, member); \
        &pos->member != (head); \
-       pos = container_of(pos->member.next, pos, member))
+       pos = list_container_of(pos->member.next, pos, member))
 
 #define LIST_FOR_EACH_ENTRY_FROM_REV(pos, start, head, member) \
-   for (pos = NULL, pos = container_of((start), pos, member); \
+   for (pos = NULL, pos = list_container_of((start), pos, member); \
        &pos->member != (head); \
-       pos = container_of(pos->member.prev, pos, member))
+       pos = list_container_of(pos->member.prev, pos, member))
 
 #define list_for_each_entry(type, pos, head, member) \
   for (type *pos = LIST_ENTRY(type, (head)->next, member), \
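Note that the list iteration macros above deliberately keep the old sample-pointer semantics, renamed to list_container_of() so they no longer collide with the new type-based util macro: inside these macros the element type is only reachable through the pos variable itself, so a type name cannot be passed. The `pos = NULL, pos = list_container_of(...)` sequence first gives the sample pointer a defined value, after which the macro recovers the containing struct from pointer arithmetic alone (roughly `(char *)(ptr)` minus the byte offset of `member` within whatever type `sample` points at).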
@@ -87,6 +87,27 @@
 # define STATIC_ASSERT(COND) do { } while (0)
 #endif
 
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr:    the pointer to the member.
+ * @type:   the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ */
+#ifndef __GNUC__
+   /* a grown-up compiler is required for the extra type checking: */
+#  define container_of(ptr, type, member) \
+      (type*)((uint8_t *)ptr - offsetof(type, member))
+#else
+#  define __same_type(a, b) \
+      __builtin_types_compatible_p(__typeof__(a), __typeof__(b))
+#  define container_of(ptr, type, member) ({ \
+      uint8_t *__mptr = (uint8_t *)(ptr); \
+      STATIC_ASSERT(__same_type(*(ptr), ((type *)0)->member) || \
+                    __same_type(*(ptr), void) || \
+                    !"pointer type mismatch in container_of()"); \
+      ((type *)(__mptr - offsetof(type, member))); \
+   })
+#endif
+
 /**
  * Unreachable macro. Useful for suppressing "control reaches end of non-void
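For reference, a hedged usage sketch of the new util macro (the struct layout below is hypothetical, not from the commit, and it assumes the container_of() definition added above). Under GCC/Clang, the STATIC_ASSERT rejects any ptr whose pointee type matches neither the named member nor void:

    #include <stddef.h>   /* offsetof */

    struct list_head { struct list_head *next, *prev; };
    struct qinst { int op; struct list_head link; };   /* 'op' is illustrative */

    static struct qinst *qinst_from_link(struct list_head *entry)
    {
       /* compile time: *entry must have the same type as ((struct qinst *)0)->link;
        * run time: subtract offsetof(struct qinst, link) from entry */
       return container_of(entry, struct qinst, link);
    }

    /* container_of(entry, struct qinst, op) would fail the __same_type check,
     * since *entry is a struct list_head, not an int */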