ilo: replace cp hooks by cp owner and flush callback

The problem with cp hooks is that when we switch from 3D ring to 2D ring, and
when there are active queries, we will emit 3D commands to 2D ring because
the new-batch hook is called.

This commit introduces the idea of a cp owner.  When the cp is flushed, or when
another owner takes its place, the current owner is notified, giving it a chance
to emit whatever commands need to be emitted.  With this mechanism, we can
resume queries when the 3D pipeline owns the cp, and pause queries when it
loses the cp.  Ring switch will just work.

As we still need to know when the cp bo is reallocated, a flush callback is
added.
This commit is contained in:
Chia-I Wu
2013-05-20 12:13:34 +08:00
parent a04d8574c6
commit 0d42a9e941
6 changed files with 284 additions and 288 deletions

View File

@@ -35,111 +35,6 @@
#include "ilo_state.h"
#include "ilo_3d.h"
/**
 * Begin a query.
 *
 * Pre-change version: selects the render ring directly and reserves
 * pre-flush space via ilo_cp_reserve_for_pre_flush() so the pre-flush
 * hook has room to pause the query.
 */
void
ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
{
struct ilo_3d *hw3d = ilo->hw3d;
/* queries are emitted on the render ring */
ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
/* reserve some space for pausing the query */
q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
ILO_3D_PIPELINE_WRITE_DEPTH_COUNT, NULL);
ilo_cp_reserve_for_pre_flush(hw3d->cp, q->reg_cmd_size);
q->data.u64 = 0;
/* allocate the result bo (2 presumably means two slots -- TODO confirm)
 * and write the starting depth count into it */
if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
/* XXX we should check the aperture size */
ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
q->bo, q->reg_read++);
list_add(&q->list, &hw3d->occlusion_queries);
}
break;
case PIPE_QUERY_TIMESTAMP:
/* nop */
break;
case PIPE_QUERY_TIME_ELAPSED:
/* reserve some space for pausing the query */
q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
ILO_3D_PIPELINE_WRITE_TIMESTAMP, NULL);
ilo_cp_reserve_for_pre_flush(hw3d->cp, q->reg_cmd_size);
q->data.u64 = 0;
/* allocate the result bo and write the starting timestamp */
if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
/* XXX we should check the aperture size */
ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
q->bo, q->reg_read++);
list_add(&q->list, &hw3d->time_elapsed_queries);
}
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
/* software-counted queries: no commands, just track the query */
q->data.u64 = 0;
list_add(&q->list, &hw3d->prim_generated_queries);
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
q->data.u64 = 0;
list_add(&q->list, &hw3d->prim_emitted_queries);
break;
default:
assert(!"unknown query type");
break;
}
}
/**
 * End a query.
 *
 * Pre-change version: returns the pre-flush space reserved by
 * ilo_3d_begin_query() (note the negative reserve) and writes the final
 * counter value.
 */
void
ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
{
struct ilo_3d *hw3d = ilo->hw3d;
ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
list_del(&q->list);
/* begin_query/resume guarantee a free slot for the final write */
assert(q->reg_read < q->reg_total);
/* give back the space reserved for pausing */
ilo_cp_reserve_for_pre_flush(hw3d->cp, -q->reg_cmd_size);
ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
q->bo, q->reg_read++);
break;
case PIPE_QUERY_TIMESTAMP:
/* a timestamp query needs only a single write at end time */
q->data.u64 = 0;
if (ilo_query_alloc_bo(q, 1, 1, hw3d->cp->winsys)) {
ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
q->bo, q->reg_read++);
}
break;
case PIPE_QUERY_TIME_ELAPSED:
list_del(&q->list);
assert(q->reg_read < q->reg_total);
ilo_cp_reserve_for_pre_flush(hw3d->cp, -q->reg_cmd_size);
ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
q->bo, q->reg_read++);
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
case PIPE_QUERY_PRIMITIVES_EMITTED:
/* software-counted: nothing to emit */
list_del(&q->list);
break;
default:
assert(!"unknown query type");
break;
}
}
static void
process_query_for_occlusion_counter(struct ilo_3d *hw3d,
struct ilo_query *q)
@@ -206,6 +101,178 @@ process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
q->reg_read = 0;
}
/*
 * Emit commands to resume all active hardware queries.  Called when the
 * 3D pipeline (re)acquires the cp (see ilo_3d_own_render_ring()), so
 * counting continues in the new batch.
 */
static void
ilo_3d_resume_queries(struct ilo_3d *hw3d)
{
struct ilo_query *q;
/* resume occlusion queries */
LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
/* accumulate the result if the bo is already full */
if (q->reg_read >= q->reg_total)
process_query_for_occlusion_counter(hw3d, q);
ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
q->bo, q->reg_read++);
}
/* resume timer queries */
LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
/* accumulate the result if the bo is already full */
if (q->reg_read >= q->reg_total)
process_query_for_time_elapsed(hw3d, q);
ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
q->bo, q->reg_read++);
}
}
/*
 * Emit commands to pause all active hardware queries.  Invoked from the
 * cp owner release callback when the 3D pipeline is about to lose the cp.
 * The writes always fit because begin_query reserved owner space for them.
 */
static void
ilo_3d_pause_queries(struct ilo_3d *hw3d)
{
struct ilo_query *q;
/* pause occlusion queries */
LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
assert(q->reg_read < q->reg_total);
ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
q->bo, q->reg_read++);
}
/* pause timer queries */
LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
assert(q->reg_read < q->reg_total);
ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
q->bo, q->reg_read++);
}
}
/*
 * ilo_cp owner release callback (installed in ilo_3d_create()): pause
 * outstanding queries before another owner takes the cp or it is flushed.
 */
static void
ilo_3d_release_render_ring(struct ilo_cp *cp, void *data)
{
struct ilo_3d *hw3d = data;
ilo_3d_pause_queries(hw3d);
}
/*
 * Switch to the render ring and make the 3D pipeline the cp owner.
 * ilo_cp_set_owner() returns true only when ownership actually changed,
 * in which case the previously-paused queries are resumed.
 */
static void
ilo_3d_own_render_ring(struct ilo_3d *hw3d)
{
ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);
if (ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve))
ilo_3d_resume_queries(hw3d);
}
/**
 * Begin a query.
 *
 * Post-change version: takes cp ownership via ilo_3d_own_render_ring()
 * and grows hw3d->owner_reserve so the release callback always has room
 * to pause this query.
 */
void
ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
{
struct ilo_3d *hw3d = ilo->hw3d;
ilo_3d_own_render_ring(hw3d);
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
/* reserve some space for pausing the query */
q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
ILO_3D_PIPELINE_WRITE_DEPTH_COUNT, NULL);
/* re-set the owner with the enlarged reserve */
hw3d->owner_reserve += q->reg_cmd_size;
ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
q->data.u64 = 0;
/* allocate the result bo and write the starting depth count */
if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
/* XXX we should check the aperture size */
ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
q->bo, q->reg_read++);
list_add(&q->list, &hw3d->occlusion_queries);
}
break;
case PIPE_QUERY_TIMESTAMP:
/* nop */
break;
case PIPE_QUERY_TIME_ELAPSED:
/* reserve some space for pausing the query */
q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
ILO_3D_PIPELINE_WRITE_TIMESTAMP, NULL);
hw3d->owner_reserve += q->reg_cmd_size;
ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
q->data.u64 = 0;
/* allocate the result bo and write the starting timestamp */
if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
/* XXX we should check the aperture size */
ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
q->bo, q->reg_read++);
list_add(&q->list, &hw3d->time_elapsed_queries);
}
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
/* software-counted queries: no commands, just track the query */
q->data.u64 = 0;
list_add(&q->list, &hw3d->prim_generated_queries);
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
q->data.u64 = 0;
list_add(&q->list, &hw3d->prim_emitted_queries);
break;
default:
assert(!"unknown query type");
break;
}
}
/**
 * End a query.
 *
 * Post-change version: shrinks hw3d->owner_reserve by the space the query
 * had reserved for pausing, since the final counter write happens now.
 */
void
ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
{
struct ilo_3d *hw3d = ilo->hw3d;
ilo_3d_own_render_ring(hw3d);
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
list_del(&q->list);
/* begin_query/resume guarantee a free slot for the final write */
assert(q->reg_read < q->reg_total);
/* return the reserved pause space to the cp */
hw3d->owner_reserve -= q->reg_cmd_size;
ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
q->bo, q->reg_read++);
break;
case PIPE_QUERY_TIMESTAMP:
/* a timestamp query needs only a single write at end time */
q->data.u64 = 0;
if (ilo_query_alloc_bo(q, 1, 1, hw3d->cp->winsys)) {
ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
q->bo, q->reg_read++);
}
break;
case PIPE_QUERY_TIME_ELAPSED:
list_del(&q->list);
assert(q->reg_read < q->reg_total);
hw3d->owner_reserve -= q->reg_cmd_size;
ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
q->bo, q->reg_read++);
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
case PIPE_QUERY_PRIMITIVES_EMITTED:
/* software-counted: nothing to emit */
list_del(&q->list);
break;
default:
assert(!"unknown query type");
break;
}
}
/**
* Process the raw query data.
*/
@@ -240,11 +307,10 @@ ilo_3d_process_query(struct ilo_context *ilo, struct ilo_query *q)
* Hook for CP new-batch.
*/
void
ilo_3d_new_cp_batch(struct ilo_3d *hw3d)
ilo_3d_cp_flushed(struct ilo_3d *hw3d)
{
struct ilo_query *q;
hw3d->new_batch = true;
if (ilo_debug & ILO_DEBUG_3D)
ilo_3d_pipeline_dump(hw3d->pipeline);
/* invalidate the pipeline */
ilo_3d_pipeline_invalidate(hw3d->pipeline,
@@ -255,58 +321,7 @@ ilo_3d_new_cp_batch(struct ilo_3d *hw3d)
ILO_3D_PIPELINE_INVALIDATE_HW);
}
/* resume occlusion queries */
LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
/* accumulate the result if the bo is alreay full */
if (q->reg_read >= q->reg_total)
process_query_for_occlusion_counter(hw3d, q);
ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
q->bo, q->reg_read++);
}
/* resume timer queries */
LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
/* accumulate the result if the bo is alreay full */
if (q->reg_read >= q->reg_total)
process_query_for_time_elapsed(hw3d, q);
ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
q->bo, q->reg_read++);
}
}
/**
 * Hook for CP pre-flush.
 *
 * Removed by this commit: its body now lives in ilo_3d_pause_queries(),
 * driven by the cp owner release callback instead of a flush hook.
 */
void
ilo_3d_pre_cp_flush(struct ilo_3d *hw3d)
{
struct ilo_query *q;
/* pause occlusion queries */
LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
assert(q->reg_read < q->reg_total);
ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
q->bo, q->reg_read++);
}
/* pause timer queries */
LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
assert(q->reg_read < q->reg_total);
ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
q->bo, q->reg_read++);
}
}
/**
 * Hook for CP post-flush.
 *
 * Removed by this commit: the debug dump and new_batch flagging move into
 * ilo_3d_cp_flushed(), invoked through the new flush callback.
 */
void
ilo_3d_post_cp_flush(struct ilo_3d *hw3d)
{
/* dump the executed batch when 3D debugging is enabled */
if (ilo_debug & ILO_DEBUG_3D)
ilo_3d_pipeline_dump(hw3d->pipeline);
hw3d->new_batch = true;
}
/**
@@ -322,6 +337,9 @@ ilo_3d_create(struct ilo_cp *cp, const struct ilo_dev_info *dev)
return NULL;
hw3d->cp = cp;
hw3d->owner.release_callback = ilo_3d_release_render_ring;
hw3d->owner.release_data = hw3d;
hw3d->new_batch = true;
list_inithead(&hw3d->occlusion_queries);
@@ -356,7 +374,7 @@ draw_vbo(struct ilo_3d *hw3d, const struct ilo_context *ilo,
bool need_flush;
int max_len;
ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);
ilo_3d_own_render_ring(hw3d);
/*
* Without a better tracking mechanism, when the framebuffer changes, we

View File

@@ -29,10 +29,10 @@
#define ILO_3D_H
#include "ilo_common.h"
#include "ilo_cp.h"
struct ilo_3d_pipeline;
struct ilo_context;
struct ilo_cp;
struct ilo_query;
/**
@@ -40,6 +40,8 @@ struct ilo_query;
*/
struct ilo_3d {
struct ilo_cp *cp;
struct ilo_cp_owner owner;
int owner_reserve;
bool new_batch;
uint32_t shader_cache_seqno;
@@ -64,13 +66,7 @@ void
ilo_3d_destroy(struct ilo_3d *hw3d);
void
ilo_3d_new_cp_batch(struct ilo_3d *hw3d);
void
ilo_3d_pre_cp_flush(struct ilo_3d *hw3d);
void
ilo_3d_post_cp_flush(struct ilo_3d *hw3d);
ilo_3d_cp_flushed(struct ilo_3d *hw3d);
void
ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q);

View File

@@ -88,14 +88,15 @@ blitter_xy_color_blt(struct pipe_context *pipe,
stride /= 4;
}
ilo_cp_set_ring(ilo->cp, ILO_CP_RING_BLT);
ilo_cp_set_owner(ilo->cp, NULL, 0);
/* make room if necessary */
bo_check[0] = ilo->cp->bo;
bo_check[1] = tex->bo;
if (ilo->winsys->check_aperture_space(ilo->winsys, bo_check, 2))
ilo_cp_flush(ilo->cp);
ilo_cp_set_ring(ilo->cp, ILO_CP_RING_BLT);
ilo_cp_begin(ilo->cp, cmd_len);
ilo_cp_write(ilo->cp, cmd);
ilo_cp_write(ilo->cp, br13 | stride);

View File

@@ -42,25 +42,7 @@
#include "ilo_context.h"
/*
 * CP new-batch hook (removed by this commit): forward to the 3D module,
 * but only when the render ring is current -- 3D commands must not land
 * on other rings.
 */
static void
ilo_context_new_cp_batch(struct ilo_cp *cp, void *data)
{
struct ilo_context *ilo = ilo_context(data);
if (cp->ring == ILO_CP_RING_RENDER)
ilo_3d_new_cp_batch(ilo->hw3d);
}
/*
 * CP pre-flush hook (removed by this commit): pause 3D queries before a
 * flush, render ring only.
 */
static void
ilo_context_pre_cp_flush(struct ilo_cp *cp, void *data)
{
struct ilo_context *ilo = ilo_context(data);
if (cp->ring == ILO_CP_RING_RENDER)
ilo_3d_pre_cp_flush(ilo->hw3d);
}
static void
ilo_context_post_cp_flush(struct ilo_cp *cp, void *data)
ilo_context_cp_flushed(struct ilo_cp *cp, void *data)
{
struct ilo_context *ilo = ilo_context(data);
@@ -71,8 +53,7 @@ ilo_context_post_cp_flush(struct ilo_cp *cp, void *data)
ilo->last_cp_bo = cp->bo;
ilo->last_cp_bo->reference(ilo->last_cp_bo);
if (cp->ring == ILO_CP_RING_RENDER)
ilo_3d_post_cp_flush(ilo->hw3d);
ilo_3d_cp_flushed(ilo->hw3d);
}
static void
@@ -148,12 +129,8 @@ ilo_context_create(struct pipe_screen *screen, void *priv)
return NULL;
}
ilo_cp_set_hook(ilo->cp, ILO_CP_HOOK_NEW_BATCH,
ilo_context_new_cp_batch, (void *) ilo);
ilo_cp_set_hook(ilo->cp, ILO_CP_HOOK_PRE_FLUSH,
ilo_context_pre_cp_flush, (void *) ilo);
ilo_cp_set_hook(ilo->cp, ILO_CP_HOOK_POST_FLUSH,
ilo_context_post_cp_flush, (void *) ilo);
ilo_cp_set_flush_callback(ilo->cp,
ilo_context_cp_flushed, (void *) ilo);
ilo->dirty = ILO_DIRTY_ALL;

View File

@@ -34,8 +34,8 @@
static const int ilo_cp_private = 2;
/**
* Dump the contents of the parser bo. This must be called in a post-flush
* hook.
* Dump the contents of the parser bo. This can only be called in the flush
* callback.
*/
void
ilo_cp_dump(struct ilo_cp *cp)
@@ -94,10 +94,10 @@ ilo_cp_clear_buffer(struct ilo_cp *cp)
/*
* Recalculate cp->size. This is needed not only because cp->stolen is
* reset above, but also that we added cp->reserve_for_pre_flush and
* ilo_cp_private to cp->size in ilo_cp_flush().
* reset above, but also that ilo_cp_private are added to cp->size in
* ilo_cp_end_buffer().
*/
cp->size = cp->bo_size - (cp->reserve_for_pre_flush + ilo_cp_private);
cp->size = cp->bo_size - ilo_cp_private;
}
/**
@@ -131,6 +131,11 @@ ilo_cp_upload_buffer(struct ilo_cp *cp)
{
int err;
if (!cp->sys) {
cp->bo->unmap(cp->bo);
return 0;
}
err = cp->bo->pwrite(cp->bo, 0, cp->used * 4, cp->ptr);
if (likely(!err && cp->stolen)) {
const int offset = cp->bo_size - cp->stolen;
@@ -156,8 +161,11 @@ ilo_cp_realloc_bo(struct ilo_cp *cp)
*/
bo = cp->winsys->alloc_buffer(cp->winsys,
"batch buffer", cp->bo_size * 4, 0);
if (unlikely(!bo))
return;
if (unlikely(!bo)) {
/* reuse the old one */
bo = cp->bo;
bo->reference(bo);
}
if (cp->bo)
cp->bo->unreference(cp->bo);
@@ -207,67 +215,39 @@ ilo_cp_exec_bo(struct ilo_cp *cp)
return err;
}
/*
 * Invoke the given cp hook, if set (removed by this commit).  Implicit
 * flushes are forbidden while the hook runs, since the hook itself is
 * called from the flush path; the previous flag value is restored after.
 */
static void
ilo_cp_call_hook(struct ilo_cp *cp, enum ilo_cp_hook hook)
{
const bool no_implicit_flush = cp->no_implicit_flush;
if (!cp->hooks[hook].func)
return;
/* no implicit flush in hooks */
cp->no_implicit_flush = true;
cp->hooks[hook].func(cp, cp->hooks[hook].data);
cp->no_implicit_flush = no_implicit_flush;
}
/**
* Flush the command parser and execute the commands. When the parser buffer
* is empty, the hooks are not invoked.
* is empty, the callback is not invoked.
*/
void
ilo_cp_flush(struct ilo_cp *cp)
{
int err;
ilo_cp_set_owner(cp, NULL, 0);
/* sanity check */
assert(cp->bo_size == cp->size +
cp->reserve_for_pre_flush + ilo_cp_private + cp->stolen);
assert(cp->bo_size == cp->size + cp->stolen + ilo_cp_private);
if (!cp->used) {
/* return the space stolen and etc. */
ilo_cp_clear_buffer(cp);
return;
}
/* make the reserved space available temporarily */
cp->size += cp->reserve_for_pre_flush;
ilo_cp_call_hook(cp, ILO_CP_HOOK_PRE_FLUSH);
ilo_cp_end_buffer(cp);
if (cp->sys) {
err = ilo_cp_upload_buffer(cp);
if (likely(!err))
err = ilo_cp_exec_bo(cp);
}
else {
cp->bo->unmap(cp->bo);
/* upload and execute */
err = ilo_cp_upload_buffer(cp);
if (likely(!err))
err = ilo_cp_exec_bo(cp);
}
if (likely(!err)) {
ilo_cp_call_hook(cp, ILO_CP_HOOK_POST_FLUSH);
ilo_cp_clear_buffer(cp);
}
else {
/* reset first so that post-flush hook knows nothing was executed */
ilo_cp_clear_buffer(cp);
ilo_cp_call_hook(cp, ILO_CP_HOOK_POST_FLUSH);
}
if (likely(!err && cp->flush_callback))
cp->flush_callback(cp, cp->flush_callback_data);
ilo_cp_clear_buffer(cp);
ilo_cp_realloc_bo(cp);
ilo_cp_call_hook(cp, ILO_CP_HOOK_NEW_BATCH);
}
/**
@@ -302,9 +282,6 @@ ilo_cp_create(struct intel_winsys *winsys, bool direct_map)
cp->ring = ILO_CP_RING_RENDER;
cp->no_implicit_flush = false;
cp->reserve_for_pre_flush = 0;
memset(cp->hooks, 0, sizeof(cp->hooks));
cp->bo_size = 8192;

View File

@@ -41,16 +41,13 @@ enum ilo_cp_ring {
ILO_CP_RING_COUNT,
};
enum ilo_cp_hook {
ILO_CP_HOOK_NEW_BATCH,
ILO_CP_HOOK_PRE_FLUSH,
ILO_CP_HOOK_POST_FLUSH,
typedef void (*ilo_cp_callback)(struct ilo_cp *cp, void *data);
ILO_CP_HOOK_COUNT,
struct ilo_cp_owner {
ilo_cp_callback release_callback;
void *release_data;
};
typedef void (*ilo_cp_hook_func)(struct ilo_cp *cp, void *data);
/**
* Command parser.
*/
@@ -58,16 +55,16 @@ struct ilo_cp {
struct intel_winsys *winsys;
struct intel_context *render_ctx;
ilo_cp_callback flush_callback;
void *flush_callback_data;
const struct ilo_cp_owner *owner;
int owner_reserve;
enum ilo_cp_ring ring;
bool no_implicit_flush;
int reserve_for_pre_flush;
unsigned one_off_flags;
struct {
ilo_cp_hook_func func;
void *data;
} hooks[ILO_CP_HOOK_COUNT];
int bo_size;
struct intel_bo *bo;
uint32_t *sys;
@@ -159,26 +156,6 @@ ilo_cp_assert_no_implicit_flush(struct ilo_cp *cp, bool enable)
cp->no_implicit_flush = enable;
}
/**
 * Reserve the given size of space from the parser buffer. The reserved space
 * will be made available temporarily for the pre-flush hook.
 *
 * Removed by this commit in favor of the owner reserve handled by
 * ilo_cp_set_owner().
 *
 * \param reserve size in dwords to reserve. It may be negative.
 */
static inline void
ilo_cp_reserve_for_pre_flush(struct ilo_cp *cp, int reserve)
{
assert(cp->reserve_for_pre_flush + reserve >= 0);
/* flush first if the current buffer cannot absorb the reservation */
if (cp->used > cp->size - reserve) {
ilo_cp_implicit_flush(cp);
assert(cp->used <= cp->size - reserve);
}
/* shrink the usable size; track the total so it can be given back */
cp->size -= reserve;
cp->reserve_for_pre_flush += reserve;
}
/**
* Set one-off flags. They will be cleared after flushing.
*/
@@ -188,16 +165,66 @@ ilo_cp_set_one_off_flags(struct ilo_cp *cp, unsigned flags)
cp->one_off_flags |= flags;
}
/**
* Set a command parser hook.
* Set flush callback. The callback is invoked after the bo has been
* successfully executed, and before the bo is reallocated.
*/
static inline void
ilo_cp_set_hook(struct ilo_cp *cp, enum ilo_cp_hook hook,
ilo_cp_hook_func func, void *data)
ilo_cp_set_flush_callback(struct ilo_cp *cp, ilo_cp_callback callback,
void *data)
{
cp->hooks[hook].func = func;
cp->hooks[hook].data = data;
cp->flush_callback = callback;
cp->flush_callback_data = data;
}
/**
 * Set the parser owner. If this is a new owner, the previous owner is
 * notified and the space it reserved is reclaimed.
 *
 * \param owner   new owner, or NULL for no owner
 * \param reserve dwords to keep available for the owner's release callback
 * \return true if this is a new owner
 */
static inline bool
ilo_cp_set_owner(struct ilo_cp *cp, const struct ilo_cp_owner *owner,
int reserve)
{
const bool new_owner = (cp->owner != owner);
/* release current owner */
if (new_owner && cp->owner) {
const bool no_implicit_flush = cp->no_implicit_flush;
/* reclaim the reserved space so the release callback can use it */
cp->size += cp->owner_reserve;
cp->owner_reserve = 0;
/* invoke the release callback; forbid implicit flushes while it runs */
cp->no_implicit_flush = true;
cp->owner->release_callback(cp, cp->owner->release_data);
cp->no_implicit_flush = no_implicit_flush;
cp->owner = NULL;
}
/* adjust the usable size by the change in the owner reserve */
if (cp->owner_reserve != reserve) {
const int extra = reserve - cp->owner_reserve;
if (cp->used > cp->size - extra) {
/* the flush resets owner_reserve to 0, so apply the full reserve */
ilo_cp_implicit_flush(cp);
assert(cp->used <= cp->size - reserve);
cp->size -= reserve;
cp->owner_reserve = reserve;
}
else {
cp->size -= extra;
cp->owner_reserve += extra;
}
}
/* set owner last because of the possible flush above */
cp->owner = owner;
return new_owner;
}
/**