intel/perf: make perf context private

Encapsulate the details of this data structure.

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Author: Mark Janes
Date: 2019-07-10 16:57:16 -07:00
parent 1f4f421ce0
commit deea3798b6
5 changed files with 136 additions and 103 deletions

@@ -189,6 +189,86 @@ struct oa_sample_buf {
   uint32_t last_timestamp;
};
struct gen_perf_context {
   struct gen_perf_config *perf;

   void *ctx;  /* driver context (e.g., brw_context) */
   void *bufmgr;
   const struct gen_device_info *devinfo;

   uint32_t hw_ctx;
   int drm_fd;

   /* The i915 perf stream we open to setup + enable the OA counters */
   int oa_stream_fd;

   /* An i915 perf stream fd gives exclusive access to the OA unit that will
    * report counter snapshots for a specific counter set/profile in a
    * specific layout/format, so we can only start OA queries that are
    * compatible with the currently open fd...
    */
   int current_oa_metrics_set_id;
   int current_oa_format;

   /* List of buffers containing OA reports */
   struct exec_list sample_buffers;

   /* Cached list of empty sample buffers */
   struct exec_list free_sample_buffers;

   int n_active_oa_queries;
   int n_active_pipeline_stats_queries;

   /* The number of queries depending on running OA counters, which
    * extends beyond brw_end_perf_query() since we need to wait until
    * the last MI_RPC command has been parsed by the GPU.
    *
    * Accurate accounting is important here, as emitting an
    * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
    * effectively hang the GPU (see the sketch after this struct).
    */
   int n_oa_users;

   /* To help catch a spurious problem with the hardware or perf
    * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
    * with a unique ID that we can explicitly check for...
    */
   int next_query_start_report_id;

   /**
    * An array of queries whose results haven't yet been assembled
    * based on the data in buffer objects.
    *
    * These may be active or have already ended; however, their
    * results have not yet been requested.
    */
   struct gen_perf_query_object **unaccumulated;
   int unaccumulated_elements;
   int unaccumulated_array_size;

   /* The total number of query objects, so we can relinquish
    * our exclusive access to perf if the application deletes
    * all of its objects. (NB: we only disable perf while
    * there are no active queries.)
    */
   int n_query_instances;
};
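
The accounting rule in the n_oa_users comment is the central invariant of this struct: the OA unit must stay enabled for as long as any in-flight MI_REPORT_PERF_COUNT still depends on it. A minimal sketch of that reference counting, assuming the i915 perf stream fd held in oa_stream_fd; the helper names here are illustrative and not necessarily the ones Mesa uses:

#include <stdbool.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "drm-uapi/i915_drm.h"

/* Sketch: enable the OA unit when the first user arrives and disable it
 * only when the last one goes away. Disabling early, while an
 * MI_REPORT_PERF_COUNT is still pending, is what would hang the GPU.
 */
static bool
inc_n_oa_users(struct gen_perf_context *perf_ctx)
{
   if (perf_ctx->n_oa_users == 0 &&
       ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
      return false;
   ++perf_ctx->n_oa_users;
   return true;
}

static void
dec_n_oa_users(struct gen_perf_context *perf_ctx)
{
   /* Once the count drops to zero, no pending MI_RPC depends on the OA
    * unit any more, so it is safe to disable the stream.
    */
   if (--perf_ctx->n_oa_users == 0 &&
       ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
      fprintf(stderr, "WARNING: failed to disable i915 perf stream\n");
}
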
struct gen_perf_context *
gen_perf_new_context(void *parent)
{
   struct gen_perf_context *ctx = rzalloc(parent, struct gen_perf_context);
   if (!ctx)
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
   return ctx;
}
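
Since the struct definition now lives only in gen_perf.c, drivers obtain a context exclusively through this constructor. A hypothetical driver-side call (the function name and the perf_ctx field on brw_context are illustrative):

/* Illustrative: allocate the opaque perf context with the driver context
 * as its ralloc parent, so it is freed along with the driver context.
 */
void
brw_init_perf_context(struct brw_context *brw)
{
   brw->perf_ctx = gen_perf_new_context(brw);
   if (!brw->perf_ctx)
      return;  /* allocation failure was already logged */
   /* further setup fills in the fds, bufmgr and devinfo through the
    * library's init entry point (not shown in this hunk) */
}

Passing the driver context as the ralloc parent ties the perf context's lifetime to it, so no explicit destroy call is needed.
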
struct gen_perf_config *
gen_perf_config(struct gen_perf_context *ctx)
{
   return ctx->perf;
}
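
This accessor is what makes the encapsulation work: callers that previously reached into the struct now go through a function, so the struct definition can be dropped from the header. A sketch of the change at a call site (variable names are illustrative):

/* before: direct field access, which requires the full struct definition */
struct gen_perf_config *cfg_before = perf_ctx->perf;

/* after: struct gen_perf_context can stay opaque in gen_perf.h */
struct gen_perf_config *cfg_after = gen_perf_config(perf_ctx);
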
struct gen_perf_query_object *
gen_perf_new_query(struct gen_perf_context *perf_ctx, unsigned query_index)
{
@@ -206,6 +286,28 @@ gen_perf_new_query(struct gen_perf_context *perf_ctx, unsigned query_index)
   return obj;
}
int
gen_perf_active_queries(struct gen_perf_context *perf_ctx,
                        const struct gen_perf_query_info *query)
{
   assert(perf_ctx->n_active_oa_queries == 0 ||
          perf_ctx->n_active_pipeline_stats_queries == 0);

   switch (query->kind) {
   case GEN_PERF_QUERY_TYPE_OA:
   case GEN_PERF_QUERY_TYPE_RAW:
      return perf_ctx->n_active_oa_queries;

   case GEN_PERF_QUERY_TYPE_PIPELINE:
      return perf_ctx->n_active_pipeline_stats_queries;

   default:
      unreachable("Unknown query type");
   }
}
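
A caller might use this accessor, for example, to check that no queries of a given kind are still outstanding before reconfiguring the stream; a hedged sketch (the surrounding logic is illustrative):

/* Illustrative: the open i915 perf fd pins one metrics set/format, so
 * refuse to reconfigure while compatible queries are still active.
 */
if (gen_perf_active_queries(perf_ctx, query) > 0)
   return false;  /* wait for outstanding queries to complete first */
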
static bool
get_sysfs_dev_dir(struct gen_perf_config *perf, int fd)
{