util/u_queue: add an option to set the minimum thread priority

Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>

Author: Marek Olšák
Date: 2017-05-31 22:04:29 +02:00
parent 6f2947fa79
commit 89b6c93ae3
8 changed files with 29 additions and 8 deletions


@@ -2203,7 +2203,7 @@ threaded_context_create(struct pipe_context *pipe,
     * from the queue before being executed, so keep one tc_batch slot for that
     * execution. Also, keep one unused slot for an unflushed batch.
     */
-   if (!util_queue_init(&tc->queue, "gallium_drv", TC_MAX_BATCHES - 2, 1))
+   if (!util_queue_init(&tc->queue, "gallium_drv", TC_MAX_BATCHES - 2, 1, 0))
       goto fail;

    for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {


@@ -276,7 +276,7 @@ batch_flush(struct fd_batch *batch)
    fd_batch_reference(&tmp, batch);

    if (!util_queue_is_initialized(&batch->ctx->flush_queue))
-      util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1);
+      util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);

    util_queue_add_job(&batch->ctx->flush_queue,
                       batch, &batch->flush_fence,


@@ -890,7 +890,7 @@ struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws)
    num_compiler_threads = MIN2(num_cpus, ARRAY_SIZE(sscreen->tm));

    if (!util_queue_init(&sscreen->shader_compiler_queue, "si_shader",
-                        32, num_compiler_threads)) {
+                        32, num_compiler_threads, 0)) {
       si_destroy_shader_cache(sscreen);
       FREE(sscreen);
       return NULL;


@@ -305,7 +305,7 @@ amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
    (void) mtx_init(&ws->global_bo_list_lock, mtx_plain);
    (void) mtx_init(&ws->bo_fence_lock, mtx_plain);

-   if (!util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1)) {
+   if (!util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1, 0)) {
       amdgpu_winsys_destroy(&ws->base);
       mtx_unlock(&dev_tab_mutex);
       return NULL;


@@ -821,7 +821,7 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
    ws->info.gart_page_size = sysconf(_SC_PAGESIZE);

    if (ws->num_cpus > 1 && debug_get_option_thread())
-      util_queue_init(&ws->cs_queue, "radeon_cs", 8, 1);
+      util_queue_init(&ws->cs_queue, "radeon_cs", 8, 1, 0);

    /* Create the screen at the end. The winsys must be initialized
     * completely.


@@ -342,7 +342,7 @@ disk_cache_create(const char *gpu_name, const char *timestamp,
     * really care about getting things to disk quickly just that it's not
     * blocking other tasks.
     */
-   util_queue_init(&cache->cache_queue, "disk_cache", 32, 1);
+   util_queue_init(&cache->cache_queue, "disk_cache", 32, 1, 0);

    /* Create driver id keys */
    size_t ts_size = strlen(timestamp) + 1;


@@ -147,6 +147,21 @@ util_queue_thread_func(void *input)
       u_thread_setname(name);
    }

+   if (queue->flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) {
+#if defined(__linux__)
+      struct sched_param sched_param = {0};
+
+      /* The nice() function can only set a maximum of 19.
+       * SCHED_IDLE is the same as nice = 20.
+       *
+       * Note that Linux only allows decreasing the priority. The original
+       * priority can't be restored.
+       */
+      pthread_setschedparam(queue->threads[thread_index], SCHED_IDLE,
+                            &sched_param);
+#endif
+   }
+
    while (1) {
       struct util_queue_job job;
@@ -197,13 +212,15 @@ bool
 util_queue_init(struct util_queue *queue,
                 const char *name,
                 unsigned max_jobs,
-                unsigned num_threads)
+                unsigned num_threads,
+                unsigned flags)
 {
    unsigned i;

    memset(queue, 0, sizeof(*queue));
    queue->name = name;
    queue->num_threads = num_threads;
+   queue->flags = flags;
    queue->max_jobs = max_jobs;

    queue->jobs = (struct util_queue_job*)
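
The scheduling call added above can be exercised in isolation. The sketch below is not part of the commit: the worker function and its workload are invented, and it assumes a Linux/glibc target where SCHED_IDLE is available (hence the _GNU_SOURCE define). It only demonstrates the pthread_setschedparam(..., SCHED_IDLE, ...) pattern that the new UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY path relies on.

/* Standalone illustration only; not Mesa code. */
#define _GNU_SOURCE        /* for SCHED_IDLE in <sched.h> on glibc */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *worker(void *arg)
{
   (void)arg;
   /* Background work that should never compete with latency-sensitive
    * threads would run here. */
   return NULL;
}

int main(void)
{
   pthread_t thread;
   struct sched_param sched_param = {0};

   if (pthread_create(&thread, NULL, worker, NULL) != 0) {
      perror("pthread_create");
      return 1;
   }

   /* SCHED_IDLE runs the thread only when nothing else wants the CPU;
    * as the comment in the diff notes, nice() stops at 19 and this is
    * effectively nice = 20. Linux only allows lowering the priority
    * this way, so it cannot be restored afterwards. */
   if (pthread_setschedparam(thread, SCHED_IDLE, &sched_param) != 0)
      perror("pthread_setschedparam");

   pthread_join(thread, NULL);
   return 0;
}

Build with -pthread; the same call works on an already-running queue thread, which is why the commit can apply it from inside util_queue_thread_func.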


@@ -42,6 +42,8 @@
 extern "C" {
 #endif

+#define UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY (1 << 0)
+
 /* Job completion fence.
  * Put this into your job structure.
  */
@@ -69,6 +71,7 @@ struct util_queue {
    thrd_t *threads;
    int num_queued;
    unsigned num_threads;
+   unsigned flags;
    int kill_threads;
    int max_jobs;
    int write_idx, read_idx; /* ring buffer pointers */
@@ -81,7 +84,8 @@ struct util_queue {
 bool util_queue_init(struct util_queue *queue,
                      const char *name,
                      unsigned max_jobs,
                      unsigned num_threads,
-                     unsigned num_threads);
+                     unsigned num_threads,
+                     unsigned flags);
 void util_queue_destroy(struct util_queue *queue);
 void util_queue_fence_init(struct util_queue_fence *fence);
 void util_queue_fence_destroy(struct util_queue_fence *fence);
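
To make the new flags argument concrete, here is a hedged usage sketch. It is hypothetical: no call site in this commit sets the flag (they all pass 0), and the queue name, function name, and include path below are invented. It assumes only the util_queue_init() signature and the UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY define introduced above.

/* Hypothetical caller, not taken from this commit. */
#include <stdbool.h>
#include "util/u_queue.h"   /* include path is an assumption */

static struct util_queue background_queue;

bool init_background_queue(void)
{
   /* One worker thread, room for 32 queued jobs; on Linux the worker is
    * dropped to SCHED_IDLE because of the flag, while on other platforms
    * the flag is currently a no-op. */
   return util_queue_init(&background_queue, "background", 32, 1,
                          UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY);
}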