gallium/radeon: clean up (domain, flags) <-> (slab heap) translations

This is cleaner, and we are down to 4 slabs.

Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Author: Marek Olšák
Date: 2017-06-29 18:01:16 +02:00
parent b09a22ad21
commit 64e5577cac
5 changed files with 79 additions and 74 deletions
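
For context on the commit message's "down to 4 slabs": the old code bit-packed the slab heap index (bit 0 = the GTT_WC flag, the domain index in the bits above), so pb_slabs_init() had to be sized for 12 heaps even though at most 6 indices could ever be produced; the new radeon_heap enum is dense, so only RADEON_MAX_SLAB_HEAPS = 4 heaps are needed. A compilable sketch of the two schemes follows; it is illustrative only, and every name in it is local to the sketch, not part of the commit.

/* Sketch: old bit-packed slab-heap index vs. the new dense enum. */
#include <assert.h>
#include <stdbool.h>

/* Old scheme: bit 0 = GTT_WC, domain index in heap >> 2, which forced
 * pb_slabs_init() to reserve 12 heaps (3 domains * 4 flag-bit combinations)
 * although only the WC bit was ever set. */
static unsigned old_slab_heap(unsigned domain_index, bool gtt_wc)
{
   return domain_index * 4 + (gtt_wc ? 1 : 0); /* produces 0/1, 4/5, 8/9 */
}

/* New scheme: one enum value per supported (domain, flags) combination. */
enum sketch_heap { HEAP_VRAM, HEAP_VRAM_GTT, HEAP_GTT_WC, HEAP_GTT, MAX_HEAPS };

int main(void)
{
   assert(old_slab_heap(2, true) == 9); /* sparse indices, 12 slots reserved */
   assert(MAX_HEAPS == 4);              /* dense: "down to 4 slabs" */
   return 0;
}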


@@ -658,4 +658,66 @@ static inline void radeon_emit_array(struct radeon_winsys_cs *cs,
    cs->current.cdw += count;
 }
 
+enum radeon_heap {
+   RADEON_HEAP_VRAM,
+   RADEON_HEAP_VRAM_GTT, /* combined heaps */
+   RADEON_HEAP_GTT_WC,
+   RADEON_HEAP_GTT,
+   RADEON_MAX_SLAB_HEAPS,
+};
+
+static inline enum radeon_bo_domain radeon_domain_from_heap(enum radeon_heap heap)
+{
+   switch (heap) {
+   case RADEON_HEAP_VRAM:
+      return RADEON_DOMAIN_VRAM;
+   case RADEON_HEAP_VRAM_GTT:
+      return RADEON_DOMAIN_VRAM_GTT;
+   case RADEON_HEAP_GTT_WC:
+   case RADEON_HEAP_GTT:
+      return RADEON_DOMAIN_GTT;
+   default:
+      assert(0);
+      return 0;
+   }
+}
+
+static inline unsigned radeon_flags_from_heap(enum radeon_heap heap)
+{
+   switch (heap) {
+   case RADEON_HEAP_VRAM:
+   case RADEON_HEAP_VRAM_GTT:
+   case RADEON_HEAP_GTT_WC:
+      return RADEON_FLAG_GTT_WC;
+   case RADEON_HEAP_GTT:
+   default:
+      return 0;
+   }
+}
+
+/* Return the heap index for winsys allocators, or -1 on failure. */
+static inline int radeon_get_heap_index(enum radeon_bo_domain domain,
+                                        enum radeon_bo_flag flags)
+{
+   /* VRAM implies WC (write combining) */
+   assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
+
+   /* Unsupported flags: NO_CPU_ACCESS, NO_SUBALLOC, SPARSE. */
+   if (flags & ~RADEON_FLAG_GTT_WC)
+      return -1;
+
+   switch (domain) {
+   case RADEON_DOMAIN_VRAM:
+      return RADEON_HEAP_VRAM;
+   case RADEON_DOMAIN_VRAM_GTT:
+      return RADEON_HEAP_VRAM_GTT;
+   case RADEON_DOMAIN_GTT:
+      if (flags & RADEON_FLAG_GTT_WC)
+         return RADEON_HEAP_GTT_WC;
+      else
+         return RADEON_HEAP_GTT;
+   }
+   return -1;
+}
+
 #endif
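
A usage sketch of the helpers added above (illustrative, not part of the commit; it assumes radeon_winsys.h and <assert.h> are included): the heap -> (domain, flags) -> heap mapping round-trips for every slab heap, which is what lets the winsys allocators below pass the index from radeon_get_heap_index() straight to pb_slab_alloc().

/* Sketch: verify that the heap <-> (domain, flags) translation is
 * self-consistent for all RADEON_MAX_SLAB_HEAPS entries. */
static inline void sketch_check_heap_round_trip(void)
{
   for (int i = 0; i < RADEON_MAX_SLAB_HEAPS; i++) {
      enum radeon_heap heap = (enum radeon_heap)i;
      enum radeon_bo_domain domain = radeon_domain_from_heap(heap);
      unsigned flags = radeon_flags_from_heap(heap);

      /* Each named heap maps back to its own index. */
      assert(radeon_get_heap_index(domain, flags) == i);
   }
}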


@@ -495,29 +495,13 @@ struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
 {
    struct amdgpu_winsys *ws = priv;
    struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);
-   enum radeon_bo_domain domains;
-   enum radeon_bo_flag flags = 0;
+   enum radeon_bo_domain domains = radeon_domain_from_heap(heap);
+   enum radeon_bo_flag flags = radeon_flags_from_heap(heap);
    uint32_t base_id;
 
    if (!slab)
       return NULL;
 
-   if (heap & 1)
-      flags |= RADEON_FLAG_GTT_WC;
-
-   switch (heap >> 2) {
-   case 0:
-      domains = RADEON_DOMAIN_VRAM;
-      break;
-   default:
-   case 1:
-      domains = RADEON_DOMAIN_VRAM_GTT;
-      break;
-   case 2:
-      domains = RADEON_DOMAIN_GTT;
-      break;
-   }
-
    slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
                                                     64 * 1024, 64 * 1024,
                                                     domains, flags));
@@ -1151,32 +1135,19 @@ amdgpu_bo_create(struct radeon_winsys *rws,
    struct amdgpu_winsys_bo *bo;
    unsigned usage = 0, pb_cache_bucket;
 
+   /* VRAM implies WC. This is not optional. */
+   assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
+
    /* Sub-allocate small buffers from slabs. */
    if (!(flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE)) &&
       size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
       alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
      struct pb_slab_entry *entry;
-     unsigned heap = 0;
+     int heap = radeon_get_heap_index(domain, flags);
 
-     if (flags & RADEON_FLAG_GTT_WC)
-        heap |= 1;
-     if (flags & ~RADEON_FLAG_GTT_WC)
+     if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
         goto no_slab;
 
-     switch (domain) {
-     case RADEON_DOMAIN_VRAM:
-        heap |= 0 * 4;
-        break;
-     case RADEON_DOMAIN_VRAM_GTT:
-        heap |= 1 * 4;
-        break;
-     case RADEON_DOMAIN_GTT:
-        heap |= 2 * 4;
-        break;
-     default:
-        goto no_slab;
-     }
-
      entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
      if (!entry) {
         /* Clear the cache and try again. */
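
Both buffer-create paths (amdgpu above, radeon below) now reduce to the same shape; a condensed, self-contained sketch of that shared flow is given here. It is illustrative only: the wrapper function is hypothetical, while pb_slabs/pb_slab_alloc refer to gallium's pb_slab utility used by both winsyses.

/* Sketch of the slab path both winsyses share after this commit: map
 * (domain, flags) to a dense heap index and fall back to a regular BO
 * allocation when there is no matching slab heap. */
static struct pb_slab_entry *
sketch_slab_alloc(struct pb_slabs *slabs, unsigned size,
                  enum radeon_bo_domain domain, enum radeon_bo_flag flags)
{
   int heap = radeon_get_heap_index(domain, flags);

   if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
      return NULL; /* caller falls back to a full amdgpu_bo/radeon_bo */

   return pb_slab_alloc(slabs, size, heap);
}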


@@ -287,7 +287,7 @@ amdgpu_winsys_create(int fd, unsigned flags,
 
    if (!pb_slabs_init(&ws->bo_slabs,
                       AMDGPU_SLAB_MIN_SIZE_LOG2, AMDGPU_SLAB_MAX_SIZE_LOG2,
-                      12, /* number of heaps (domain/flags combinations) */
+                      RADEON_MAX_SLAB_HEAPS,
                       ws,
                       amdgpu_bo_can_reclaim_slab,
                       amdgpu_bo_slab_alloc,


@@ -729,29 +729,13 @@ struct pb_slab *radeon_bo_slab_alloc(void *priv, unsigned heap,
 {
     struct radeon_drm_winsys *ws = priv;
     struct radeon_slab *slab = CALLOC_STRUCT(radeon_slab);
-    enum radeon_bo_domain domains;
-    enum radeon_bo_flag flags = 0;
+    enum radeon_bo_domain domains = radeon_domain_from_heap(heap);
+    enum radeon_bo_flag flags = radeon_flags_from_heap(heap);
     unsigned base_hash;
 
     if (!slab)
         return NULL;
 
-    if (heap & 1)
-        flags |= RADEON_FLAG_GTT_WC;
-
-    switch (heap >> 2) {
-    case 0:
-        domains = RADEON_DOMAIN_VRAM;
-        break;
-    default:
-    case 1:
-        domains = RADEON_DOMAIN_VRAM_GTT;
-        break;
-    case 2:
-        domains = RADEON_DOMAIN_GTT;
-        break;
-    }
-
     slab->buffer = radeon_bo(radeon_winsys_bo_create(&ws->base,
                                                      64 * 1024, 64 * 1024,
                                                      domains, flags));
@@ -938,33 +922,21 @@ radeon_winsys_bo_create(struct radeon_winsys *rws,
     if (size > UINT_MAX)
         return NULL;
 
+    /* VRAM implies WC. This is not optional. */
+    if (domain & RADEON_DOMAIN_VRAM)
+        flags |= RADEON_FLAG_GTT_WC;
+
     /* Sub-allocate small buffers from slabs. */
     if (!(flags & RADEON_FLAG_NO_SUBALLOC) &&
         size <= (1 << RADEON_SLAB_MAX_SIZE_LOG2) &&
         ws->info.has_virtual_memory &&
         alignment <= MAX2(1 << RADEON_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
         struct pb_slab_entry *entry;
-        unsigned heap = 0;
+        int heap = radeon_get_heap_index(domain, flags);
 
-        if (flags & RADEON_FLAG_GTT_WC)
-            heap |= 1;
-        if (flags & ~RADEON_FLAG_GTT_WC)
+        if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
             goto no_slab;
 
-        switch (domain) {
-        case RADEON_DOMAIN_VRAM:
-            heap |= 0 * 4;
-            break;
-        case RADEON_DOMAIN_VRAM_GTT:
-            heap |= 1 * 4;
-            break;
-        case RADEON_DOMAIN_GTT:
-            heap |= 2 * 4;
-            break;
-        default:
-            goto no_slab;
-        }
-
         entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
         if (!entry) {
             /* Clear the cache and try again. */


@@ -776,7 +776,7 @@ radeon_drm_winsys_create(int fd, unsigned flags,
      */
     if (!pb_slabs_init(&ws->bo_slabs,
                        RADEON_SLAB_MIN_SIZE_LOG2, RADEON_SLAB_MAX_SIZE_LOG2,
-                       12,
+                       RADEON_MAX_SLAB_HEAPS,
                        ws,
                        radeon_bo_can_reclaim_slab,
                        radeon_bo_slab_alloc,