anv: Move multialloc to common code
Reviewed-by: Eric Anholt <eric@anholt.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/8857>

committed by Marge Bot

parent 2523c47720
commit 145444d265
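The change is mechanical: call sites drop the driver-local ANV_MULTIALLOC helpers in favor of the common vk_multialloc helpers added at the bottom of this diff, and the usage pattern stays the same. A minimal sketch of the new caller side, reusing the names from the descriptor-set-layout hunk below:

    VK_MULTIALLOC(ma);
    vk_multialloc_add(&ma, &set_layout, 1);
    vk_multialloc_add(&ma, &bindings, max_binding + 1);
    vk_multialloc_add(&ma, &samplers, immutable_sampler_count);

    /* One allocation backs all three arrays; failure is a single check. */
    if (!vk_multialloc_alloc(&ma, &device->vk.alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);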
@@ -380,13 +380,13 @@ VkResult anv_CreateDescriptorSetLayout(
    * with DEVICE scope because they are reference counted and may not be
    * destroyed when vkDestroyDescriptorSetLayout is called.
    */
-   ANV_MULTIALLOC(ma);
-   anv_multialloc_add(&ma, &set_layout, 1);
-   anv_multialloc_add(&ma, &bindings, max_binding + 1);
-   anv_multialloc_add(&ma, &samplers, immutable_sampler_count);
+   VK_MULTIALLOC(ma);
+   vk_multialloc_add(&ma, &set_layout, 1);
+   vk_multialloc_add(&ma, &bindings, max_binding + 1);
+   vk_multialloc_add(&ma, &samplers, immutable_sampler_count);
 
-   if (!anv_multialloc_alloc(&ma, &device->vk.alloc,
-                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
+   if (!vk_multialloc_alloc(&ma, &device->vk.alloc,
+                            VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    memset(set_layout, 0, sizeof(*set_layout));
@@ -247,11 +247,11 @@ VkResult anv_CreateRenderPass(
    struct anv_render_pass_attachment *attachments;
    enum anv_pipe_bits *subpass_flushes;
 
-   ANV_MULTIALLOC(ma);
-   anv_multialloc_add(&ma, &pass, 1);
-   anv_multialloc_add(&ma, &subpasses, pCreateInfo->subpassCount);
-   anv_multialloc_add(&ma, &attachments, pCreateInfo->attachmentCount);
-   anv_multialloc_add(&ma, &subpass_flushes, pCreateInfo->subpassCount + 1);
+   VK_MULTIALLOC(ma);
+   vk_multialloc_add(&ma, &pass, 1);
+   vk_multialloc_add(&ma, &subpasses, pCreateInfo->subpassCount);
+   vk_multialloc_add(&ma, &attachments, pCreateInfo->attachmentCount);
+   vk_multialloc_add(&ma, &subpass_flushes, pCreateInfo->subpassCount + 1);
 
    struct anv_subpass_attachment *subpass_attachments;
    uint32_t subpass_attachment_count = 0;
@@ -259,10 +259,10 @@ VkResult anv_CreateRenderPass(
       subpass_attachment_count +=
         num_subpass_attachments(&pCreateInfo->pSubpasses[i]);
    }
-   anv_multialloc_add(&ma, &subpass_attachments, subpass_attachment_count);
+   vk_multialloc_add(&ma, &subpass_attachments, subpass_attachment_count);
 
-   if (!anv_multialloc_alloc2(&ma, &device->vk.alloc, pAllocator,
-                              VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
+   if (!vk_multialloc_alloc2(&ma, &device->vk.alloc, pAllocator,
+                             VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    /* Clear the subpasses along with the parent pass. This required because
@@ -486,11 +486,11 @@ VkResult anv_CreateRenderPass2(
    struct anv_render_pass_attachment *attachments;
    enum anv_pipe_bits *subpass_flushes;
 
-   ANV_MULTIALLOC(ma);
-   anv_multialloc_add(&ma, &pass, 1);
-   anv_multialloc_add(&ma, &subpasses, pCreateInfo->subpassCount);
-   anv_multialloc_add(&ma, &attachments, pCreateInfo->attachmentCount);
-   anv_multialloc_add(&ma, &subpass_flushes, pCreateInfo->subpassCount + 1);
+   VK_MULTIALLOC(ma);
+   vk_multialloc_add(&ma, &pass, 1);
+   vk_multialloc_add(&ma, &subpasses, pCreateInfo->subpassCount);
+   vk_multialloc_add(&ma, &attachments, pCreateInfo->attachmentCount);
+   vk_multialloc_add(&ma, &subpass_flushes, pCreateInfo->subpassCount + 1);
 
    struct anv_subpass_attachment *subpass_attachments;
    uint32_t subpass_attachment_count = 0;
@@ -498,10 +498,10 @@ VkResult anv_CreateRenderPass2(
       subpass_attachment_count +=
         num_subpass_attachments2(&pCreateInfo->pSubpasses[i]);
    }
-   anv_multialloc_add(&ma, &subpass_attachments, subpass_attachment_count);
+   vk_multialloc_add(&ma, &subpass_attachments, subpass_attachment_count);
 
-   if (!anv_multialloc_alloc2(&ma, &device->vk.alloc, pAllocator,
-                              VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
+   if (!vk_multialloc_alloc2(&ma, &device->vk.alloc, pAllocator,
+                             VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    /* Clear the subpasses along with the parent pass. This required because
@@ -50,23 +50,23 @@ anv_shader_bin_create(struct anv_device *device,
    nir_xfb_info *xfb_info;
    struct anv_pipeline_binding *surface_to_descriptor, *sampler_to_descriptor;
 
-   ANV_MULTIALLOC(ma);
-   anv_multialloc_add(&ma, &shader, 1);
-   anv_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
-   anv_multialloc_add_size(&ma, &prog_data, prog_data_size);
-   anv_multialloc_add(&ma, &prog_data_relocs, prog_data_in->num_relocs);
-   anv_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
+   VK_MULTIALLOC(ma);
+   vk_multialloc_add(&ma, &shader, 1);
+   vk_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
+   vk_multialloc_add_size(&ma, &prog_data, prog_data_size);
+   vk_multialloc_add(&ma, &prog_data_relocs, prog_data_in->num_relocs);
+   vk_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
    if (xfb_info_in) {
       uint32_t xfb_info_size = nir_xfb_info_size(xfb_info_in->output_count);
-      anv_multialloc_add_size(&ma, &xfb_info, xfb_info_size);
+      vk_multialloc_add_size(&ma, &xfb_info, xfb_info_size);
    }
-   anv_multialloc_add(&ma, &surface_to_descriptor,
-                      bind_map->surface_count);
-   anv_multialloc_add(&ma, &sampler_to_descriptor,
-                      bind_map->sampler_count);
+   vk_multialloc_add(&ma, &surface_to_descriptor,
+                     bind_map->surface_count);
+   vk_multialloc_add(&ma, &sampler_to_descriptor,
+                     bind_map->sampler_count);
 
-   if (!anv_multialloc_alloc(&ma, &device->vk.alloc,
-                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
+   if (!vk_multialloc_alloc(&ma, &device->vk.alloc,
+                            VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
       return NULL;
 
    shader->ref_cnt = 1;
@@ -454,104 +454,6 @@ void anv_loge_v(const char *format, va_list va);
 #define anv_assert(x)
 #endif
 
-/* A multi-pointer allocator
- *
- * When copying data structures from the user (such as a render pass), it's
- * common to need to allocate data for a bunch of different things. Instead
- * of doing several allocations and having to handle all of the error checking
- * that entails, it can be easier to do a single allocation. This struct
- * helps facilitate that. The intended usage looks like this:
- *
- *    ANV_MULTIALLOC(ma)
- *    anv_multialloc_add(&ma, &main_ptr, 1);
- *    anv_multialloc_add(&ma, &substruct1, substruct1Count);
- *    anv_multialloc_add(&ma, &substruct2, substruct2Count);
- *
- *    if (!anv_multialloc_alloc(&ma, pAllocator, VK_ALLOCATION_SCOPE_FOO))
- *       return vk_error(VK_ERROR_OUT_OF_HOST_MEORY);
- */
-struct anv_multialloc {
-    size_t size;
-    size_t align;
-
-    uint32_t ptr_count;
-    void **ptrs[8];
-};
-
-#define ANV_MULTIALLOC_INIT \
-   ((struct anv_multialloc) { 0, })
-
-#define ANV_MULTIALLOC(_name) \
-   struct anv_multialloc _name = ANV_MULTIALLOC_INIT
-
-__attribute__((always_inline))
-static inline void
-_anv_multialloc_add(struct anv_multialloc *ma,
-                    void **ptr, size_t size, size_t align)
-{
-   size_t offset = align_u64(ma->size, align);
-   ma->size = offset + size;
-   ma->align = MAX2(ma->align, align);
-
-   /* Store the offset in the pointer. */
-   *ptr = (void *)(uintptr_t)offset;
-
-   assert(ma->ptr_count < ARRAY_SIZE(ma->ptrs));
-   ma->ptrs[ma->ptr_count++] = ptr;
-}
-
-#define anv_multialloc_add_size(_ma, _ptr, _size) \
-   _anv_multialloc_add((_ma), (void **)(_ptr), (_size), __alignof__(**(_ptr)))
-
-#define anv_multialloc_add(_ma, _ptr, _count) \
-   anv_multialloc_add_size(_ma, _ptr, (_count) * sizeof(**(_ptr)));
-
-__attribute__((always_inline))
-static inline void *
-anv_multialloc_alloc(struct anv_multialloc *ma,
-                     const VkAllocationCallbacks *alloc,
-                     VkSystemAllocationScope scope)
-{
-   void *ptr = vk_alloc(alloc, ma->size, ma->align, scope);
-   if (!ptr)
-      return NULL;
-
-   /* Fill out each of the pointers with their final value.
-    *
-    *   for (uint32_t i = 0; i < ma->ptr_count; i++)
-    *      *ma->ptrs[i] = ptr + (uintptr_t)*ma->ptrs[i];
-    *
-    * Unfortunately, even though ma->ptr_count is basically guaranteed to be a
-    * constant, GCC is incapable of figuring this out and unrolling the loop
-    * so we have to give it a little help.
-    */
-   STATIC_ASSERT(ARRAY_SIZE(ma->ptrs) == 8);
-#define _ANV_MULTIALLOC_UPDATE_POINTER(_i) \
-   if ((_i) < ma->ptr_count) \
-      *ma->ptrs[_i] = ptr + (uintptr_t)*ma->ptrs[_i]
-   _ANV_MULTIALLOC_UPDATE_POINTER(0);
-   _ANV_MULTIALLOC_UPDATE_POINTER(1);
-   _ANV_MULTIALLOC_UPDATE_POINTER(2);
-   _ANV_MULTIALLOC_UPDATE_POINTER(3);
-   _ANV_MULTIALLOC_UPDATE_POINTER(4);
-   _ANV_MULTIALLOC_UPDATE_POINTER(5);
-   _ANV_MULTIALLOC_UPDATE_POINTER(6);
-   _ANV_MULTIALLOC_UPDATE_POINTER(7);
-#undef _ANV_MULTIALLOC_UPDATE_POINTER
-
-   return ptr;
-}
-
-__attribute__((always_inline))
-static inline void *
-anv_multialloc_alloc2(struct anv_multialloc *ma,
-                      const VkAllocationCallbacks *parent_alloc,
-                      const VkAllocationCallbacks *alloc,
-                      VkSystemAllocationScope scope)
-{
-   return anv_multialloc_alloc(ma, alloc ? alloc : parent_alloc, scope);
-}
-
 struct anv_bo {
    const char *name;
@@ -2789,17 +2789,17 @@ VkResult anv_WaitSemaphores(
    struct anv_timeline **timelines;
    uint64_t *values;
 
-   ANV_MULTIALLOC(ma);
+   VK_MULTIALLOC(ma);
 
-   anv_multialloc_add(&ma, &values, pWaitInfo->semaphoreCount);
+   vk_multialloc_add(&ma, &values, pWaitInfo->semaphoreCount);
    if (device->has_thread_submit) {
-      anv_multialloc_add(&ma, &handles, pWaitInfo->semaphoreCount);
+      vk_multialloc_add(&ma, &handles, pWaitInfo->semaphoreCount);
    } else {
-      anv_multialloc_add(&ma, &timelines, pWaitInfo->semaphoreCount);
+      vk_multialloc_add(&ma, &timelines, pWaitInfo->semaphoreCount);
    }
 
-   if (!anv_multialloc_alloc(&ma, &device->vk.alloc,
-                             VK_SYSTEM_ALLOCATION_SCOPE_COMMAND))
+   if (!vk_multialloc_alloc(&ma, &device->vk.alloc,
+                            VK_SYSTEM_ALLOCATION_SCOPE_COMMAND))
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    uint32_t handle_count = 0;
@@ -306,16 +306,16 @@ VkResult anv_QueuePresentKHR(
    /* Make sure all of the dependency semaphores have materialized when
     * using a threaded submission.
     */
-   ANV_MULTIALLOC(ma);
+   VK_MULTIALLOC(ma);
 
    uint64_t *values;
    uint32_t *syncobjs;
 
-   anv_multialloc_add(&ma, &values, pPresentInfo->waitSemaphoreCount);
-   anv_multialloc_add(&ma, &syncobjs, pPresentInfo->waitSemaphoreCount);
+   vk_multialloc_add(&ma, &values, pPresentInfo->waitSemaphoreCount);
+   vk_multialloc_add(&ma, &syncobjs, pPresentInfo->waitSemaphoreCount);
 
-   if (!anv_multialloc_alloc(&ma, &device->vk.alloc,
-                             VK_SYSTEM_ALLOCATION_SCOPE_COMMAND))
+   if (!vk_multialloc_alloc(&ma, &device->vk.alloc,
+                            VK_SYSTEM_ALLOCATION_SCOPE_COMMAND))
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    uint32_t wait_count = 0;
@@ -73,7 +73,7 @@ VkResult genX(CreateQueryPool)(
 #endif
    uint32_t data_offset = 0;
    struct anv_query_pool *pool;
-   ANV_MULTIALLOC(ma);
+   VK_MULTIALLOC(ma);
    VkResult result;
 
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
@@ -90,7 +90,7 @@ VkResult genX(CreateQueryPool)(
    */
    uint32_t uint64s_per_slot = 0;
 
-   anv_multialloc_add(&ma, &pool, 1);
+   vk_multialloc_add(&ma, &pool, 1);
 
    VkQueryPipelineStatisticFlags pipeline_statistics = 0;
    switch (pCreateInfo->queryType) {
@@ -142,8 +142,8 @@ VkResult genX(CreateQueryPool)(
                               perf_query_info->pCounterIndices,
                               perf_query_info->counterIndexCount,
                               NULL);
-      anv_multialloc_add(&ma, &counter_pass, perf_query_info->counterIndexCount);
-      anv_multialloc_add(&ma, &pass_query, n_passes);
+      vk_multialloc_add(&ma, &counter_pass, perf_query_info->counterIndexCount);
+      vk_multialloc_add(&ma, &pass_query, n_passes);
       uint64s_per_slot = 4 /* availability + small batch */;
       /* Align to the requirement of the layout */
       uint64s_per_slot = align_u32(uint64s_per_slot,
@@ -160,9 +160,9 @@ VkResult genX(CreateQueryPool)(
       assert(!"Invalid query type");
    }
 
-   if (!anv_multialloc_alloc2(&ma, &device->vk.alloc,
-                              pAllocator,
-                              VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
+   if (!vk_multialloc_alloc2(&ma, &device->vk.alloc,
+                             pAllocator,
+                             VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_QUERY_POOL);
@@ -28,6 +28,9 @@
 #include <string.h>
 #include <vulkan/vulkan.h>
 
+#include "util/u_math.h"
+#include "util/macros.h"
+
 static inline void *
 vk_alloc(const VkAllocationCallbacks *alloc,
          size_t size, size_t align,
@@ -122,4 +125,103 @@ vk_free2(const VkAllocationCallbacks *parent_alloc,
    vk_free(parent_alloc, data);
 }
 
+/* A multi-pointer allocator
+ *
+ * When copying data structures from the user (such as a render pass), it's
+ * common to need to allocate data for a bunch of different things. Instead
+ * of doing several allocations and having to handle all of the error checking
+ * that entails, it can be easier to do a single allocation. This struct
+ * helps facilitate that. The intended usage looks like this:
+ *
+ *    VK_MULTIALLOC(ma)
+ *    vk_multialloc_add(&ma, &main_ptr, 1);
+ *    vk_multialloc_add(&ma, &substruct1, substruct1Count);
+ *    vk_multialloc_add(&ma, &substruct2, substruct2Count);
+ *
+ *    if (!vk_multialloc_alloc(&ma, pAllocator, VK_ALLOCATION_SCOPE_FOO))
+ *       return vk_error(VK_ERROR_OUT_OF_HOST_MEORY);
+ */
+struct vk_multialloc {
+    size_t size;
+    size_t align;
+
+    uint32_t ptr_count;
+    void **ptrs[8];
+};
+
+#define VK_MULTIALLOC_INIT \
+   ((struct vk_multialloc) { 0, })
+
+#define VK_MULTIALLOC(_name) \
+   struct vk_multialloc _name = VK_MULTIALLOC_INIT
+
+__attribute__((always_inline))
+static inline void
+_vk_multialloc_add(struct vk_multialloc *ma,
+                   void **ptr, size_t size, size_t align)
+{
+   assert(util_is_power_of_two_nonzero(align));
+   size_t offset = ALIGN_POT(ma->size, align);
+   ma->size = offset + size;
+   ma->align = MAX2(ma->align, align);
+
+   /* Store the offset in the pointer. */
+   *ptr = (void *)(uintptr_t)offset;
+
+   assert(ma->ptr_count < ARRAY_SIZE(ma->ptrs));
+   ma->ptrs[ma->ptr_count++] = ptr;
+}
+
+#define vk_multialloc_add_size(_ma, _ptr, _size) \
+   _vk_multialloc_add((_ma), (void **)(_ptr), (_size), __alignof__(**(_ptr)))
+
+#define vk_multialloc_add(_ma, _ptr, _count) \
+   vk_multialloc_add_size(_ma, _ptr, (_count) * sizeof(**(_ptr)));
+
+__attribute__((always_inline))
+static inline void *
+vk_multialloc_alloc(struct vk_multialloc *ma,
+                    const VkAllocationCallbacks *alloc,
+                    VkSystemAllocationScope scope)
+{
+   void *ptr = vk_alloc(alloc, ma->size, ma->align, scope);
+   if (!ptr)
+      return NULL;
+
+   /* Fill out each of the pointers with their final value.
+    *
+    *   for (uint32_t i = 0; i < ma->ptr_count; i++)
+    *      *ma->ptrs[i] = ptr + (uintptr_t)*ma->ptrs[i];
+    *
+    * Unfortunately, even though ma->ptr_count is basically guaranteed to be a
+    * constant, GCC is incapable of figuring this out and unrolling the loop
+    * so we have to give it a little help.
+    */
+   STATIC_ASSERT(ARRAY_SIZE(ma->ptrs) == 8);
+#define _VK_MULTIALLOC_UPDATE_POINTER(_i) \
+   if ((_i) < ma->ptr_count) \
+      *ma->ptrs[_i] = ptr + (uintptr_t)*ma->ptrs[_i]
+   _VK_MULTIALLOC_UPDATE_POINTER(0);
+   _VK_MULTIALLOC_UPDATE_POINTER(1);
+   _VK_MULTIALLOC_UPDATE_POINTER(2);
+   _VK_MULTIALLOC_UPDATE_POINTER(3);
+   _VK_MULTIALLOC_UPDATE_POINTER(4);
+   _VK_MULTIALLOC_UPDATE_POINTER(5);
+   _VK_MULTIALLOC_UPDATE_POINTER(6);
+   _VK_MULTIALLOC_UPDATE_POINTER(7);
+#undef _VK_MULTIALLOC_UPDATE_POINTER
+
+   return ptr;
+}
+
+__attribute__((always_inline))
+static inline void *
+vk_multialloc_alloc2(struct vk_multialloc *ma,
+                     const VkAllocationCallbacks *parent_alloc,
+                     const VkAllocationCallbacks *alloc,
+                     VkSystemAllocationScope scope)
+{
+   return vk_multialloc_alloc(ma, alloc ? alloc : parent_alloc, scope);
+}
+
 #endif
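As the new header's comment notes, each vk_multialloc_add() call records an aligned offset temporarily inside the caller's pointer, and vk_multialloc_alloc() later rewrites every pointer relative to the single block it obtains. A small illustrative sketch of that bookkeeping, using a hypothetical 24-byte, 8-byte-aligned struct foo that is not part of this commit:

    struct foo *a;   /* 1 element,  24 bytes */
    uint64_t   *b;   /* 4 elements, 32 bytes */
    uint32_t   *c;   /* 3 elements, 12 bytes */

    VK_MULTIALLOC(ma);
    vk_multialloc_add(&ma, &a, 1);   /* offset 0,  ma.size = 24 */
    vk_multialloc_add(&ma, &b, 4);   /* offset 24, ma.size = 56 */
    vk_multialloc_add(&ma, &c, 3);   /* offset 56, ma.size = 68 */

    if (!vk_multialloc_alloc(&ma, pAllocator, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
    /* a, b and c now point 0, 24 and 56 bytes into the same allocation. */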