Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-01-19)
drm/xe/uapi: Add missing DRM_ prefix in uAPI constants
Most constants defined in xe_drm.h use DRM_XE_ as prefix, which is helpful
to identify the namespace. Make this systematic and add the prefix where it
was missing.

v2:
- Fix vertical alignment of define values
- Remove double DRM_ in some variables (José Roberto de Souza)

v3: Rebase

Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
This commit is contained in:
parent b646ce9ce9
commit d5dc73dbd1
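The rename is mechanical, but it touches every uAPI symbol a client compiles against. As a hedged illustration of the post-rename API from userspace: the struct and flag names below come from the xe_drm.h hunks in this patch, while the DRM_IOCTL_XE_GEM_CREATE request number and the handle field name are assumptions, not verified against a final header:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include "xe_drm.h" /* assumed to provide the DRM_XE_* names below */

    /* Create a scanout-capable BO using the renamed DRM_XE_ constants.
     * 'fd' is an open xe render node; 'vram_instance_mask' selects placement. */
    static int create_scanout_bo(int fd, uint64_t size, uint32_t vram_instance_mask)
    {
        struct drm_xe_gem_create create;

        memset(&create, 0, sizeof(create));
        create.size = size;
        /* Low bits: memory instance mask; behavior flags start at bit 24. */
        create.flags = vram_instance_mask |
                       DRM_XE_GEM_CREATE_FLAG_SCANOUT |
                       DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;

        if (ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
            return -1;
        return create.handle; /* assumed field name for the new BO handle */
    }

Note how the placement mask and the behavior flags share the @flags word; this is exactly what the mask check in xe_gem_create_ioctl() below validates.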
@@ -209,7 +209,7 @@ static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
 
 	/* The order of placements should indicate preferred location */
 
-	if (bo->props.preferred_mem_class == XE_MEM_REGION_CLASS_SYSMEM) {
+	if (bo->props.preferred_mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM) {
 		try_add_system(bo, places, bo_flags, &c);
 		try_add_vram(xe, bo, places, bo_flags, &c);
 	} else {
@@ -1814,9 +1814,9 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 
 	if (XE_IOCTL_DBG(xe, args->flags &
-			 ~(XE_GEM_CREATE_FLAG_DEFER_BACKING |
-			   XE_GEM_CREATE_FLAG_SCANOUT |
-			   XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM |
+			 ~(DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING |
+			   DRM_XE_GEM_CREATE_FLAG_SCANOUT |
+			   DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM |
 			   xe->info.mem_region_mask)))
 		return -EINVAL;
 
@@ -1836,15 +1836,15 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 	if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
 		return -EINVAL;
 
-	if (args->flags & XE_GEM_CREATE_FLAG_DEFER_BACKING)
+	if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
 		bo_flags |= XE_BO_DEFER_BACKING;
 
-	if (args->flags & XE_GEM_CREATE_FLAG_SCANOUT)
+	if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
 		bo_flags |= XE_BO_SCANOUT_BIT;
 
 	bo_flags |= args->flags << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1);
 
-	if (args->flags & XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
+	if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
 		if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_CREATE_VRAM_MASK)))
 			return -EINVAL;
 
@@ -393,7 +393,7 @@ static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_q
 	if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
 		return -EINVAL;
 
-	if (value > XE_ACC_GRANULARITY_64M)
+	if (value > DRM_XE_ACC_GRANULARITY_64M)
 		return -EINVAL;
 
 	q->usm.acc_granularity = value;
@@ -406,14 +406,14 @@ typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
 			     u64 value, bool create);
 
 static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
-	[XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
-	[XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
-	[XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
-	[XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
-	[XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
-	[XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
-	[XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
-	[XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
+	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
+	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
+	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
+	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
+	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
+	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
+	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
+	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
 };
 
 static int exec_queue_user_ext_set_property(struct xe_device *xe,
@@ -445,7 +445,7 @@ typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
 			       bool create);
 
 static const xe_exec_queue_set_property_fn exec_queue_user_extension_funcs[] = {
-	[XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
+	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
 };
 
 #define MAX_USER_EXTENSIONS	16
@@ -764,7 +764,7 @@ int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 
 	switch (args->property) {
-	case XE_EXEC_QUEUE_GET_PROPERTY_BAN:
+	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
 		args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
 		ret = 0;
 		break;
@@ -560,7 +560,7 @@ static void xe_uevent_gt_reset_failure(struct pci_dev *pdev, u8 tile_id, u8 gt_i
 {
 	char *reset_event[4];
 
-	reset_event[0] = XE_RESET_FAILED_UEVENT "=NEEDS_RESET";
+	reset_event[0] = DRM_XE_RESET_FAILED_UEVENT "=NEEDS_RESET";
 	reset_event[1] = kasprintf(GFP_KERNEL, "TILE_ID=%d", tile_id);
 	reset_event[2] = kasprintf(GFP_KERNEL, "GT_ID=%d", gt_id);
 	reset_event[3] = NULL;
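For reference, userspace can consume this uevent with a netlink monitor. A minimal libudev sketch (build with -ludev), assuming only the DEVICE_STATUS/TILE_ID/GT_ID strings emitted by xe_uevent_gt_reset_failure() above; matching on the specific xe PCI device is left out:

    #include <poll.h>
    #include <stdio.h>
    #include <libudev.h>

    /* Watch kernel uevents for DEVICE_STATUS=NEEDS_RESET. */
    int main(void)
    {
        struct udev *udev = udev_new();
        struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "kernel");
        struct pollfd pfd;

        udev_monitor_enable_receiving(mon);
        pfd.fd = udev_monitor_get_fd(mon);
        pfd.events = POLLIN;

        while (poll(&pfd, 1, -1) > 0) {
            struct udev_device *dev = udev_monitor_receive_device(mon);
            const char *status;

            if (!dev)
                continue;
            status = udev_device_get_property_value(dev, "DEVICE_STATUS");
            if (status)
                printf("gt reset failed: %s (TILE_ID=%s GT_ID=%s)\n", status,
                       udev_device_get_property_value(dev, "TILE_ID"),
                       udev_device_get_property_value(dev, "GT_ID"));
            udev_device_unref(dev);
        }
        return 0;
    }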
@@ -17,12 +17,12 @@ static unsigned int xe_pmu_target_cpu = -1;
 
 static unsigned int config_gt_id(const u64 config)
 {
-	return config >> __XE_PMU_GT_SHIFT;
+	return config >> __DRM_XE_PMU_GT_SHIFT;
 }
 
 static u64 config_counter(const u64 config)
 {
-	return config & ~(~0ULL << __XE_PMU_GT_SHIFT);
+	return config & ~(~0ULL << __DRM_XE_PMU_GT_SHIFT);
 }
 
 static void xe_pmu_event_destroy(struct perf_event *event)
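config_gt_id() and config_counter() are the inverse of the ___DRM_XE_PMU_OTHER() packing macro renamed further down in xe_drm.h: the GT index lives in the top bits from __DRM_XE_PMU_GT_SHIFT, the counter id in the remaining low 56 bits. A self-contained round-trip sketch of that layout:

    #include <assert.h>
    #include <stdint.h>

    #define __DRM_XE_PMU_GT_SHIFT (56)

    /* Pack: same layout as ___DRM_XE_PMU_OTHER(gt, x) in xe_drm.h. */
    static uint64_t pmu_config(uint64_t gt, uint64_t counter)
    {
        return counter | (gt << __DRM_XE_PMU_GT_SHIFT);
    }

    static uint64_t config_gt_id(uint64_t config)
    {
        return config >> __DRM_XE_PMU_GT_SHIFT;
    }

    static uint64_t config_counter(uint64_t config)
    {
        return config & ~(~0ULL << __DRM_XE_PMU_GT_SHIFT);
    }

    int main(void)
    {
        /* DRM_XE_PMU_MEDIA_GROUP_BUSY(1): counter 2 on GT 1. */
        uint64_t config = pmu_config(1, 2);

        assert(config == 0x0100000000000002ULL);
        assert(config_gt_id(config) == 1);
        assert(config_counter(config) == 2);
        return 0;
    }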
@@ -114,13 +114,13 @@ config_status(struct xe_device *xe, u64 config)
 		return -ENOENT;
 
 	switch (config_counter(config)) {
-	case XE_PMU_RENDER_GROUP_BUSY(0):
-	case XE_PMU_COPY_GROUP_BUSY(0):
-	case XE_PMU_ANY_ENGINE_GROUP_BUSY(0):
+	case DRM_XE_PMU_RENDER_GROUP_BUSY(0):
+	case DRM_XE_PMU_COPY_GROUP_BUSY(0):
+	case DRM_XE_PMU_ANY_ENGINE_GROUP_BUSY(0):
 		if (gt->info.type == XE_GT_TYPE_MEDIA)
 			return -ENOENT;
 		break;
-	case XE_PMU_MEDIA_GROUP_BUSY(0):
+	case DRM_XE_PMU_MEDIA_GROUP_BUSY(0):
 		if (!(gt->info.engine_mask & (BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VECS0))))
 			return -ENOENT;
 		break;
@@ -180,10 +180,10 @@ static u64 __xe_pmu_event_read(struct perf_event *event)
 	u64 val;
 
 	switch (config_counter(config)) {
-	case XE_PMU_RENDER_GROUP_BUSY(0):
-	case XE_PMU_COPY_GROUP_BUSY(0):
-	case XE_PMU_ANY_ENGINE_GROUP_BUSY(0):
-	case XE_PMU_MEDIA_GROUP_BUSY(0):
+	case DRM_XE_PMU_RENDER_GROUP_BUSY(0):
+	case DRM_XE_PMU_COPY_GROUP_BUSY(0):
+	case DRM_XE_PMU_ANY_ENGINE_GROUP_BUSY(0):
+	case DRM_XE_PMU_MEDIA_GROUP_BUSY(0):
 		val = engine_group_busyness_read(gt, config);
 		break;
 	default:
@@ -369,7 +369,7 @@ create_event_attributes(struct xe_pmu *pmu)
 	/* Count how many counters we will be exposing. */
 	for_each_gt(gt, xe, j) {
 		for (i = 0; i < ARRAY_SIZE(events); i++) {
-			u64 config = ___XE_PMU_OTHER(j, events[i].counter);
+			u64 config = ___DRM_XE_PMU_OTHER(j, events[i].counter);
 
 			if (!config_status(xe, config))
 				count++;
@@ -396,7 +396,7 @@ create_event_attributes(struct xe_pmu *pmu)
 
 	for_each_gt(gt, xe, j) {
 		for (i = 0; i < ARRAY_SIZE(events); i++) {
-			u64 config = ___XE_PMU_OTHER(j, events[i].counter);
+			u64 config = ___DRM_XE_PMU_OTHER(j, events[i].counter);
 			char *str;
 
 			if (config_status(xe, config))
@@ -261,7 +261,7 @@ static int query_memory_usage(struct xe_device *xe,
 		return -ENOMEM;
 
 	man = ttm_manager_type(&xe->ttm, XE_PL_TT);
-	usage->regions[0].mem_class = XE_MEM_REGION_CLASS_SYSMEM;
+	usage->regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
 	usage->regions[0].instance = 0;
 	usage->regions[0].min_page_size = PAGE_SIZE;
 	usage->regions[0].total_size = man->size << PAGE_SHIFT;
@@ -273,7 +273,7 @@ static int query_memory_usage(struct xe_device *xe,
 		man = ttm_manager_type(&xe->ttm, i);
 		if (man) {
 			usage->regions[usage->num_regions].mem_class =
-				XE_MEM_REGION_CLASS_VRAM;
+				DRM_XE_MEM_REGION_CLASS_VRAM;
 			usage->regions[usage->num_regions].instance =
 				usage->num_regions;
 			usage->regions[usage->num_regions].min_page_size =
@@ -305,7 +305,7 @@ static int query_memory_usage(struct xe_device *xe,
 
 static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
 {
-	const u32 num_params = XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1;
+	const u32 num_params = DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1;
 	size_t size =
 		sizeof(struct drm_xe_query_config) + num_params * sizeof(u64);
 	struct drm_xe_query_config __user *query_ptr =
@@ -324,15 +324,15 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
 		return -ENOMEM;
 
 	config->num_params = num_params;
-	config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
+	config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
 		xe->info.devid | (xe->info.revid << 16);
 	if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
-		config->info[XE_QUERY_CONFIG_FLAGS] =
-			XE_QUERY_CONFIG_FLAGS_HAS_VRAM;
-	config->info[XE_QUERY_CONFIG_MIN_ALIGNMENT] =
+		config->info[DRM_XE_QUERY_CONFIG_FLAGS] =
+			DRM_XE_QUERY_CONFIG_FLAGS_HAS_VRAM;
+	config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
 		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
-	config->info[XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
-	config->info[XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY] =
+	config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
+	config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY] =
 		xe_exec_queue_device_get_max_priority(xe);
 
 	if (copy_to_user(query_ptr, config, size)) {
@@ -368,9 +368,9 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
 	gt_list->num_gt = xe->info.gt_count;
 	for_each_gt(gt, xe, id) {
 		if (xe_gt_is_media_type(gt))
-			gt_list->gt_list[id].type = XE_QUERY_GT_TYPE_MEDIA;
+			gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
 		else
-			gt_list->gt_list[id].type = XE_QUERY_GT_TYPE_MAIN;
+			gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MAIN;
 		gt_list->gt_list[id].gt_id = gt->info.id;
 		gt_list->gt_list[id].clock_freq = gt->info.clock_freq;
 		if (!IS_DGFX(xe))
@@ -468,21 +468,21 @@ static int query_gt_topology(struct xe_device *xe,
 	for_each_gt(gt, xe, id) {
 		topo.gt_id = id;
 
-		topo.type = XE_TOPO_DSS_GEOMETRY;
+		topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
 		query_ptr = copy_mask(query_ptr, &topo,
 				      gt->fuse_topo.g_dss_mask,
 				      sizeof(gt->fuse_topo.g_dss_mask));
 		if (IS_ERR(query_ptr))
 			return PTR_ERR(query_ptr);
 
-		topo.type = XE_TOPO_DSS_COMPUTE;
+		topo.type = DRM_XE_TOPO_DSS_COMPUTE;
 		query_ptr = copy_mask(query_ptr, &topo,
 				      gt->fuse_topo.c_dss_mask,
 				      sizeof(gt->fuse_topo.c_dss_mask));
 		if (IS_ERR(query_ptr))
 			return PTR_ERR(query_ptr);
 
-		topo.type = XE_TOPO_EU_PER_DSS;
+		topo.type = DRM_XE_TOPO_EU_PER_DSS;
 		query_ptr = copy_mask(query_ptr, &topo,
 				      gt->fuse_topo.eu_mask_per_dss,
 				      sizeof(gt->fuse_topo.eu_mask_per_dss));
@@ -2177,8 +2177,8 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 	       (ULL)bo_offset_or_userptr);
 
 	switch (operation) {
-	case XE_VM_BIND_OP_MAP:
-	case XE_VM_BIND_OP_MAP_USERPTR:
+	case DRM_XE_VM_BIND_OP_MAP:
+	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
 						  obj, bo_offset_or_userptr);
 		if (IS_ERR(ops))
@@ -2189,13 +2189,13 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 
 			op->tile_mask = tile_mask;
 			op->map.immediate =
-				flags & XE_VM_BIND_FLAG_IMMEDIATE;
+				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
 			op->map.read_only =
-				flags & XE_VM_BIND_FLAG_READONLY;
-			op->map.is_null = flags & XE_VM_BIND_FLAG_NULL;
+				flags & DRM_XE_VM_BIND_FLAG_READONLY;
+			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
 		}
 		break;
-	case XE_VM_BIND_OP_UNMAP:
+	case DRM_XE_VM_BIND_OP_UNMAP:
 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
 		if (IS_ERR(ops))
 			return ops;
@@ -2206,7 +2206,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 			op->tile_mask = tile_mask;
 		}
 		break;
-	case XE_VM_BIND_OP_PREFETCH:
+	case DRM_XE_VM_BIND_OP_PREFETCH:
 		ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
 		if (IS_ERR(ops))
 			return ops;
@@ -2218,7 +2218,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 			op->prefetch.region = region;
 		}
 		break;
-	case XE_VM_BIND_OP_UNMAP_ALL:
+	case DRM_XE_VM_BIND_OP_UNMAP_ALL:
 		xe_assert(vm->xe, bo);
 
 		err = xe_bo_lock(bo, true);
@@ -2828,13 +2828,13 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
 
 #ifdef TEST_VM_ASYNC_OPS_ERROR
 #define SUPPORTED_FLAGS	\
-	(FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \
-	 XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \
-	 XE_VM_BIND_FLAG_NULL | 0xffff)
+	(FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_ASYNC | \
+	 DRM_XE_VM_BIND_FLAG_READONLY | DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
+	 DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
 #else
 #define SUPPORTED_FLAGS	\
-	(XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \
-	 XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | \
+	(DRM_XE_VM_BIND_FLAG_ASYNC | DRM_XE_VM_BIND_FLAG_READONLY | \
+	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
 	 0xffff)
 #endif
 #define XE_64K_PAGE_MASK 0xffffull
@@ -2882,45 +2882,45 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
 		u32 obj = (*bind_ops)[i].obj;
 		u64 obj_offset = (*bind_ops)[i].obj_offset;
 		u32 region = (*bind_ops)[i].region;
-		bool is_null = flags & XE_VM_BIND_FLAG_NULL;
+		bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
 
 		if (i == 0) {
-			*async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
+			*async = !!(flags & DRM_XE_VM_BIND_FLAG_ASYNC);
 			if (XE_IOCTL_DBG(xe, !*async && args->num_syncs)) {
 				err = -EINVAL;
 				goto free_bind_ops;
 			}
 		} else if (XE_IOCTL_DBG(xe, *async !=
-					!!(flags & XE_VM_BIND_FLAG_ASYNC))) {
+					!!(flags & DRM_XE_VM_BIND_FLAG_ASYNC))) {
 			err = -EINVAL;
 			goto free_bind_ops;
 		}
 
-		if (XE_IOCTL_DBG(xe, op > XE_VM_BIND_OP_PREFETCH) ||
+		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
 		    XE_IOCTL_DBG(xe, obj && is_null) ||
 		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
-		    XE_IOCTL_DBG(xe, op != XE_VM_BIND_OP_MAP &&
+		    XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
 				 is_null) ||
 		    XE_IOCTL_DBG(xe, !obj &&
-				 op == XE_VM_BIND_OP_MAP &&
+				 op == DRM_XE_VM_BIND_OP_MAP &&
 				 !is_null) ||
 		    XE_IOCTL_DBG(xe, !obj &&
-				 op == XE_VM_BIND_OP_UNMAP_ALL) ||
+				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
 		    XE_IOCTL_DBG(xe, addr &&
-				 op == XE_VM_BIND_OP_UNMAP_ALL) ||
+				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
 		    XE_IOCTL_DBG(xe, range &&
-				 op == XE_VM_BIND_OP_UNMAP_ALL) ||
+				 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
 		    XE_IOCTL_DBG(xe, obj &&
-				 op == XE_VM_BIND_OP_MAP_USERPTR) ||
+				 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
 		    XE_IOCTL_DBG(xe, obj &&
-				 op == XE_VM_BIND_OP_PREFETCH) ||
+				 op == DRM_XE_VM_BIND_OP_PREFETCH) ||
 		    XE_IOCTL_DBG(xe, region &&
-				 op != XE_VM_BIND_OP_PREFETCH) ||
+				 op != DRM_XE_VM_BIND_OP_PREFETCH) ||
 		    XE_IOCTL_DBG(xe, !(BIT(region) &
 				       xe->info.mem_region_mask)) ||
 		    XE_IOCTL_DBG(xe, obj &&
-				 op == XE_VM_BIND_OP_UNMAP)) {
+				 op == DRM_XE_VM_BIND_OP_UNMAP)) {
 			err = -EINVAL;
 			goto free_bind_ops;
 		}
@@ -2929,7 +2929,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
 		    XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
 		    XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
 		    XE_IOCTL_DBG(xe, !range &&
-				 op != XE_VM_BIND_OP_UNMAP_ALL)) {
+				 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
 			err = -EINVAL;
 			goto free_bind_ops;
 		}
@@ -32,9 +32,9 @@
  * Operations
  * ----------
  *
- * XE_VM_BIND_OP_MAP		- Create mapping for a BO
- * XE_VM_BIND_OP_UNMAP		- Destroy mapping for a BO / userptr
- * XE_VM_BIND_OP_MAP_USERPTR	- Create mapping for userptr
+ * DRM_XE_VM_BIND_OP_MAP		- Create mapping for a BO
+ * DRM_XE_VM_BIND_OP_UNMAP		- Destroy mapping for a BO / userptr
+ * DRM_XE_VM_BIND_OP_MAP_USERPTR	- Create mapping for userptr
 *
 * Implementation details
 * ~~~~~~~~~~~~~~~~~~~~~~
@@ -113,7 +113,7 @@
 * VM uses to report errors to. The ufence wait interface can be used to wait on
 * a VM going into an error state. Once an error is reported the VM's async
 * worker is paused. While the VM's async worker is paused sync,
- * XE_VM_BIND_OP_UNMAP operations are allowed (this can free memory). Once the
+ * DRM_XE_VM_BIND_OP_UNMAP operations are allowed (this can free memory). Once the
 * uses believe the error state is fixed, the async worker can be resumed via
 * XE_VM_BIND_OP_RESTART operation. When VM async bind work is restarted, the
 * first operation processed is the operation that caused the original error.
@@ -193,7 +193,7 @@
 * In a VM is in fault mode (TODO: link to fault mode), new bind operations that
 * create mappings are by default are deferred to the page fault handler (first
 * use). This behavior can be overriden by setting the flag
- * XE_VM_BIND_FLAG_IMMEDIATE which indicates to creating the mapping
+ * DRM_XE_VM_BIND_FLAG_IMMEDIATE which indicates to creating the mapping
 * immediately.
 *
 * User pointer
@@ -322,7 +322,7 @@
 *
 * By default, on a faulting VM binds just allocate the VMA and the actual
 * updating of the page tables is defered to the page fault handler. This
- * behavior can be overridden by setting the flag XE_VM_BIND_FLAG_IMMEDIATE in
+ * behavior can be overridden by setting the flag DRM_XE_VM_BIND_FLAG_IMMEDIATE in
 * the VM bind which will then do the bind immediately.
 *
 * Page fault handler
@@ -19,12 +19,12 @@ extern "C" {
 /**
  * DOC: uevent generated by xe on it's pci node.
  *
- * XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
+ * DRM_XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
  * fails. The value supplied with the event is always "NEEDS_RESET".
  * Additional information supplied is tile id and gt id of the gt unit for
  * which reset has failed.
  */
-#define XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
+#define DRM_XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
 
 /**
  * struct xe_user_extension - Base class for defining a chain of extensions
@@ -148,14 +148,14 @@ struct drm_xe_engine_class_instance {
  * enum drm_xe_memory_class - Supported memory classes.
  */
 enum drm_xe_memory_class {
-	/** @XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
-	XE_MEM_REGION_CLASS_SYSMEM = 0,
+	/** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
+	DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
 	/**
-	 * @XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
+	 * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
 	 * represents the memory that is local to the device, which we
 	 * call VRAM. Not valid on integrated platforms.
 	 */
-	XE_MEM_REGION_CLASS_VRAM
+	DRM_XE_MEM_REGION_CLASS_VRAM
 };
 
 /**
@@ -215,7 +215,7 @@ struct drm_xe_query_mem_region {
 	 * always equal the @total_size, since all of it will be CPU
 	 * accessible.
 	 *
-	 * Note this is only tracked for XE_MEM_REGION_CLASS_VRAM
+	 * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
 	 * regions (for other types the value here will always equal
 	 * zero).
 	 */
@@ -227,7 +227,7 @@ struct drm_xe_query_mem_region {
 	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
 	 * accounting. Without this the value here will always equal
 	 * zero. Note this is only currently tracked for
-	 * XE_MEM_REGION_CLASS_VRAM regions (for other types the value
+	 * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
 	 * here will always be zero).
 	 */
	__u64 cpu_visible_used;
@@ -320,12 +320,12 @@ struct drm_xe_query_config {
 	/** @pad: MBZ */
 	__u32 pad;
 
-#define XE_QUERY_CONFIG_REV_AND_DEVICE_ID	0
-#define XE_QUERY_CONFIG_FLAGS			1
-#define XE_QUERY_CONFIG_FLAGS_HAS_VRAM		(0x1 << 0)
-#define XE_QUERY_CONFIG_MIN_ALIGNMENT		2
-#define XE_QUERY_CONFIG_VA_BITS			3
-#define XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY	4
+#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID		0
+#define DRM_XE_QUERY_CONFIG_FLAGS			1
+#define DRM_XE_QUERY_CONFIG_FLAGS_HAS_VRAM		(0x1 << 0)
+#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT		2
+#define DRM_XE_QUERY_CONFIG_VA_BITS			3
+#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY	4
 	/** @info: array of elements containing the config info */
 	__u64 info[];
 };
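These DRM_XE_QUERY_CONFIG_* indices are consumed through the device query ioctl with the usual two-call (size, then data) pattern. A hedged userspace sketch; DRM_IOCTL_XE_DEVICE_QUERY, DRM_XE_DEVICE_QUERY_CONFIG and the struct drm_xe_device_query layout are not shown in this patch and are assumed from the same header:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include "xe_drm.h" /* assumed */

    /* Read the VA width advertised by the device. Returns 0 on success. */
    static int query_va_bits(int fd, uint64_t *va_bits)
    {
        struct drm_xe_device_query q;
        struct drm_xe_query_config *config;

        memset(&q, 0, sizeof(q));
        q.query = DRM_XE_DEVICE_QUERY_CONFIG;
        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q)) /* 1st call: get size */
            return -1;

        config = calloc(1, q.size);
        if (!config)
            return -1;
        q.data = (uintptr_t)config;
        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q)) { /* 2nd call: get data */
            free(config);
            return -1;
        }

        *va_bits = config->info[DRM_XE_QUERY_CONFIG_VA_BITS];
        free(config);
        return 0;
    }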
@@ -339,8 +339,8 @@ struct drm_xe_query_config {
 * implementing graphics and/or media operations.
 */
 struct drm_xe_query_gt {
-#define XE_QUERY_GT_TYPE_MAIN		0
-#define XE_QUERY_GT_TYPE_MEDIA		1
+#define DRM_XE_QUERY_GT_TYPE_MAIN	0
+#define DRM_XE_QUERY_GT_TYPE_MEDIA	1
 	/** @type: GT type: Main or Media */
 	__u16 type;
 	/** @gt_id: Unique ID of this GT within the PCI Device */
@@ -400,7 +400,7 @@ struct drm_xe_query_topology_mask {
 	 *   DSS_GEOMETRY    ff ff ff ff 00 00 00 00
 	 * means 32 DSS are available for geometry.
 	 */
-#define XE_TOPO_DSS_GEOMETRY	(1 << 0)
+#define DRM_XE_TOPO_DSS_GEOMETRY	(1 << 0)
 	/*
 	 * To query the mask of Dual Sub Slices (DSS) available for compute
 	 * operations. For example a query response containing the following
@@ -408,7 +408,7 @@ struct drm_xe_query_topology_mask {
 	 *   DSS_COMPUTE    ff ff ff ff 00 00 00 00
 	 * means 32 DSS are available for compute.
 	 */
-#define XE_TOPO_DSS_COMPUTE	(1 << 1)
+#define DRM_XE_TOPO_DSS_COMPUTE	(1 << 1)
 	/*
 	 * To query the mask of Execution Units (EU) available per Dual Sub
 	 * Slices (DSS). For example a query response containing the following
@@ -416,7 +416,7 @@ struct drm_xe_query_topology_mask {
 	 *   EU_PER_DSS    ff ff 00 00 00 00 00 00
 	 * means each DSS has 16 EU.
 	 */
-#define XE_TOPO_EU_PER_DSS	(1 << 2)
+#define DRM_XE_TOPO_EU_PER_DSS	(1 << 2)
 	/** @type: type of mask */
 	__u16 type;
 
@@ -497,8 +497,8 @@ struct drm_xe_gem_create {
 	 */
 	__u64 size;
 
-#define XE_GEM_CREATE_FLAG_DEFER_BACKING	(0x1 << 24)
-#define XE_GEM_CREATE_FLAG_SCANOUT		(0x1 << 25)
+#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING	(0x1 << 24)
+#define DRM_XE_GEM_CREATE_FLAG_SCANOUT		(0x1 << 25)
 /*
 * When using VRAM as a possible placement, ensure that the corresponding VRAM
 * allocation will always use the CPU accessible part of VRAM. This is important
@@ -514,7 +514,7 @@ struct drm_xe_gem_create {
 * display surfaces, therefore the kernel requires setting this flag for such
 * objects, otherwise an error is thrown on small-bar systems.
 */
-#define XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM	(0x1 << 26)
+#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM	(0x1 << 26)
 	/**
 	 * @flags: Flags, currently a mask of memory instances of where BO can
 	 * be placed
@@ -581,14 +581,14 @@ struct drm_xe_ext_set_property {
 };
 
 struct drm_xe_vm_create {
-#define XE_VM_EXTENSION_SET_PROPERTY	0
+#define DRM_XE_VM_EXTENSION_SET_PROPERTY	0
 	/** @extensions: Pointer to the first extension struct, if any */
 	__u64 extensions;
 
-#define DRM_XE_VM_CREATE_SCRATCH_PAGE	(0x1 << 0)
-#define DRM_XE_VM_CREATE_COMPUTE_MODE	(0x1 << 1)
-#define DRM_XE_VM_CREATE_ASYNC_DEFAULT	(0x1 << 2)
-#define DRM_XE_VM_CREATE_FAULT_MODE	(0x1 << 3)
+#define DRM_XE_VM_CREATE_SCRATCH_PAGE		(0x1 << 0)
+#define DRM_XE_VM_CREATE_COMPUTE_MODE		(0x1 << 1)
+#define DRM_XE_VM_CREATE_ASYNC_DEFAULT		(0x1 << 2)
+#define DRM_XE_VM_CREATE_FAULT_MODE		(0x1 << 3)
 	/** @flags: Flags */
 	__u32 flags;
 
@@ -644,29 +644,29 @@ struct drm_xe_vm_bind_op {
 	 */
 	__u64 tile_mask;
 
-#define XE_VM_BIND_OP_MAP		0x0
-#define XE_VM_BIND_OP_UNMAP		0x1
-#define XE_VM_BIND_OP_MAP_USERPTR	0x2
-#define XE_VM_BIND_OP_UNMAP_ALL		0x3
-#define XE_VM_BIND_OP_PREFETCH		0x4
+#define DRM_XE_VM_BIND_OP_MAP		0x0
+#define DRM_XE_VM_BIND_OP_UNMAP		0x1
+#define DRM_XE_VM_BIND_OP_MAP_USERPTR	0x2
+#define DRM_XE_VM_BIND_OP_UNMAP_ALL	0x3
+#define DRM_XE_VM_BIND_OP_PREFETCH	0x4
 	/** @op: Bind operation to perform */
 	__u32 op;
 
-#define XE_VM_BIND_FLAG_READONLY	(0x1 << 0)
-#define XE_VM_BIND_FLAG_ASYNC		(0x1 << 1)
+#define DRM_XE_VM_BIND_FLAG_READONLY	(0x1 << 0)
+#define DRM_XE_VM_BIND_FLAG_ASYNC	(0x1 << 1)
 	/*
 	 * Valid on a faulting VM only, do the MAP operation immediately rather
 	 * than deferring the MAP to the page fault handler.
 	 */
-#define XE_VM_BIND_FLAG_IMMEDIATE	(0x1 << 2)
+#define DRM_XE_VM_BIND_FLAG_IMMEDIATE	(0x1 << 2)
 	/*
 	 * When the NULL flag is set, the page tables are setup with a special
 	 * bit which indicates writes are dropped and all reads return zero. In
-	 * the future, the NULL flags will only be valid for XE_VM_BIND_OP_MAP
+	 * the future, the NULL flags will only be valid for DRM_XE_VM_BIND_OP_MAP
 	 * operations, the BO handle MBZ, and the BO offset MBZ. This flag is
 	 * intended to implement VK sparse bindings.
 	 */
-#define XE_VM_BIND_FLAG_NULL		(0x1 << 3)
+#define DRM_XE_VM_BIND_FLAG_NULL	(0x1 << 3)
 	/** @flags: Bind flags */
 	__u32 flags;
 
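A hedged sketch of one map operation using the renamed names. The op, flags and tile_mask fields appear in this hunk; obj, obj_offset, addr and range are assumed members of struct drm_xe_vm_bind_op based on vm_bind_ioctl_check_args() above, and the meaning of tile_mask == 0 (all tiles) is likewise an assumption:

    #include <stdint.h>
    #include <string.h>
    #include "xe_drm.h" /* assumed to provide the struct and DRM_XE_* names */

    /* Fill one bind op: map 'range' bytes of BO 'handle' at GPU VA 'addr',
     * asynchronously, with page tables populated immediately rather than
     * on first fault. */
    static void fill_map_op(struct drm_xe_vm_bind_op *op, uint32_t handle,
                            uint64_t addr, uint64_t range)
    {
        memset(op, 0, sizeof(*op));
        op->obj = handle;
        op->obj_offset = 0;
        op->addr = addr;   /* assumed member, see check_args() above */
        op->range = range; /* assumed member, see check_args() above */
        op->op = DRM_XE_VM_BIND_OP_MAP;
        op->flags = DRM_XE_VM_BIND_FLAG_ASYNC | DRM_XE_VM_BIND_FLAG_IMMEDIATE;
        op->tile_mask = 0; /* assumption: 0 selects all tiles */
    }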
@@ -721,19 +721,19 @@ struct drm_xe_vm_bind {
 	__u64 reserved[2];
 };
 
-/* For use with XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY */
+/* For use with DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY */
 
 /* Monitor 128KB contiguous region with 4K sub-granularity */
-#define XE_ACC_GRANULARITY_128K 0
+#define DRM_XE_ACC_GRANULARITY_128K	0
 
 /* Monitor 2MB contiguous region with 64KB sub-granularity */
-#define XE_ACC_GRANULARITY_2M 1
+#define DRM_XE_ACC_GRANULARITY_2M	1
 
 /* Monitor 16MB contiguous region with 512KB sub-granularity */
-#define XE_ACC_GRANULARITY_16M 2
+#define DRM_XE_ACC_GRANULARITY_16M	2
 
 /* Monitor 64MB contiguous region with 2M sub-granularity */
-#define XE_ACC_GRANULARITY_64M 3
+#define DRM_XE_ACC_GRANULARITY_64M	3
 
 /**
 * struct drm_xe_exec_queue_set_property - exec queue set property
@@ -747,14 +747,14 @@ struct drm_xe_exec_queue_set_property {
 	/** @exec_queue_id: Exec queue ID */
 	__u32 exec_queue_id;
 
-#define XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY		0
-#define XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE		1
-#define XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT	2
-#define XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE		3
-#define XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT		4
-#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER		5
-#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY		6
-#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY	7
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY			0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE		1
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT	2
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE		3
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT		4
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER		5
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY		6
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY		7
 	/** @property: property to set */
 	__u32 property;
 
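A sketch of driving this from userspace; the @value member and the DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY request number are assumptions inferred from the u64-value setter signatures in xe_exec_queue.c above:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include "xe_drm.h" /* assumed */

    /* Cap the access-counter granularity of an exec queue at 64M. */
    static int set_acc_granularity_64m(int fd, uint32_t exec_queue_id)
    {
        struct drm_xe_exec_queue_set_property args;

        memset(&args, 0, sizeof(args));
        args.exec_queue_id = exec_queue_id;
        args.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY;
        args.value = DRM_XE_ACC_GRANULARITY_64M; /* @value: assumed __u64 */

        return ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY, &args);
    }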
@@ -766,7 +766,7 @@ struct drm_xe_exec_queue_set_property {
 };
 
 struct drm_xe_exec_queue_create {
-#define XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY	0
+#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY	0
 	/** @extensions: Pointer to the first extension struct, if any */
 	__u64 extensions;
 
@@ -805,7 +805,7 @@ struct drm_xe_exec_queue_get_property {
 	/** @exec_queue_id: Exec queue ID */
 	__u32 exec_queue_id;
 
-#define XE_EXEC_QUEUE_GET_PROPERTY_BAN	0
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN	0
 	/** @property: property to get */
 	__u32 property;
 
@@ -973,11 +973,11 @@ struct drm_xe_wait_user_fence {
 /**
 * DOC: XE PMU event config IDs
 *
- * Check 'man perf_event_open' to use the ID's XE_PMU_XXXX listed in xe_drm.h
+ * Check 'man perf_event_open' to use the ID's DRM_XE_PMU_XXXX listed in xe_drm.h
 * in 'struct perf_event_attr' as part of perf_event_open syscall to read a
 * particular event.
 *
- * For example to open the XE_PMU_RENDER_GROUP_BUSY(0):
+ * For example to open the DRM_XE_PMU_RENDER_GROUP_BUSY(0):
 *
 * .. code-block:: C
 *
@@ -991,7 +991,7 @@ struct drm_xe_wait_user_fence {
 *	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED;
 *	attr.use_clockid = 1;
 *	attr.clockid = CLOCK_MONOTONIC;
- *	attr.config = XE_PMU_RENDER_GROUP_BUSY(0);
+ *	attr.config = DRM_XE_PMU_RENDER_GROUP_BUSY(0);
 *
 *	fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
 */
@@ -999,15 +999,15 @@ struct drm_xe_wait_user_fence {
 /*
 * Top bits of every counter are GT id.
 */
-#define __XE_PMU_GT_SHIFT (56)
+#define __DRM_XE_PMU_GT_SHIFT (56)
 
-#define ___XE_PMU_OTHER(gt, x) \
-	(((__u64)(x)) | ((__u64)(gt) << __XE_PMU_GT_SHIFT))
+#define ___DRM_XE_PMU_OTHER(gt, x) \
+	(((__u64)(x)) | ((__u64)(gt) << __DRM_XE_PMU_GT_SHIFT))
 
-#define XE_PMU_RENDER_GROUP_BUSY(gt)		___XE_PMU_OTHER(gt, 0)
-#define XE_PMU_COPY_GROUP_BUSY(gt)		___XE_PMU_OTHER(gt, 1)
-#define XE_PMU_MEDIA_GROUP_BUSY(gt)		___XE_PMU_OTHER(gt, 2)
-#define XE_PMU_ANY_ENGINE_GROUP_BUSY(gt)	___XE_PMU_OTHER(gt, 3)
+#define DRM_XE_PMU_RENDER_GROUP_BUSY(gt)	___DRM_XE_PMU_OTHER(gt, 0)
+#define DRM_XE_PMU_COPY_GROUP_BUSY(gt)		___DRM_XE_PMU_OTHER(gt, 1)
+#define DRM_XE_PMU_MEDIA_GROUP_BUSY(gt)		___DRM_XE_PMU_OTHER(gt, 2)
+#define DRM_XE_PMU_ANY_ENGINE_GROUP_BUSY(gt)	___DRM_XE_PMU_OTHER(gt, 3)
 
 #if defined(__cplusplus)
 }
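Rounding out the doc-comment fragment above, a hedged but complete version of the perf_event_open flow. The dynamic PMU type id must be read from sysfs at runtime; the 'xe_0000_03_00.0' path below is an assumed example following common dynamic-PMU naming, not something this patch specifies:

    #include <linux/perf_event.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    #define __DRM_XE_PMU_GT_SHIFT (56)
    #define ___DRM_XE_PMU_OTHER(gt, x) \
        (((uint64_t)(x)) | ((uint64_t)(gt) << __DRM_XE_PMU_GT_SHIFT))
    #define DRM_XE_PMU_RENDER_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 0)

    int main(void)
    {
        struct perf_event_attr attr;
        uint64_t counts[2]; /* {value, time_enabled} */
        int type, fd;

        /* Assumed sysfs node exposing the dynamic PMU type id. */
        FILE *f = fopen("/sys/bus/event_source/devices/xe_0000_03_00.0/type", "r");
        if (!f || fscanf(f, "%d", &type) != 1)
            return 1;
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.type = type;
        attr.size = sizeof(attr);
        attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED;
        attr.use_clockid = 1;
        attr.clockid = CLOCK_MONOTONIC;
        attr.config = DRM_XE_PMU_RENDER_GROUP_BUSY(0);

        /* Uncore-style event: pid = -1, a valid online CPU. */
        fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
        if (fd < 0)
            return 1;

        if (read(fd, counts, sizeof(counts)) == (ssize_t)sizeof(counts))
            printf("render group busy: %llu (enabled %llu ns)\n",
                   (unsigned long long)counts[0],
                   (unsigned long long)counts[1]);
        close(fd);
        return 0;
    }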