drm: move amd_gpu_scheduler into common location
This moves and renames the AMDGPU scheduler to a common location in DRM in order to facilitate re-use by other drivers. It is mostly a straightforward rename with no code changes. One notable exception is the function to_drm_sched_fence(), which is no longer an inline header function, to avoid having to export the drm_sched_fence_ops_scheduled and drm_sched_fence_ops_finished structures.

Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 9ce6aae12c
commit 1b1f42d8fd
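For a driver that wants to pick up the relocated scheduler, the change boils down to selecting DRM_SCHED, including the new header and using the drm_sched_* names. A minimal sketch of that wiring follows; the my_ring structure and my_ring_sched_init() helper are illustrative, not part of this commit, and drm_sched_init() keeps the same parameters amd_sched_init() had (ops, queue depth, hang limit, timeout, name):

```c
#include <linux/jiffies.h>
#include <drm/gpu_scheduler.h>

struct my_ring {
	struct drm_gpu_scheduler sched;		/* was: struct amd_gpu_scheduler */
	/* ... hardware ring state ... */
};

static int my_ring_sched_init(struct my_ring *ring,
			      const struct drm_sched_backend_ops *ops)
{
	/* backend ops, hw submission limit, job hang limit, timeout, name */
	return drm_sched_init(&ring->sched, ops, 16, 0,
			      msecs_to_jiffies(500), "my-ring");
}
```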
@@ -149,6 +149,10 @@ config DRM_VM
 	bool
 	depends on DRM && MMU
 
+config DRM_SCHED
+	tristate
+	depends on DRM
+
 source "drivers/gpu/drm/i2c/Kconfig"
 
 source "drivers/gpu/drm/arm/Kconfig"
@@ -178,6 +182,7 @@ config DRM_AMDGPU
 	depends on DRM && PCI && MMU
 	select FW_LOADER
 	select DRM_KMS_HELPER
+	select DRM_SCHED
 	select DRM_TTM
 	select POWER_SUPPLY
 	select HWMON
@@ -101,3 +101,4 @@ obj-$(CONFIG_DRM_MXSFB)	+= mxsfb/
 obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
 obj-$(CONFIG_DRM_PL111) += pl111/
 obj-$(CONFIG_DRM_TVE200) += tve200/
+obj-$(CONFIG_DRM_SCHED)	+= scheduler/
@@ -135,10 +135,7 @@ amdgpu-y += \
 amdgpu-y += amdgpu_cgs.o
 
 # GPU scheduler
-amdgpu-y += \
-	../scheduler/gpu_scheduler.o \
-	../scheduler/sched_fence.o \
-	amdgpu_job.o
+amdgpu-y += amdgpu_job.o
 
 # ACP componet
 ifneq ($(CONFIG_DRM_AMD_ACP),)
@@ -45,6 +45,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_gem.h>
 #include <drm/amdgpu_drm.h>
+#include <drm/gpu_scheduler.h>
 
 #include <kgd_kfd_interface.h>
 #include "dm_pp_interface.h"
@@ -68,7 +69,6 @@
 #include "amdgpu_vcn.h"
 #include "amdgpu_mn.h"
 #include "amdgpu_dm.h"
-#include "gpu_scheduler.h"
 #include "amdgpu_virt.h"
 #include "amdgpu_gart.h"
 
@@ -689,7 +689,7 @@ struct amdgpu_ib {
 	uint32_t			flags;
 };
 
-extern const struct amd_sched_backend_ops amdgpu_sched_ops;
+extern const struct drm_sched_backend_ops amdgpu_sched_ops;
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 		     struct amdgpu_job **job, struct amdgpu_vm *vm);
@@ -699,7 +699,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-		      struct amd_sched_entity *entity, void *owner,
+		      struct drm_sched_entity *entity, void *owner,
 		      struct dma_fence **f);
 
 /*
@@ -732,7 +732,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 struct amdgpu_ctx_ring {
 	uint64_t		sequence;
 	struct dma_fence	**fences;
-	struct amd_sched_entity	entity;
+	struct drm_sched_entity	entity;
 };
 
 struct amdgpu_ctx {
@@ -746,8 +746,8 @@ struct amdgpu_ctx {
 	struct dma_fence	**fences;
 	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
 	bool			preamble_presented;
-	enum amd_sched_priority	init_priority;
-	enum amd_sched_priority	override_priority;
+	enum drm_sched_priority	init_priority;
+	enum drm_sched_priority	override_priority;
 	struct mutex		lock;
 	atomic_t		guilty;
 };
@@ -767,7 +767,7 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 				       struct amdgpu_ring *ring, uint64_t seq);
 void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
-				  enum amd_sched_priority priority);
+				  enum drm_sched_priority priority);
 
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp);
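The two priority fields interact the same way after the rename: an explicit override wins unless it is still DRM_SCHED_PRIORITY_UNSET, in which case the context keeps the priority it was created with. A small sketch of that rule (this helper does not exist in the tree; it only restates the expression used in the amdgpu_ctx_priority_override() hunk further down):

```c
static enum drm_sched_priority
amdgpu_ctx_effective_priority(const struct amdgpu_ctx *ctx)
{
	/* override wins unless it was never set */
	return (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
}
```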
@@ -1116,7 +1116,7 @@ struct amdgpu_cs_parser {
 #define AMDGPU_HAVE_CTX_SWITCH		(1 << 2) /* bit set means context switch occured */
 
 struct amdgpu_job {
-	struct amd_sched_job    base;
+	struct drm_sched_job    base;
 	struct amdgpu_device	*adev;
 	struct amdgpu_vm	*vm;
 	struct amdgpu_ring	*ring;
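The amdgpu_job change above is the usual embedding pattern: the driver-side job carries the scheduler job as a member and recovers itself with container_of(). A sketch of the accessor under the new type name (amdgpu's real helper may be spelled as a macro; this form is illustrative):

```c
static inline struct amdgpu_job *to_amdgpu_job(struct drm_sched_job *sched_job)
{
	/* "base" is the struct drm_sched_job member inside struct amdgpu_job */
	return container_of(sched_job, struct amdgpu_job, base);
}
```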
@@ -1150,7 +1150,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 			    union drm_amdgpu_cs *cs)
 {
 	struct amdgpu_ring *ring = p->job->ring;
-	struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+	struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
 	struct amdgpu_job *job;
 	unsigned i;
 	uint64_t seq;
@@ -1173,7 +1173,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	job = p->job;
 	p->job = NULL;
 
-	r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+	r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
 	if (r) {
 		amdgpu_job_free(job);
 		amdgpu_mn_unlock(p->mn);
@@ -1202,7 +1202,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
 
 	trace_amdgpu_cs_ioctl(job);
-	amd_sched_entity_push_job(&job->base, entity);
+	drm_sched_entity_push_job(&job->base, entity);
 
 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
 	amdgpu_mn_unlock(p->mn);
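The call sequence in amdgpu_cs_submit() is the generic submission pattern under the new names: initialize the scheduler job against an entity, then push it. A condensed sketch, assuming a hypothetical my_push() wrapper and with error handling trimmed to the essentials:

```c
static int my_push(struct amdgpu_job *job, struct amdgpu_ring *ring,
		   struct drm_sched_entity *entity, void *owner)
{
	int r;

	r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
	if (r)
		return r;

	/* after the push the scheduler owns the job and will invoke the
	 * driver's run_job callback once this entity is selected to run */
	drm_sched_entity_push_job(&job->base, entity);
	return 0;
}
```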
@ -28,10 +28,10 @@
|
||||
#include "amdgpu_sched.h"
|
||||
|
||||
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
|
||||
enum amd_sched_priority priority)
|
||||
enum drm_sched_priority priority)
|
||||
{
|
||||
/* NORMAL and below are accessible by everyone */
|
||||
if (priority <= AMD_SCHED_PRIORITY_NORMAL)
|
||||
if (priority <= DRM_SCHED_PRIORITY_NORMAL)
|
||||
return 0;
|
||||
|
||||
if (capable(CAP_SYS_NICE))
|
||||
@ -44,14 +44,14 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
|
||||
}
|
||||
|
||||
static int amdgpu_ctx_init(struct amdgpu_device *adev,
|
||||
enum amd_sched_priority priority,
|
||||
enum drm_sched_priority priority,
|
||||
struct drm_file *filp,
|
||||
struct amdgpu_ctx *ctx)
|
||||
{
|
||||
unsigned i, j;
|
||||
int r;
|
||||
|
||||
if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
|
||||
if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
r = amdgpu_ctx_priority_permit(filp, priority);
|
||||
@ -78,19 +78,19 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
|
||||
ctx->reset_counter_query = ctx->reset_counter;
|
||||
ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
|
||||
ctx->init_priority = priority;
|
||||
ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
|
||||
ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
|
||||
|
||||
/* create context entity for each ring */
|
||||
for (i = 0; i < adev->num_rings; i++) {
|
||||
struct amdgpu_ring *ring = adev->rings[i];
|
||||
struct amd_sched_rq *rq;
|
||||
struct drm_sched_rq *rq;
|
||||
|
||||
rq = &ring->sched.sched_rq[priority];
|
||||
|
||||
if (ring == &adev->gfx.kiq.ring)
|
||||
continue;
|
||||
|
||||
r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
|
||||
r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
|
||||
rq, amdgpu_sched_jobs, &ctx->guilty);
|
||||
if (r)
|
||||
goto failed;
|
||||
@ -104,7 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
|
||||
|
||||
failed:
|
||||
for (j = 0; j < i; j++)
|
||||
amd_sched_entity_fini(&adev->rings[j]->sched,
|
||||
drm_sched_entity_fini(&adev->rings[j]->sched,
|
||||
&ctx->rings[j].entity);
|
||||
kfree(ctx->fences);
|
||||
ctx->fences = NULL;
|
||||
@ -126,7 +126,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
|
||||
ctx->fences = NULL;
|
||||
|
||||
for (i = 0; i < adev->num_rings; i++)
|
||||
amd_sched_entity_fini(&adev->rings[i]->sched,
|
||||
drm_sched_entity_fini(&adev->rings[i]->sched,
|
||||
&ctx->rings[i].entity);
|
||||
|
||||
amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
|
||||
@ -137,7 +137,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
|
||||
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
|
||||
struct amdgpu_fpriv *fpriv,
|
||||
struct drm_file *filp,
|
||||
enum amd_sched_priority priority,
|
||||
enum drm_sched_priority priority,
|
||||
uint32_t *id)
|
||||
{
|
||||
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
|
||||
@ -266,7 +266,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
|
||||
{
|
||||
int r;
|
||||
uint32_t id;
|
||||
enum amd_sched_priority priority;
|
||||
enum drm_sched_priority priority;
|
||||
|
||||
union drm_amdgpu_ctx *args = data;
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
@ -278,8 +278,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
|
||||
|
||||
/* For backwards compatibility reasons, we need to accept
|
||||
* ioctls with garbage in the priority field */
|
||||
if (priority == AMD_SCHED_PRIORITY_INVALID)
|
||||
priority = AMD_SCHED_PRIORITY_NORMAL;
|
||||
if (priority == DRM_SCHED_PRIORITY_INVALID)
|
||||
priority = DRM_SCHED_PRIORITY_NORMAL;
|
||||
|
||||
switch (args->in.op) {
|
||||
case AMDGPU_CTX_OP_ALLOC_CTX:
|
||||
@ -385,18 +385,18 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
|
||||
}
|
||||
|
||||
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
|
||||
enum amd_sched_priority priority)
|
||||
enum drm_sched_priority priority)
|
||||
{
|
||||
int i;
|
||||
struct amdgpu_device *adev = ctx->adev;
|
||||
struct amd_sched_rq *rq;
|
||||
struct amd_sched_entity *entity;
|
||||
struct drm_sched_rq *rq;
|
||||
struct drm_sched_entity *entity;
|
||||
struct amdgpu_ring *ring;
|
||||
enum amd_sched_priority ctx_prio;
|
||||
enum drm_sched_priority ctx_prio;
|
||||
|
||||
ctx->override_priority = priority;
|
||||
|
||||
ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
|
||||
ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
|
||||
ctx->init_priority : ctx->override_priority;
|
||||
|
||||
for (i = 0; i < adev->num_rings; i++) {
|
||||
@ -407,7 +407,7 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
|
||||
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
|
||||
continue;
|
||||
|
||||
amd_sched_entity_set_rq(entity, rq);
|
||||
drm_sched_entity_set_rq(entity, rq);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3058,7 +3058,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
|
||||
continue;
|
||||
|
||||
kthread_park(ring->sched.thread);
|
||||
amd_sched_hw_job_reset(&ring->sched, &job->base);
|
||||
drm_sched_hw_job_reset(&ring->sched, &job->base);
|
||||
|
||||
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
|
||||
amdgpu_fence_driver_force_completion(ring);
|
||||
@ -3111,7 +3111,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
|
||||
if (job && job->ring->idx != i)
|
||||
continue;
|
||||
|
||||
amd_sched_job_recovery(&ring->sched);
|
||||
drm_sched_job_recovery(&ring->sched);
|
||||
kthread_unpark(ring->sched.thread);
|
||||
}
|
||||
} else {
|
||||
|
@ -912,7 +912,7 @@ static int __init amdgpu_init(void)
|
||||
if (r)
|
||||
goto error_fence;
|
||||
|
||||
r = amd_sched_fence_slab_init();
|
||||
r = drm_sched_fence_slab_init();
|
||||
if (r)
|
||||
goto error_sched;
|
||||
|
||||
@ -944,7 +944,7 @@ static void __exit amdgpu_exit(void)
|
||||
pci_unregister_driver(pdriver);
|
||||
amdgpu_unregister_atpx_handler();
|
||||
amdgpu_sync_fini();
|
||||
amd_sched_fence_slab_fini();
|
||||
drm_sched_fence_slab_fini();
|
||||
amdgpu_fence_slab_fini();
|
||||
}
|
||||
|
||||
|
@ -445,7 +445,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
|
||||
*/
|
||||
timeout = MAX_SCHEDULE_TIMEOUT;
|
||||
}
|
||||
r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
|
||||
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
|
||||
num_hw_submission, amdgpu_job_hang_limit,
|
||||
timeout, ring->name);
|
||||
if (r) {
|
||||
@ -503,7 +503,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
|
||||
}
|
||||
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
|
||||
ring->fence_drv.irq_type);
|
||||
amd_sched_fini(&ring->sched);
|
||||
drm_sched_fini(&ring->sched);
|
||||
del_timer_sync(&ring->fence_drv.fallback_timer);
|
||||
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
|
||||
dma_fence_put(ring->fence_drv.fences[j]);
|
||||
|
@ -28,7 +28,7 @@
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_trace.h"
|
||||
|
||||
static void amdgpu_job_timedout(struct amd_sched_job *s_job)
|
||||
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
|
||||
{
|
||||
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
|
||||
|
||||
@ -96,7 +96,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
|
||||
amdgpu_ib_free(job->adev, &job->ibs[i], f);
|
||||
}
|
||||
|
||||
static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
|
||||
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
|
||||
{
|
||||
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
|
||||
|
||||
@ -118,7 +118,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
|
||||
}
|
||||
|
||||
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
|
||||
struct amd_sched_entity *entity, void *owner,
|
||||
struct drm_sched_entity *entity, void *owner,
|
||||
struct dma_fence **f)
|
||||
{
|
||||
int r;
|
||||
@ -127,7 +127,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
|
||||
if (!f)
|
||||
return -EINVAL;
|
||||
|
||||
r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
|
||||
r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
@ -136,13 +136,13 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
|
||||
*f = dma_fence_get(&job->base.s_fence->finished);
|
||||
amdgpu_job_free_resources(job);
|
||||
amdgpu_ring_priority_get(job->ring, job->base.s_priority);
|
||||
amd_sched_entity_push_job(&job->base, entity);
|
||||
drm_sched_entity_push_job(&job->base, entity);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
|
||||
struct amd_sched_entity *s_entity)
|
||||
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
|
||||
struct drm_sched_entity *s_entity)
|
||||
{
|
||||
struct amdgpu_job *job = to_amdgpu_job(sched_job);
|
||||
struct amdgpu_vm *vm = job->vm;
|
||||
@ -151,7 +151,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
|
||||
struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
|
||||
|
||||
if (fence && explicit) {
|
||||
if (amd_sched_dependency_optimized(fence, s_entity)) {
|
||||
if (drm_sched_dependency_optimized(fence, s_entity)) {
|
||||
r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
|
||||
if (r)
|
||||
DRM_ERROR("Error adding fence to sync (%d)\n", r);
|
||||
@ -173,7 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
|
||||
return fence;
|
||||
}
|
||||
|
||||
static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
|
||||
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
|
||||
{
|
||||
struct dma_fence *fence = NULL, *finished;
|
||||
struct amdgpu_device *adev;
|
||||
@ -211,7 +211,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
|
||||
return fence;
|
||||
}
|
||||
|
||||
const struct amd_sched_backend_ops amdgpu_sched_ops = {
|
||||
const struct drm_sched_backend_ops amdgpu_sched_ops = {
|
||||
.dependency = amdgpu_job_dependency,
|
||||
.run_job = amdgpu_job_run,
|
||||
.timedout_job = amdgpu_job_timedout,
|
||||
|
@ -164,7 +164,7 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
|
||||
* Release a request for executing at @priority
|
||||
*/
|
||||
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
|
||||
enum amd_sched_priority priority)
|
||||
enum drm_sched_priority priority)
|
||||
{
|
||||
int i;
|
||||
|
||||
@ -175,7 +175,7 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
|
||||
return;
|
||||
|
||||
/* no need to restore if the job is already at the lowest priority */
|
||||
if (priority == AMD_SCHED_PRIORITY_NORMAL)
|
||||
if (priority == DRM_SCHED_PRIORITY_NORMAL)
|
||||
return;
|
||||
|
||||
mutex_lock(&ring->priority_mutex);
|
||||
@ -184,8 +184,8 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
|
||||
goto out_unlock;
|
||||
|
||||
/* decay priority to the next level with a job available */
|
||||
for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
|
||||
if (i == AMD_SCHED_PRIORITY_NORMAL
|
||||
for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
|
||||
if (i == DRM_SCHED_PRIORITY_NORMAL
|
||||
|| atomic_read(&ring->num_jobs[i])) {
|
||||
ring->priority = i;
|
||||
ring->funcs->set_priority(ring, i);
|
||||
@ -206,7 +206,7 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
|
||||
* Request a ring's priority to be raised to @priority (refcounted).
|
||||
*/
|
||||
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
|
||||
enum amd_sched_priority priority)
|
||||
enum drm_sched_priority priority)
|
||||
{
|
||||
if (!ring->funcs->set_priority)
|
||||
return;
|
||||
@ -317,12 +317,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
|
||||
}
|
||||
|
||||
ring->max_dw = max_dw;
|
||||
ring->priority = AMD_SCHED_PRIORITY_NORMAL;
|
||||
ring->priority = DRM_SCHED_PRIORITY_NORMAL;
|
||||
mutex_init(&ring->priority_mutex);
|
||||
INIT_LIST_HEAD(&ring->lru_list);
|
||||
amdgpu_ring_lru_touch(adev, ring);
|
||||
|
||||
for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i)
|
||||
for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
|
||||
atomic_set(&ring->num_jobs[i], 0);
|
||||
|
||||
if (amdgpu_debugfs_ring_init(adev, ring)) {
|
||||
|
@ -25,7 +25,7 @@
|
||||
#define __AMDGPU_RING_H__
|
||||
|
||||
#include <drm/amdgpu_drm.h>
|
||||
#include "gpu_scheduler.h"
|
||||
#include <drm/gpu_scheduler.h>
|
||||
|
||||
/* max number of rings */
|
||||
#define AMDGPU_MAX_RINGS 18
|
||||
@ -154,14 +154,14 @@ struct amdgpu_ring_funcs {
|
||||
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
|
||||
/* priority functions */
|
||||
void (*set_priority) (struct amdgpu_ring *ring,
|
||||
enum amd_sched_priority priority);
|
||||
enum drm_sched_priority priority);
|
||||
};
|
||||
|
||||
struct amdgpu_ring {
|
||||
struct amdgpu_device *adev;
|
||||
const struct amdgpu_ring_funcs *funcs;
|
||||
struct amdgpu_fence_driver fence_drv;
|
||||
struct amd_gpu_scheduler sched;
|
||||
struct drm_gpu_scheduler sched;
|
||||
struct list_head lru_list;
|
||||
|
||||
struct amdgpu_bo *ring_obj;
|
||||
@ -196,7 +196,7 @@ struct amdgpu_ring {
|
||||
unsigned vm_inv_eng;
|
||||
bool has_compute_vm_bug;
|
||||
|
||||
atomic_t num_jobs[AMD_SCHED_PRIORITY_MAX];
|
||||
atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
|
||||
struct mutex priority_mutex;
|
||||
/* protected by priority_mutex */
|
||||
int priority;
|
||||
@ -212,9 +212,9 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
|
||||
void amdgpu_ring_commit(struct amdgpu_ring *ring);
|
||||
void amdgpu_ring_undo(struct amdgpu_ring *ring);
|
||||
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
|
||||
enum amd_sched_priority priority);
|
||||
enum drm_sched_priority priority);
|
||||
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
|
||||
enum amd_sched_priority priority);
|
||||
enum drm_sched_priority priority);
|
||||
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
|
||||
unsigned ring_size, struct amdgpu_irq_src *irq_src,
|
||||
unsigned irq_type);
|
||||
|
@@ -29,29 +29,29 @@
 
 #include "amdgpu_vm.h"
 
-enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
+enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
 {
 	switch (amdgpu_priority) {
 	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
-		return AMD_SCHED_PRIORITY_HIGH_HW;
+		return DRM_SCHED_PRIORITY_HIGH_HW;
 	case AMDGPU_CTX_PRIORITY_HIGH:
-		return AMD_SCHED_PRIORITY_HIGH_SW;
+		return DRM_SCHED_PRIORITY_HIGH_SW;
 	case AMDGPU_CTX_PRIORITY_NORMAL:
-		return AMD_SCHED_PRIORITY_NORMAL;
+		return DRM_SCHED_PRIORITY_NORMAL;
 	case AMDGPU_CTX_PRIORITY_LOW:
 	case AMDGPU_CTX_PRIORITY_VERY_LOW:
-		return AMD_SCHED_PRIORITY_LOW;
+		return DRM_SCHED_PRIORITY_LOW;
 	case AMDGPU_CTX_PRIORITY_UNSET:
-		return AMD_SCHED_PRIORITY_UNSET;
+		return DRM_SCHED_PRIORITY_UNSET;
 	default:
 		WARN(1, "Invalid context priority %d\n", amdgpu_priority);
-		return AMD_SCHED_PRIORITY_INVALID;
+		return DRM_SCHED_PRIORITY_INVALID;
 	}
 }
 
 static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
 						  int fd,
-						  enum amd_sched_priority priority)
+						  enum drm_sched_priority priority)
 {
 	struct file *filp = fcheck(fd);
 	struct drm_file *file;
@@ -86,11 +86,11 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
 {
 	union drm_amdgpu_sched *args = data;
 	struct amdgpu_device *adev = dev->dev_private;
-	enum amd_sched_priority priority;
+	enum drm_sched_priority priority;
 	int r;
 
 	priority = amdgpu_to_sched_priority(args->in.priority);
-	if (args->in.flags || priority == AMD_SCHED_PRIORITY_INVALID)
+	if (args->in.flags || priority == DRM_SCHED_PRIORITY_INVALID)
 		return -EINVAL;
 
 	switch (args->in.op) {
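The sentinel values keep their meaning across the rename: INVALID and UNSET stay negative, so the range check performed when a context is created continues to reject them. A small sketch of that validation, condensed from the amdgpu_ctx_init() hunk earlier in this diff (the helper name is illustrative):

```c
static int my_check_priority(enum drm_sched_priority priority)
{
	/* DRM_SCHED_PRIORITY_INVALID (-1) and _UNSET (-2) fail this check */
	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;
	return 0;
}
```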
@ -27,7 +27,7 @@
|
||||
|
||||
#include <drm/drmP.h>
|
||||
|
||||
enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
|
||||
enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
|
||||
int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *filp);
|
||||
|
||||
|
@ -64,7 +64,7 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
|
||||
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
|
||||
struct dma_fence *f)
|
||||
{
|
||||
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
|
||||
struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
|
||||
|
||||
if (s_fence) {
|
||||
struct amdgpu_ring *ring;
|
||||
@ -85,7 +85,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
|
||||
*/
|
||||
static void *amdgpu_sync_get_owner(struct dma_fence *f)
|
||||
{
|
||||
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
|
||||
struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
|
||||
|
||||
if (s_fence)
|
||||
return s_fence->owner;
|
||||
@ -248,7 +248,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
|
||||
|
||||
hash_for_each_safe(sync->fences, i, tmp, e, node) {
|
||||
struct dma_fence *f = e->fence;
|
||||
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
|
||||
struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
|
||||
|
||||
if (dma_fence_is_signaled(f)) {
|
||||
hash_del(&e->node);
|
||||
|
@ -76,7 +76,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
|
||||
{
|
||||
struct drm_global_reference *global_ref;
|
||||
struct amdgpu_ring *ring;
|
||||
struct amd_sched_rq *rq;
|
||||
struct drm_sched_rq *rq;
|
||||
int r;
|
||||
|
||||
adev->mman.mem_global_referenced = false;
|
||||
@ -108,8 +108,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
|
||||
mutex_init(&adev->mman.gtt_window_lock);
|
||||
|
||||
ring = adev->mman.buffer_funcs_ring;
|
||||
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
|
||||
r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
|
||||
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
||||
r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
|
||||
rq, amdgpu_sched_jobs, NULL);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed setting up TTM BO move run queue.\n");
|
||||
@ -131,7 +131,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
|
||||
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->mman.mem_global_referenced) {
|
||||
amd_sched_entity_fini(adev->mman.entity.sched,
|
||||
drm_sched_entity_fini(adev->mman.entity.sched,
|
||||
&adev->mman.entity);
|
||||
mutex_destroy(&adev->mman.gtt_window_lock);
|
||||
drm_global_item_unref(&adev->mman.bo_global_ref.ref);
|
||||
|
@ -25,7 +25,7 @@
|
||||
#define __AMDGPU_TTM_H__
|
||||
|
||||
#include "amdgpu.h"
|
||||
#include "gpu_scheduler.h"
|
||||
#include <drm/gpu_scheduler.h>
|
||||
|
||||
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
|
||||
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
|
||||
@ -55,7 +55,7 @@ struct amdgpu_mman {
|
||||
|
||||
struct mutex gtt_window_lock;
|
||||
/* Scheduler entity for buffer moves */
|
||||
struct amd_sched_entity entity;
|
||||
struct drm_sched_entity entity;
|
||||
};
|
||||
|
||||
struct amdgpu_copy_mem {
|
||||
|
@ -116,7 +116,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
|
||||
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_ring *ring;
|
||||
struct amd_sched_rq *rq;
|
||||
struct drm_sched_rq *rq;
|
||||
unsigned long bo_size;
|
||||
const char *fw_name;
|
||||
const struct common_firmware_header *hdr;
|
||||
@ -230,8 +230,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
|
||||
}
|
||||
|
||||
ring = &adev->uvd.ring;
|
||||
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
|
||||
r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
|
||||
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
|
||||
r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
|
||||
rq, amdgpu_sched_jobs, NULL);
|
||||
if (r != 0) {
|
||||
DRM_ERROR("Failed setting up UVD run queue.\n");
|
||||
@ -272,7 +272,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
|
||||
int i;
|
||||
kfree(adev->uvd.saved_bo);
|
||||
|
||||
amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
|
||||
drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
|
||||
|
||||
amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
|
||||
&adev->uvd.gpu_addr,
|
||||
|
@ -51,8 +51,8 @@ struct amdgpu_uvd {
|
||||
struct amdgpu_irq_src irq;
|
||||
bool address_64_bit;
|
||||
bool use_ctx_buf;
|
||||
struct amd_sched_entity entity;
|
||||
struct amd_sched_entity entity_enc;
|
||||
struct drm_sched_entity entity;
|
||||
struct drm_sched_entity entity_enc;
|
||||
uint32_t srbm_soft_reset;
|
||||
unsigned num_enc_rings;
|
||||
};
|
||||
|
@ -85,7 +85,7 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work);
|
||||
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
|
||||
{
|
||||
struct amdgpu_ring *ring;
|
||||
struct amd_sched_rq *rq;
|
||||
struct drm_sched_rq *rq;
|
||||
const char *fw_name;
|
||||
const struct common_firmware_header *hdr;
|
||||
unsigned ucode_version, version_major, version_minor, binary_id;
|
||||
@ -174,8 +174,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
|
||||
}
|
||||
|
||||
ring = &adev->vce.ring[0];
|
||||
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
|
||||
r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
|
||||
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
|
||||
r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
|
||||
rq, amdgpu_sched_jobs, NULL);
|
||||
if (r != 0) {
|
||||
DRM_ERROR("Failed setting up VCE run queue.\n");
|
||||
@ -207,7 +207,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
|
||||
if (adev->vce.vcpu_bo == NULL)
|
||||
return 0;
|
||||
|
||||
amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
|
||||
drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
|
||||
|
||||
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
|
||||
(void **)&adev->vce.cpu_addr);
|
||||
|
@ -46,7 +46,7 @@ struct amdgpu_vce {
|
||||
struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
|
||||
struct amdgpu_irq_src irq;
|
||||
unsigned harvest_config;
|
||||
struct amd_sched_entity entity;
|
||||
struct drm_sched_entity entity;
|
||||
uint32_t srbm_soft_reset;
|
||||
unsigned num_rings;
|
||||
};
|
||||
|
@ -51,7 +51,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
|
||||
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_ring *ring;
|
||||
struct amd_sched_rq *rq;
|
||||
struct drm_sched_rq *rq;
|
||||
unsigned long bo_size;
|
||||
const char *fw_name;
|
||||
const struct common_firmware_header *hdr;
|
||||
@ -104,8 +104,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
|
||||
}
|
||||
|
||||
ring = &adev->vcn.ring_dec;
|
||||
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
|
||||
r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
|
||||
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
|
||||
r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
|
||||
rq, amdgpu_sched_jobs, NULL);
|
||||
if (r != 0) {
|
||||
DRM_ERROR("Failed setting up VCN dec run queue.\n");
|
||||
@ -113,8 +113,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
|
||||
}
|
||||
|
||||
ring = &adev->vcn.ring_enc[0];
|
||||
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
|
||||
r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
|
||||
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
|
||||
r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
|
||||
rq, amdgpu_sched_jobs, NULL);
|
||||
if (r != 0) {
|
||||
DRM_ERROR("Failed setting up VCN enc run queue.\n");
|
||||
@ -130,9 +130,9 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
|
||||
|
||||
kfree(adev->vcn.saved_bo);
|
||||
|
||||
amd_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
|
||||
drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
|
||||
|
||||
amd_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
|
||||
drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
|
||||
|
||||
amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
|
||||
&adev->vcn.gpu_addr,
|
||||
|
@ -56,8 +56,8 @@ struct amdgpu_vcn {
|
||||
struct amdgpu_ring ring_dec;
|
||||
struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
|
||||
struct amdgpu_irq_src irq;
|
||||
struct amd_sched_entity entity_dec;
|
||||
struct amd_sched_entity entity_enc;
|
||||
struct drm_sched_entity entity_dec;
|
||||
struct drm_sched_entity entity_enc;
|
||||
unsigned num_enc_rings;
|
||||
};
|
||||
|
||||
|
@ -2643,7 +2643,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
AMDGPU_VM_PTE_COUNT(adev) * 8);
|
||||
unsigned ring_instance;
|
||||
struct amdgpu_ring *ring;
|
||||
struct amd_sched_rq *rq;
|
||||
struct drm_sched_rq *rq;
|
||||
int r, i;
|
||||
u64 flags;
|
||||
uint64_t init_pde_value = 0;
|
||||
@ -2663,8 +2663,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
|
||||
ring_instance %= adev->vm_manager.vm_pte_num_rings;
|
||||
ring = adev->vm_manager.vm_pte_rings[ring_instance];
|
||||
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
|
||||
r = amd_sched_entity_init(&ring->sched, &vm->entity,
|
||||
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
||||
r = drm_sched_entity_init(&ring->sched, &vm->entity,
|
||||
rq, amdgpu_sched_jobs, NULL);
|
||||
if (r)
|
||||
return r;
|
||||
@ -2744,7 +2744,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
vm->root.base.bo = NULL;
|
||||
|
||||
error_free_sched_entity:
|
||||
amd_sched_entity_fini(&ring->sched, &vm->entity);
|
||||
drm_sched_entity_fini(&ring->sched, &vm->entity);
|
||||
|
||||
return r;
|
||||
}
|
||||
@ -2803,7 +2803,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
||||
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
|
||||
}
|
||||
|
||||
amd_sched_entity_fini(vm->entity.sched, &vm->entity);
|
||||
drm_sched_entity_fini(vm->entity.sched, &vm->entity);
|
||||
|
||||
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
|
||||
dev_err(adev->dev, "still active bo inside vm\n");
|
||||
|
@ -24,10 +24,11 @@
|
||||
#ifndef __AMDGPU_VM_H__
|
||||
#define __AMDGPU_VM_H__
|
||||
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/kfifo.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <drm/gpu_scheduler.h>
|
||||
|
||||
#include "gpu_scheduler.h"
|
||||
#include "amdgpu_sync.h"
|
||||
#include "amdgpu_ring.h"
|
||||
|
||||
@ -175,7 +176,7 @@ struct amdgpu_vm {
|
||||
spinlock_t freed_lock;
|
||||
|
||||
/* Scheduler entity for page table updates */
|
||||
struct amd_sched_entity entity;
|
||||
struct drm_sched_entity entity;
|
||||
|
||||
/* client id and PASID (TODO: replace client_id with PASID) */
|
||||
u64 client_id;
|
||||
|
@ -6472,10 +6472,10 @@ static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
|
||||
mutex_unlock(&adev->srbm_mutex);
|
||||
}
|
||||
static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
|
||||
enum amd_sched_priority priority)
|
||||
enum drm_sched_priority priority)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
bool acquire = priority == AMD_SCHED_PRIORITY_HIGH_HW;
|
||||
bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
|
||||
|
||||
if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
|
||||
return;
|
||||
|
@ -412,10 +412,10 @@ static int uvd_v6_0_sw_init(void *handle)
|
||||
return r;
|
||||
|
||||
if (uvd_v6_0_enc_support(adev)) {
|
||||
struct amd_sched_rq *rq;
|
||||
struct drm_sched_rq *rq;
|
||||
ring = &adev->uvd.ring_enc[0];
|
||||
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
|
||||
r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
|
||||
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
|
||||
r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
|
||||
rq, amdgpu_sched_jobs, NULL);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
|
||||
@ -456,7 +456,7 @@ static int uvd_v6_0_sw_fini(void *handle)
|
||||
return r;
|
||||
|
||||
if (uvd_v6_0_enc_support(adev)) {
|
||||
amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
|
||||
drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
|
||||
|
||||
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
|
||||
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
|
||||
|
@ -385,7 +385,7 @@ static int uvd_v7_0_early_init(void *handle)
|
||||
static int uvd_v7_0_sw_init(void *handle)
|
||||
{
|
||||
struct amdgpu_ring *ring;
|
||||
struct amd_sched_rq *rq;
|
||||
struct drm_sched_rq *rq;
|
||||
int i, r;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
@ -416,8 +416,8 @@ static int uvd_v7_0_sw_init(void *handle)
|
||||
}
|
||||
|
||||
ring = &adev->uvd.ring_enc[0];
|
||||
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
|
||||
r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
|
||||
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
|
||||
r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
|
||||
rq, amdgpu_sched_jobs, NULL);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
|
||||
@ -472,7 +472,7 @@ static int uvd_v7_0_sw_fini(void *handle)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
|
||||
drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
|
||||
|
||||
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
|
||||
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
|
||||
|
@ -1,186 +0,0 @@
|
||||
/*
|
||||
* Copyright 2015 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
-#ifndef _GPU_SCHEDULER_H_
-#define _GPU_SCHEDULER_H_
-
-#include <linux/kfifo.h>
-#include <linux/dma-fence.h>
-#include "spsc_queue.h"
-
-struct amd_gpu_scheduler;
-struct amd_sched_rq;
-
-enum amd_sched_priority {
-	AMD_SCHED_PRIORITY_MIN,
-	AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
-	AMD_SCHED_PRIORITY_NORMAL,
-	AMD_SCHED_PRIORITY_HIGH_SW,
-	AMD_SCHED_PRIORITY_HIGH_HW,
-	AMD_SCHED_PRIORITY_KERNEL,
-	AMD_SCHED_PRIORITY_MAX,
-	AMD_SCHED_PRIORITY_INVALID = -1,
-	AMD_SCHED_PRIORITY_UNSET = -2
-};
-
-
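The priority enum survives the move one-for-one; only the prefix changes. Judging by the DRM_SCHED_* users throughout this diff, the relocated header exposes roughly the following:

```c
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};
```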
/**
|
||||
* A scheduler entity is a wrapper around a job queue or a group
|
||||
* of other entities. Entities take turns emitting jobs from their
|
||||
* job queues to corresponding hardware ring based on scheduling
|
||||
* policy.
|
||||
*/
|
||||
struct amd_sched_entity {
|
||||
struct list_head list;
|
||||
struct amd_sched_rq *rq;
|
||||
spinlock_t rq_lock;
|
||||
struct amd_gpu_scheduler *sched;
|
||||
|
||||
spinlock_t queue_lock;
|
||||
struct spsc_queue job_queue;
|
||||
|
||||
atomic_t fence_seq;
|
||||
uint64_t fence_context;
|
||||
|
||||
struct dma_fence *dependency;
|
||||
struct dma_fence_cb cb;
|
||||
atomic_t *guilty; /* points to ctx's guilty */
|
||||
};
|
||||
|
||||
/**
|
||||
* Run queue is a set of entities scheduling command submissions for
|
||||
* one specific ring. It implements the scheduling policy that selects
|
||||
* the next entity to emit commands from.
|
||||
*/
|
||||
struct amd_sched_rq {
|
||||
spinlock_t lock;
|
||||
struct list_head entities;
|
||||
struct amd_sched_entity *current_entity;
|
||||
};
|
||||
|
||||
struct amd_sched_fence {
|
||||
struct dma_fence scheduled;
|
||||
struct dma_fence finished;
|
||||
struct dma_fence_cb cb;
|
||||
struct dma_fence *parent;
|
||||
struct amd_gpu_scheduler *sched;
|
||||
spinlock_t lock;
|
||||
void *owner;
|
||||
};
|
||||
|
||||
struct amd_sched_job {
|
||||
struct spsc_node queue_node;
|
||||
struct amd_gpu_scheduler *sched;
|
||||
struct amd_sched_fence *s_fence;
|
||||
struct dma_fence_cb finish_cb;
|
||||
struct work_struct finish_work;
|
||||
struct list_head node;
|
||||
struct delayed_work work_tdr;
|
||||
uint64_t id;
|
||||
atomic_t karma;
|
||||
enum amd_sched_priority s_priority;
|
||||
};
|
||||
|
||||
-extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
-extern const struct dma_fence_ops amd_sched_fence_ops_finished;
-static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
-{
-	if (f->ops == &amd_sched_fence_ops_scheduled)
-		return container_of(f, struct amd_sched_fence, scheduled);
-
-	if (f->ops == &amd_sched_fence_ops_finished)
-		return container_of(f, struct amd_sched_fence, finished);
-
-	return NULL;
-}
-
-static inline bool amd_sched_invalidate_job(struct amd_sched_job *s_job, int threshold)
-{
-	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
-}
-
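This is the inline helper the commit message calls out: keeping it in the header would force the two dma_fence_ops structures to be exported. After the move the function lives in the scheduler module itself, the ops can stay static, and only the helper is exported. Roughly, the relocated version looks like this (a sketch consistent with the commit message, not quoted from this diff):

```c
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
{
	if (f->ops == &drm_sched_fence_ops_scheduled)
		return container_of(f, struct drm_sched_fence, scheduled);

	if (f->ops == &drm_sched_fence_ops_finished)
		return container_of(f, struct drm_sched_fence, finished);

	return NULL;
}
EXPORT_SYMBOL(to_drm_sched_fence);
```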
/**
|
||||
* Define the backend operations called by the scheduler,
|
||||
* these functions should be implemented in driver side
|
||||
*/
|
||||
struct amd_sched_backend_ops {
|
||||
struct dma_fence *(*dependency)(struct amd_sched_job *sched_job,
|
||||
struct amd_sched_entity *s_entity);
|
||||
struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
|
||||
void (*timedout_job)(struct amd_sched_job *sched_job);
|
||||
void (*free_job)(struct amd_sched_job *sched_job);
|
||||
};
|
||||
|
||||
/**
|
||||
* One scheduler is implemented for each hardware ring
|
||||
*/
|
||||
struct amd_gpu_scheduler {
|
||||
const struct amd_sched_backend_ops *ops;
|
||||
uint32_t hw_submission_limit;
|
||||
long timeout;
|
||||
const char *name;
|
||||
struct amd_sched_rq sched_rq[AMD_SCHED_PRIORITY_MAX];
|
||||
wait_queue_head_t wake_up_worker;
|
||||
wait_queue_head_t job_scheduled;
|
||||
atomic_t hw_rq_count;
|
||||
atomic64_t job_id_count;
|
||||
struct task_struct *thread;
|
||||
struct list_head ring_mirror_list;
|
||||
spinlock_t job_list_lock;
|
||||
int hang_limit;
|
||||
};
|
||||
|
||||
int amd_sched_init(struct amd_gpu_scheduler *sched,
|
||||
const struct amd_sched_backend_ops *ops,
|
||||
uint32_t hw_submission, unsigned hang_limit, long timeout, const char *name);
|
||||
void amd_sched_fini(struct amd_gpu_scheduler *sched);
|
||||
|
||||
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
|
||||
struct amd_sched_entity *entity,
|
||||
struct amd_sched_rq *rq,
|
||||
uint32_t jobs, atomic_t* guilty);
|
||||
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
|
||||
struct amd_sched_entity *entity);
|
||||
void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
|
||||
struct amd_sched_entity *entity);
|
||||
void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
|
||||
struct amd_sched_rq *rq);
|
||||
|
||||
int amd_sched_fence_slab_init(void);
|
||||
void amd_sched_fence_slab_fini(void);
|
||||
|
||||
struct amd_sched_fence *amd_sched_fence_create(
|
||||
struct amd_sched_entity *s_entity, void *owner);
|
||||
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
|
||||
void amd_sched_fence_finished(struct amd_sched_fence *fence);
|
||||
int amd_sched_job_init(struct amd_sched_job *job,
|
||||
struct amd_gpu_scheduler *sched,
|
||||
struct amd_sched_entity *entity,
|
||||
void *owner);
|
||||
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *job);
|
||||
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
|
||||
bool amd_sched_dependency_optimized(struct dma_fence* fence,
|
||||
struct amd_sched_entity *entity);
|
||||
void amd_sched_job_kickout(struct amd_sched_job *s_job);
|
||||
|
||||
#endif
|
drivers/gpu/drm/scheduler/Makefile (new file, 4 lines)
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+gpu-sched-y := gpu_scheduler.o sched_fence.o
+
+obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
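With the scheduler built as its own module, the backend callbacks keep the same shape they had as amd_sched_backend_ops, just under the new names. What a driver reusing it would implement looks roughly like the sketch below; the my_job structure and my_* callbacks are hypothetical placeholders, and run_job would normally return the hardware fence for the submitted job:

```c
#include <linux/slab.h>
#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

struct my_job {
	struct drm_sched_job base;
	/* ... driver specific state ... */
};

static struct dma_fence *my_job_dependency(struct drm_sched_job *sched_job,
					   struct drm_sched_entity *s_entity)
{
	return NULL;	/* no additional dependencies in this sketch */
}

static struct dma_fence *my_job_run(struct drm_sched_job *sched_job)
{
	/* real drivers push the job to the hardware here and return the
	 * hardware fence; this placeholder just reports "nothing to wait on" */
	return NULL;
}

static void my_job_timedout(struct drm_sched_job *sched_job)
{
	/* kick the driver's reset/recovery path */
}

static void my_job_free(struct drm_sched_job *sched_job)
{
	kfree(container_of(sched_job, struct my_job, base));
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.dependency	= my_job_dependency,
	.run_job	= my_job_run,
	.timedout_job	= my_job_timedout,
	.free_job	= my_job_free,
};
```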
@ -19,37 +19,36 @@
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/sched.h>
|
||||
#include <uapi/linux/sched/types.h>
|
||||
#include <drm/drmP.h>
|
||||
#include "gpu_scheduler.h"
|
||||
|
||||
#include "spsc_queue.h"
|
||||
#include <drm/gpu_scheduler.h>
|
||||
#include <drm/spsc_queue.h>
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include "gpu_sched_trace.h"
|
||||
#include <drm/gpu_scheduler_trace.h>
|
||||
|
||||
#define to_amd_sched_job(sched_job) \
|
||||
container_of((sched_job), struct amd_sched_job, queue_node)
|
||||
#define to_drm_sched_job(sched_job) \
|
||||
container_of((sched_job), struct drm_sched_job, queue_node)
|
||||
|
||||
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
|
||||
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
|
||||
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
|
||||
static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
|
||||
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
|
||||
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
|
||||
|
||||
/* Initialize a given run queue struct */
|
||||
static void amd_sched_rq_init(struct amd_sched_rq *rq)
|
||||
static void drm_sched_rq_init(struct drm_sched_rq *rq)
|
||||
{
|
||||
spin_lock_init(&rq->lock);
|
||||
INIT_LIST_HEAD(&rq->entities);
|
||||
rq->current_entity = NULL;
|
||||
}
|
||||
|
||||
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
|
||||
struct amd_sched_entity *entity)
|
||||
static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
|
||||
struct drm_sched_entity *entity)
|
||||
{
|
||||
if (!list_empty(&entity->list))
|
||||
return;
|
||||
@ -58,8 +57,8 @@ static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
|
||||
spin_unlock(&rq->lock);
|
||||
}
|
||||
|
||||
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
|
||||
struct amd_sched_entity *entity)
|
||||
static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
|
||||
struct drm_sched_entity *entity)
|
||||
{
|
||||
if (list_empty(&entity->list))
|
||||
return;
|
||||
@ -77,17 +76,17 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
|
||||
*
|
||||
* Try to find a ready entity, returns NULL if none found.
|
||||
*/
|
||||
static struct amd_sched_entity *
|
||||
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
|
||||
static struct drm_sched_entity *
|
||||
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
|
||||
{
|
||||
struct amd_sched_entity *entity;
|
||||
struct drm_sched_entity *entity;
|
||||
|
||||
spin_lock(&rq->lock);
|
||||
|
||||
entity = rq->current_entity;
|
||||
if (entity) {
|
||||
list_for_each_entry_continue(entity, &rq->entities, list) {
|
||||
if (amd_sched_entity_is_ready(entity)) {
|
||||
if (drm_sched_entity_is_ready(entity)) {
|
||||
rq->current_entity = entity;
|
||||
spin_unlock(&rq->lock);
|
||||
return entity;
|
||||
@ -97,7 +96,7 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
|
||||
|
||||
list_for_each_entry(entity, &rq->entities, list) {
|
||||
|
||||
if (amd_sched_entity_is_ready(entity)) {
|
||||
if (drm_sched_entity_is_ready(entity)) {
|
||||
rq->current_entity = entity;
|
||||
spin_unlock(&rq->lock);
|
||||
return entity;
|
||||
@ -116,22 +115,22 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
|
||||
* Init a context entity used by scheduler when submit to HW ring.
|
||||
*
|
||||
* @sched The pointer to the scheduler
|
||||
* @entity The pointer to a valid amd_sched_entity
|
||||
* @entity The pointer to a valid drm_sched_entity
|
||||
* @rq The run queue this entity belongs
|
||||
* @kernel If this is an entity for the kernel
|
||||
* @jobs The max number of jobs in the job queue
|
||||
*
|
||||
* return 0 if succeed. negative error code on failure
|
||||
*/
|
||||
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
|
||||
struct amd_sched_entity *entity,
|
||||
struct amd_sched_rq *rq,
|
||||
int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
|
||||
struct drm_sched_entity *entity,
|
||||
struct drm_sched_rq *rq,
|
||||
uint32_t jobs, atomic_t *guilty)
|
||||
{
|
||||
if (!(sched && entity && rq))
|
||||
return -EINVAL;
|
||||
|
||||
memset(entity, 0, sizeof(struct amd_sched_entity));
|
||||
memset(entity, 0, sizeof(struct drm_sched_entity));
|
||||
INIT_LIST_HEAD(&entity->list);
|
||||
entity->rq = rq;
|
||||
entity->sched = sched;
|
||||
@ -146,6 +145,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sched_entity_init);
|
||||
|
||||
/**
|
||||
* Query if entity is initialized
|
||||
@ -155,8 +155,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
|
||||
*
|
||||
* return true if entity is initialized, false otherwise
|
||||
*/
|
||||
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
|
||||
struct amd_sched_entity *entity)
|
||||
static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
|
||||
struct drm_sched_entity *entity)
|
||||
{
|
||||
return entity->sched == sched &&
|
||||
entity->rq != NULL;
|
||||
@ -169,7 +169,7 @@ static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
|
||||
*
|
||||
* Return true if entity don't has any unscheduled jobs.
|
||||
*/
|
||||
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
|
||||
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
|
||||
{
|
||||
rmb();
|
||||
if (spsc_queue_peek(&entity->job_queue) == NULL)
|
||||
@ -185,7 +185,7 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
|
||||
*
|
||||
* Return true if entity could provide a job.
|
||||
*/
|
||||
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
|
||||
static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
|
||||
{
|
||||
if (spsc_queue_peek(&entity->job_queue) == NULL)
|
||||
return false;
|
||||
@ -204,12 +204,12 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
|
||||
*
|
||||
* Cleanup and free the allocated resources.
|
||||
*/
|
||||
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
|
||||
struct amd_sched_entity *entity)
|
||||
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
|
||||
struct drm_sched_entity *entity)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (!amd_sched_entity_is_initialized(sched, entity))
|
||||
if (!drm_sched_entity_is_initialized(sched, entity))
|
||||
return;
|
||||
/**
|
||||
* The client will not queue more IBs during this fini, consume existing
|
||||
@ -219,10 +219,10 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
|
||||
r = -ERESTARTSYS;
|
||||
else
|
||||
r = wait_event_killable(sched->job_scheduled,
|
||||
amd_sched_entity_is_idle(entity));
|
||||
amd_sched_entity_set_rq(entity, NULL);
|
||||
drm_sched_entity_is_idle(entity));
|
||||
drm_sched_entity_set_rq(entity, NULL);
|
||||
if (r) {
|
||||
struct amd_sched_job *job;
|
||||
struct drm_sched_job *job;
|
||||
|
||||
/* Park the kernel for a moment to make sure it isn't processing
|
||||
* our enity.
|
||||
@ -236,37 +236,38 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
|
||||
entity->dependency = NULL;
|
||||
}
|
||||
|
||||
while ((job = to_amd_sched_job(spsc_queue_pop(&entity->job_queue)))) {
|
||||
struct amd_sched_fence *s_fence = job->s_fence;
|
||||
amd_sched_fence_scheduled(s_fence);
|
||||
while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
|
||||
struct drm_sched_fence *s_fence = job->s_fence;
|
||||
drm_sched_fence_scheduled(s_fence);
|
||||
dma_fence_set_error(&s_fence->finished, -ESRCH);
|
||||
amd_sched_fence_finished(s_fence);
|
||||
drm_sched_fence_finished(s_fence);
|
||||
WARN_ON(s_fence->parent);
|
||||
dma_fence_put(&s_fence->finished);
|
||||
sched->ops->free_job(job);
|
||||
}
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sched_entity_fini);
|
||||
|
||||
static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
|
||||
static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
|
||||
{
|
||||
struct amd_sched_entity *entity =
|
||||
container_of(cb, struct amd_sched_entity, cb);
|
||||
struct drm_sched_entity *entity =
|
||||
container_of(cb, struct drm_sched_entity, cb);
|
||||
entity->dependency = NULL;
|
||||
dma_fence_put(f);
|
||||
amd_sched_wakeup(entity->sched);
|
||||
drm_sched_wakeup(entity->sched);
|
||||
}
|
||||
|
||||
static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
|
||||
static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
|
||||
{
|
||||
struct amd_sched_entity *entity =
|
||||
container_of(cb, struct amd_sched_entity, cb);
|
||||
struct drm_sched_entity *entity =
|
||||
container_of(cb, struct drm_sched_entity, cb);
|
||||
entity->dependency = NULL;
|
||||
dma_fence_put(f);
|
||||
}
|
||||
|
||||
void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
|
||||
struct amd_sched_rq *rq)
|
||||
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
|
||||
struct drm_sched_rq *rq)
|
||||
{
|
||||
if (entity->rq == rq)
|
||||
return;
|
||||
@ -274,37 +275,39 @@ void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
|
||||
spin_lock(&entity->rq_lock);
|
||||
|
||||
if (entity->rq)
|
||||
amd_sched_rq_remove_entity(entity->rq, entity);
|
||||
drm_sched_rq_remove_entity(entity->rq, entity);
|
||||
|
||||
entity->rq = rq;
|
||||
if (rq)
|
||||
-    amd_sched_rq_add_entity(rq, entity);
+    drm_sched_rq_add_entity(rq, entity);
     spin_unlock(&entity->rq_lock);
 }
+EXPORT_SYMBOL(drm_sched_entity_set_rq);
 
-bool amd_sched_dependency_optimized(struct dma_fence* fence,
-                                    struct amd_sched_entity *entity)
+bool drm_sched_dependency_optimized(struct dma_fence* fence,
+                                    struct drm_sched_entity *entity)
 {
-    struct amd_gpu_scheduler *sched = entity->sched;
-    struct amd_sched_fence *s_fence;
+    struct drm_gpu_scheduler *sched = entity->sched;
+    struct drm_sched_fence *s_fence;
 
     if (!fence || dma_fence_is_signaled(fence))
         return false;
     if (fence->context == entity->fence_context)
         return true;
-    s_fence = to_amd_sched_fence(fence);
+    s_fence = to_drm_sched_fence(fence);
     if (s_fence && s_fence->sched == sched)
         return true;
 
     return false;
 }
+EXPORT_SYMBOL(drm_sched_dependency_optimized);
 
-static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
+static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 {
-    struct amd_gpu_scheduler *sched = entity->sched;
+    struct drm_gpu_scheduler *sched = entity->sched;
     struct dma_fence * fence = entity->dependency;
-    struct amd_sched_fence *s_fence;
+    struct drm_sched_fence *s_fence;
 
     if (fence->context == entity->fence_context) {
         /* We can ignore fences from ourself */
@ -312,7 +315,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
         return false;
     }
 
-    s_fence = to_amd_sched_fence(fence);
+    s_fence = to_drm_sched_fence(fence);
     if (s_fence && s_fence->sched == sched) {
 
         /*
@ -323,7 +326,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
         dma_fence_put(entity->dependency);
         entity->dependency = fence;
         if (!dma_fence_add_callback(fence, &entity->cb,
-                                    amd_sched_entity_clear_dep))
+                                    drm_sched_entity_clear_dep))
             return true;
 
         /* Ignore it when it is already scheduled */
@ -332,25 +335,25 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
     }
 
     if (!dma_fence_add_callback(entity->dependency, &entity->cb,
-                                amd_sched_entity_wakeup))
+                                drm_sched_entity_wakeup))
         return true;
 
     dma_fence_put(entity->dependency);
     return false;
 }
 
-static struct amd_sched_job *
-amd_sched_entity_pop_job(struct amd_sched_entity *entity)
+static struct drm_sched_job *
+drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 {
-    struct amd_gpu_scheduler *sched = entity->sched;
-    struct amd_sched_job *sched_job = to_amd_sched_job(
+    struct drm_gpu_scheduler *sched = entity->sched;
+    struct drm_sched_job *sched_job = to_drm_sched_job(
                                           spsc_queue_peek(&entity->job_queue));
 
     if (!sched_job)
         return NULL;
 
     while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
-        if (amd_sched_entity_add_dependency_cb(entity))
+        if (drm_sched_entity_add_dependency_cb(entity))
             return NULL;
 
     /* skip jobs from entity that marked guilty */
@ -368,13 +371,13 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
  *
  * Returns 0 for success, negative error code otherwise.
  */
-void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
-                               struct amd_sched_entity *entity)
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+                               struct drm_sched_entity *entity)
 {
-    struct amd_gpu_scheduler *sched = sched_job->sched;
+    struct drm_gpu_scheduler *sched = sched_job->sched;
     bool first = false;
 
-    trace_amd_sched_job(sched_job, entity);
+    trace_drm_sched_job(sched_job, entity);
 
     spin_lock(&entity->queue_lock);
     first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
@ -385,25 +388,26 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
     if (first) {
         /* Add the entity to the run queue */
         spin_lock(&entity->rq_lock);
-        amd_sched_rq_add_entity(entity->rq, entity);
+        drm_sched_rq_add_entity(entity->rq, entity);
         spin_unlock(&entity->rq_lock);
-        amd_sched_wakeup(sched);
+        drm_sched_wakeup(sched);
     }
 }
+EXPORT_SYMBOL(drm_sched_entity_push_job);
 
 /* job_finish is called after hw fence signaled
  */
-static void amd_sched_job_finish(struct work_struct *work)
+static void drm_sched_job_finish(struct work_struct *work)
 {
-    struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+    struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
                                                finish_work);
-    struct amd_gpu_scheduler *sched = s_job->sched;
+    struct drm_gpu_scheduler *sched = s_job->sched;
 
     /* remove job from ring_mirror_list */
     spin_lock(&sched->job_list_lock);
     list_del_init(&s_job->node);
     if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-        struct amd_sched_job *next;
+        struct drm_sched_job *next;
 
         spin_unlock(&sched->job_list_lock);
         cancel_delayed_work_sync(&s_job->work_tdr);
@ -411,7 +415,7 @@ static void amd_sched_job_finish(struct work_struct *work)
 
         /* queue TDR for next job */
         next = list_first_entry_or_null(&sched->ring_mirror_list,
-                                        struct amd_sched_job, node);
+                                        struct drm_sched_job, node);
 
         if (next)
             schedule_delayed_work(&next->work_tdr, sched->timeout);
@ -421,42 +425,42 @@ static void amd_sched_job_finish(struct work_struct *work)
     sched->ops->free_job(s_job);
 }
 
-static void amd_sched_job_finish_cb(struct dma_fence *f,
+static void drm_sched_job_finish_cb(struct dma_fence *f,
                                     struct dma_fence_cb *cb)
 {
-    struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+    struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
                                              finish_cb);
     schedule_work(&job->finish_work);
 }
 
-static void amd_sched_job_begin(struct amd_sched_job *s_job)
+static void drm_sched_job_begin(struct drm_sched_job *s_job)
 {
-    struct amd_gpu_scheduler *sched = s_job->sched;
+    struct drm_gpu_scheduler *sched = s_job->sched;
 
     dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
-                           amd_sched_job_finish_cb);
+                           drm_sched_job_finish_cb);
 
     spin_lock(&sched->job_list_lock);
     list_add_tail(&s_job->node, &sched->ring_mirror_list);
     if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
         list_first_entry_or_null(&sched->ring_mirror_list,
-                                 struct amd_sched_job, node) == s_job)
+                                 struct drm_sched_job, node) == s_job)
         schedule_delayed_work(&s_job->work_tdr, sched->timeout);
     spin_unlock(&sched->job_list_lock);
 }
 
-static void amd_sched_job_timedout(struct work_struct *work)
+static void drm_sched_job_timedout(struct work_struct *work)
 {
-    struct amd_sched_job *job = container_of(work, struct amd_sched_job,
+    struct drm_sched_job *job = container_of(work, struct drm_sched_job,
                                              work_tdr.work);
 
     job->sched->ops->timedout_job(job);
 }
 
-void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad)
+void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 {
-    struct amd_sched_job *s_job;
-    struct amd_sched_entity *entity, *tmp;
+    struct drm_sched_job *s_job;
+    struct drm_sched_entity *entity, *tmp;
     int i;;
 
     spin_lock(&sched->job_list_lock);
@ -471,14 +475,14 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_jo
     }
     spin_unlock(&sched->job_list_lock);
 
-    if (bad && bad->s_priority != AMD_SCHED_PRIORITY_KERNEL) {
+    if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
         atomic_inc(&bad->karma);
         /* don't increase @bad's karma if it's from KERNEL RQ,
          * becuase sometimes GPU hang would cause kernel jobs (like VM updating jobs)
          * corrupt but keep in mind that kernel jobs always considered good.
          */
-        for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_KERNEL; i++ ) {
-            struct amd_sched_rq *rq = &sched->sched_rq[i];
+        for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++ ) {
+            struct drm_sched_rq *rq = &sched->sched_rq[i];
 
             spin_lock(&rq->lock);
             list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
@ -495,30 +499,22 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_jo
         }
     }
 }
+EXPORT_SYMBOL(drm_sched_hw_job_reset);
 
-void amd_sched_job_kickout(struct amd_sched_job *s_job)
+void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 {
-    struct amd_gpu_scheduler *sched = s_job->sched;
-
-    spin_lock(&sched->job_list_lock);
-    list_del_init(&s_job->node);
-    spin_unlock(&sched->job_list_lock);
-}
-
-void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
-{
-    struct amd_sched_job *s_job, *tmp;
+    struct drm_sched_job *s_job, *tmp;
     bool found_guilty = false;
     int r;
 
     spin_lock(&sched->job_list_lock);
     s_job = list_first_entry_or_null(&sched->ring_mirror_list,
-                                     struct amd_sched_job, node);
+                                     struct drm_sched_job, node);
     if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
         schedule_delayed_work(&s_job->work_tdr, sched->timeout);
 
     list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
-        struct amd_sched_fence *s_fence = s_job->s_fence;
+        struct drm_sched_fence *s_fence = s_job->s_fence;
         struct dma_fence *fence;
         uint64_t guilty_context;
 
@ -536,45 +532,47 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
         if (fence) {
             s_fence->parent = dma_fence_get(fence);
             r = dma_fence_add_callback(fence, &s_fence->cb,
-                                       amd_sched_process_job);
+                                       drm_sched_process_job);
             if (r == -ENOENT)
-                amd_sched_process_job(fence, &s_fence->cb);
+                drm_sched_process_job(fence, &s_fence->cb);
             else if (r)
                 DRM_ERROR("fence add callback failed (%d)\n",
                           r);
             dma_fence_put(fence);
         } else {
-            amd_sched_process_job(NULL, &s_fence->cb);
+            drm_sched_process_job(NULL, &s_fence->cb);
         }
         spin_lock(&sched->job_list_lock);
     }
     spin_unlock(&sched->job_list_lock);
 }
+EXPORT_SYMBOL(drm_sched_job_recovery);
 
 /* init a sched_job with basic field */
-int amd_sched_job_init(struct amd_sched_job *job,
-                       struct amd_gpu_scheduler *sched,
-                       struct amd_sched_entity *entity,
+int drm_sched_job_init(struct drm_sched_job *job,
+                       struct drm_gpu_scheduler *sched,
+                       struct drm_sched_entity *entity,
                        void *owner)
 {
     job->sched = sched;
     job->s_priority = entity->rq - sched->sched_rq;
-    job->s_fence = amd_sched_fence_create(entity, owner);
+    job->s_fence = drm_sched_fence_create(entity, owner);
     if (!job->s_fence)
         return -ENOMEM;
     job->id = atomic64_inc_return(&sched->job_id_count);
 
-    INIT_WORK(&job->finish_work, amd_sched_job_finish);
+    INIT_WORK(&job->finish_work, drm_sched_job_finish);
     INIT_LIST_HEAD(&job->node);
-    INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
+    INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);
 
     return 0;
 }
+EXPORT_SYMBOL(drm_sched_job_init);
 
 /**
  * Return ture if we can push more jobs to the hw.
  */
-static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
+static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
 {
     return atomic_read(&sched->hw_rq_count) <
         sched->hw_submission_limit;
@ -583,27 +581,27 @@ static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
 /**
  * Wake up the scheduler when it is ready
  */
-static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
+static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
 {
-    if (amd_sched_ready(sched))
+    if (drm_sched_ready(sched))
         wake_up_interruptible(&sched->wake_up_worker);
 }
 
 /**
  * Select next entity to process
  */
-static struct amd_sched_entity *
-amd_sched_select_entity(struct amd_gpu_scheduler *sched)
+static struct drm_sched_entity *
+drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 {
-    struct amd_sched_entity *entity;
+    struct drm_sched_entity *entity;
     int i;
 
-    if (!amd_sched_ready(sched))
+    if (!drm_sched_ready(sched))
         return NULL;
 
     /* Kernel run queue has higher priority than normal run queue*/
-    for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
-        entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
+    for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+        entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
         if (entity)
             break;
     }
@ -611,22 +609,22 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched)
     return entity;
 }
 
-static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
+static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 {
-    struct amd_sched_fence *s_fence =
-        container_of(cb, struct amd_sched_fence, cb);
-    struct amd_gpu_scheduler *sched = s_fence->sched;
+    struct drm_sched_fence *s_fence =
+        container_of(cb, struct drm_sched_fence, cb);
+    struct drm_gpu_scheduler *sched = s_fence->sched;
 
     dma_fence_get(&s_fence->finished);
     atomic_dec(&sched->hw_rq_count);
-    amd_sched_fence_finished(s_fence);
+    drm_sched_fence_finished(s_fence);
 
-    trace_amd_sched_process_job(s_fence);
+    trace_drm_sched_process_job(s_fence);
     dma_fence_put(&s_fence->finished);
     wake_up_interruptible(&sched->wake_up_worker);
 }
 
-static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
 {
     if (kthread_should_park()) {
         kthread_parkme();
@ -636,52 +634,52 @@ static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
     return false;
 }
 
-static int amd_sched_main(void *param)
+static int drm_sched_main(void *param)
 {
     struct sched_param sparam = {.sched_priority = 1};
-    struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
+    struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
     int r;
 
     sched_setscheduler(current, SCHED_FIFO, &sparam);
 
     while (!kthread_should_stop()) {
-        struct amd_sched_entity *entity = NULL;
-        struct amd_sched_fence *s_fence;
-        struct amd_sched_job *sched_job;
+        struct drm_sched_entity *entity = NULL;
+        struct drm_sched_fence *s_fence;
+        struct drm_sched_job *sched_job;
         struct dma_fence *fence;
 
         wait_event_interruptible(sched->wake_up_worker,
-                                 (!amd_sched_blocked(sched) &&
-                                  (entity = amd_sched_select_entity(sched))) ||
+                                 (!drm_sched_blocked(sched) &&
+                                  (entity = drm_sched_select_entity(sched))) ||
                                  kthread_should_stop());
 
         if (!entity)
             continue;
 
-        sched_job = amd_sched_entity_pop_job(entity);
+        sched_job = drm_sched_entity_pop_job(entity);
        if (!sched_job)
             continue;
 
         s_fence = sched_job->s_fence;
 
         atomic_inc(&sched->hw_rq_count);
-        amd_sched_job_begin(sched_job);
+        drm_sched_job_begin(sched_job);
 
         fence = sched->ops->run_job(sched_job);
-        amd_sched_fence_scheduled(s_fence);
+        drm_sched_fence_scheduled(s_fence);
 
         if (fence) {
             s_fence->parent = dma_fence_get(fence);
             r = dma_fence_add_callback(fence, &s_fence->cb,
-                                       amd_sched_process_job);
+                                       drm_sched_process_job);
             if (r == -ENOENT)
-                amd_sched_process_job(fence, &s_fence->cb);
+                drm_sched_process_job(fence, &s_fence->cb);
             else if (r)
                 DRM_ERROR("fence add callback failed (%d)\n",
                           r);
             dma_fence_put(fence);
         } else {
-            amd_sched_process_job(NULL, &s_fence->cb);
+            drm_sched_process_job(NULL, &s_fence->cb);
         }
 
         wake_up(&sched->job_scheduled);
@ -699,8 +697,8 @@ static int amd_sched_main(void *param)
  *
  * Return 0 on success, otherwise error code.
  */
-int amd_sched_init(struct amd_gpu_scheduler *sched,
-                   const struct amd_sched_backend_ops *ops,
+int drm_sched_init(struct drm_gpu_scheduler *sched,
+                   const struct drm_sched_backend_ops *ops,
                    unsigned hw_submission,
                    unsigned hang_limit,
                    long timeout,
@ -712,8 +710,8 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
     sched->name = name;
     sched->timeout = timeout;
     sched->hang_limit = hang_limit;
-    for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
-        amd_sched_rq_init(&sched->sched_rq[i]);
+    for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
+        drm_sched_rq_init(&sched->sched_rq[i]);
 
     init_waitqueue_head(&sched->wake_up_worker);
     init_waitqueue_head(&sched->job_scheduled);
@ -723,7 +721,7 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
     atomic64_set(&sched->job_id_count, 0);
 
     /* Each scheduler will run on a seperate kernel thread */
-    sched->thread = kthread_run(amd_sched_main, sched, sched->name);
+    sched->thread = kthread_run(drm_sched_main, sched, sched->name);
     if (IS_ERR(sched->thread)) {
         DRM_ERROR("Failed to create scheduler for %s.\n", name);
         return PTR_ERR(sched->thread);
@ -731,14 +729,16 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 
     return 0;
 }
+EXPORT_SYMBOL(drm_sched_init);
 
 /**
  * Destroy a gpu scheduler
  *
  * @sched The pointer to the scheduler
  */
-void amd_sched_fini(struct amd_gpu_scheduler *sched)
+void drm_sched_fini(struct drm_gpu_scheduler *sched)
 {
     if (sched->thread)
         kthread_stop(sched->thread);
 }
+EXPORT_SYMBOL(drm_sched_fini);
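
drm_sched_hw_job_reset() and drm_sched_job_recovery() above are the two halves of the timeout handling: the first detaches the hardware fences of the jobs still on the ring_mirror_list (and raises the karma of the bad job), the second re-submits those jobs and re-arms the TDR timer. A rough sketch of how a driver might drive them from its timeout path, assuming a hypothetical foo driver whose foo_device_reset() is a placeholder and not part of this patch:

#include <linux/kthread.h>
#include <drm/gpu_scheduler.h>

void foo_device_reset(void);    /* placeholder for the driver's hardware reset */

static void foo_timedout_job(struct drm_sched_job *sched_job)
{
    struct drm_gpu_scheduler *sched = sched_job->sched;

    /* park the scheduler thread so no new jobs are picked up */
    kthread_park(sched->thread);

    /* drop the callbacks on the in-flight jobs, blaming sched_job */
    drm_sched_hw_job_reset(sched, sched_job);

    foo_device_reset();

    /* push the surviving jobs back to the hardware and restart TDR */
    drm_sched_job_recovery(sched);

    kthread_unpark(sched->thread);
}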

@ -19,57 +19,36 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  *
  */
 
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
 #include <drm/drmP.h>
-#include "gpu_scheduler.h"
+#include <drm/gpu_scheduler.h>
 
 static struct kmem_cache *sched_fence_slab;
 
-int amd_sched_fence_slab_init(void)
+int drm_sched_fence_slab_init(void)
 {
     sched_fence_slab = kmem_cache_create(
-        "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+        "drm_sched_fence", sizeof(struct drm_sched_fence), 0,
         SLAB_HWCACHE_ALIGN, NULL);
     if (!sched_fence_slab)
         return -ENOMEM;
 
     return 0;
 }
+EXPORT_SYMBOL_GPL(drm_sched_fence_slab_init);
 
-void amd_sched_fence_slab_fini(void)
+void drm_sched_fence_slab_fini(void)
 {
     rcu_barrier();
     kmem_cache_destroy(sched_fence_slab);
 }
+EXPORT_SYMBOL_GPL(drm_sched_fence_slab_fini);
 
-struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
-                                               void *owner)
-{
-    struct amd_sched_fence *fence = NULL;
-    unsigned seq;
-
-    fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
-    if (fence == NULL)
-        return NULL;
-
-    fence->owner = owner;
-    fence->sched = entity->sched;
-    spin_lock_init(&fence->lock);
-
-    seq = atomic_inc_return(&entity->fence_seq);
-    dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
-                   &fence->lock, entity->fence_context, seq);
-    dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished,
-                   &fence->lock, entity->fence_context + 1, seq);
-
-    return fence;
-}
-
-void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
 {
     int ret = dma_fence_signal(&fence->scheduled);
 
@ -81,7 +60,7 @@ void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
         "was already signaled\n");
 }
 
-void amd_sched_fence_finished(struct amd_sched_fence *fence)
+void drm_sched_fence_finished(struct drm_sched_fence *fence)
 {
     int ret = dma_fence_signal(&fence->finished);
 
@ -93,18 +72,18 @@ void amd_sched_fence_finished(struct amd_sched_fence *fence)
         "was already signaled\n");
 }
 
-static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence)
+static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
 {
-    return "amd_sched";
+    return "drm_sched";
 }
 
-static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f)
+static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
 {
-    struct amd_sched_fence *fence = to_amd_sched_fence(f);
+    struct drm_sched_fence *fence = to_drm_sched_fence(f);
     return (const char *)fence->sched->name;
 }
 
-static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
+static bool drm_sched_fence_enable_signaling(struct dma_fence *f)
 {
     return true;
 }
@ -116,10 +95,10 @@ static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
  *
  * Free up the fence memory after the RCU grace period.
  */
-static void amd_sched_fence_free(struct rcu_head *rcu)
+static void drm_sched_fence_free(struct rcu_head *rcu)
 {
     struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
-    struct amd_sched_fence *fence = to_amd_sched_fence(f);
+    struct drm_sched_fence *fence = to_drm_sched_fence(f);
 
     dma_fence_put(fence->parent);
     kmem_cache_free(sched_fence_slab, fence);
@ -133,11 +112,11 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
  * This function is called when the reference count becomes zero.
  * It just RCU schedules freeing up the fence.
  */
-static void amd_sched_fence_release_scheduled(struct dma_fence *f)
+static void drm_sched_fence_release_scheduled(struct dma_fence *f)
 {
-    struct amd_sched_fence *fence = to_amd_sched_fence(f);
+    struct drm_sched_fence *fence = to_drm_sched_fence(f);
 
-    call_rcu(&fence->finished.rcu, amd_sched_fence_free);
+    call_rcu(&fence->finished.rcu, drm_sched_fence_free);
 }
 
 /**
@ -147,27 +126,62 @@ static void amd_sched_fence_release_scheduled(struct dma_fence *f)
  *
 * Drop the extra reference from the scheduled fence to the base fence.
  */
-static void amd_sched_fence_release_finished(struct dma_fence *f)
+static void drm_sched_fence_release_finished(struct dma_fence *f)
 {
-    struct amd_sched_fence *fence = to_amd_sched_fence(f);
+    struct drm_sched_fence *fence = to_drm_sched_fence(f);
 
     dma_fence_put(&fence->scheduled);
 }
 
-const struct dma_fence_ops amd_sched_fence_ops_scheduled = {
-    .get_driver_name = amd_sched_fence_get_driver_name,
-    .get_timeline_name = amd_sched_fence_get_timeline_name,
-    .enable_signaling = amd_sched_fence_enable_signaling,
+const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
+    .get_driver_name = drm_sched_fence_get_driver_name,
+    .get_timeline_name = drm_sched_fence_get_timeline_name,
+    .enable_signaling = drm_sched_fence_enable_signaling,
     .signaled = NULL,
     .wait = dma_fence_default_wait,
-    .release = amd_sched_fence_release_scheduled,
+    .release = drm_sched_fence_release_scheduled,
 };
 
-const struct dma_fence_ops amd_sched_fence_ops_finished = {
-    .get_driver_name = amd_sched_fence_get_driver_name,
-    .get_timeline_name = amd_sched_fence_get_timeline_name,
-    .enable_signaling = amd_sched_fence_enable_signaling,
+const struct dma_fence_ops drm_sched_fence_ops_finished = {
+    .get_driver_name = drm_sched_fence_get_driver_name,
+    .get_timeline_name = drm_sched_fence_get_timeline_name,
+    .enable_signaling = drm_sched_fence_enable_signaling,
     .signaled = NULL,
     .wait = dma_fence_default_wait,
-    .release = amd_sched_fence_release_finished,
+    .release = drm_sched_fence_release_finished,
 };
 
+struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
+{
+    if (f->ops == &drm_sched_fence_ops_scheduled)
+        return container_of(f, struct drm_sched_fence, scheduled);
+
+    if (f->ops == &drm_sched_fence_ops_finished)
+        return container_of(f, struct drm_sched_fence, finished);
+
+    return NULL;
+}
+EXPORT_SYMBOL(to_drm_sched_fence);
+
+struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
+                                               void *owner)
+{
+    struct drm_sched_fence *fence = NULL;
+    unsigned seq;
+
+    fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
+    if (fence == NULL)
+        return NULL;
+
+    fence->owner = owner;
+    fence->sched = entity->sched;
+    spin_lock_init(&fence->lock);
+
+    seq = atomic_inc_return(&entity->fence_seq);
+    dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
+                   &fence->lock, entity->fence_context, seq);
+    dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
+                   &fence->lock, entity->fence_context + 1, seq);
+
+    return fence;
+}
include/drm/gpu_scheduler.h (new file, 176 lines)
@ -0,0 +1,176 @@
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>

struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
    DRM_SCHED_PRIORITY_MIN,
    DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
    DRM_SCHED_PRIORITY_NORMAL,
    DRM_SCHED_PRIORITY_HIGH_SW,
    DRM_SCHED_PRIORITY_HIGH_HW,
    DRM_SCHED_PRIORITY_KERNEL,
    DRM_SCHED_PRIORITY_MAX,
    DRM_SCHED_PRIORITY_INVALID = -1,
    DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * A scheduler entity is a wrapper around a job queue or a group
 * of other entities. Entities take turns emitting jobs from their
 * job queues to corresponding hardware ring based on scheduling
 * policy.
 */
struct drm_sched_entity {
    struct list_head list;
    struct drm_sched_rq *rq;
    spinlock_t rq_lock;
    struct drm_gpu_scheduler *sched;

    spinlock_t queue_lock;
    struct spsc_queue job_queue;

    atomic_t fence_seq;
    uint64_t fence_context;

    struct dma_fence *dependency;
    struct dma_fence_cb cb;
    atomic_t *guilty; /* points to ctx's guilty */
};

/**
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
    spinlock_t lock;
    struct list_head entities;
    struct drm_sched_entity *current_entity;
};

struct drm_sched_fence {
    struct dma_fence scheduled;
    struct dma_fence finished;
    struct dma_fence_cb cb;
    struct dma_fence *parent;
    struct drm_gpu_scheduler *sched;
    spinlock_t lock;
    void *owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

struct drm_sched_job {
    struct spsc_node queue_node;
    struct drm_gpu_scheduler *sched;
    struct drm_sched_fence *s_fence;
    struct dma_fence_cb finish_cb;
    struct work_struct finish_work;
    struct list_head node;
    struct delayed_work work_tdr;
    uint64_t id;
    atomic_t karma;
    enum drm_sched_priority s_priority;
};

static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
                                            int threshold)
{
    return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}

/**
 * Define the backend operations called by the scheduler,
 * these functions should be implemented in driver side
 */
struct drm_sched_backend_ops {
    struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
                                    struct drm_sched_entity *s_entity);
    struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
    void (*timedout_job)(struct drm_sched_job *sched_job);
    void (*free_job)(struct drm_sched_job *sched_job);
};

/**
 * One scheduler is implemented for each hardware ring
 */
struct drm_gpu_scheduler {
    const struct drm_sched_backend_ops *ops;
    uint32_t hw_submission_limit;
    long timeout;
    const char *name;
    struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
    wait_queue_head_t wake_up_worker;
    wait_queue_head_t job_scheduled;
    atomic_t hw_rq_count;
    atomic64_t job_id_count;
    struct task_struct *thread;
    struct list_head ring_mirror_list;
    spinlock_t job_list_lock;
    int hang_limit;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const struct drm_sched_backend_ops *ops,
                   uint32_t hw_submission, unsigned hang_limit, long timeout,
                   const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);

int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
                          struct drm_sched_entity *entity,
                          struct drm_sched_rq *rq,
                          uint32_t jobs, atomic_t *guilty);
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
                           struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
                               struct drm_sched_entity *entity);
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
                             struct drm_sched_rq *rq);

int drm_sched_fence_slab_init(void);
void drm_sched_fence_slab_fini(void);

struct drm_sched_fence *drm_sched_fence_create(
    struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_gpu_scheduler *sched,
                       struct drm_sched_entity *entity,
                       void *owner);
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
                            struct drm_sched_job *job);
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
bool drm_sched_dependency_optimized(struct dma_fence* fence,
                                    struct drm_sched_entity *entity);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

#endif
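
The header spells out the driver contract: fill in a drm_sched_backend_ops table, create one drm_gpu_scheduler per hardware ring, bind each entity to one of that scheduler's run queues and feed it drm_sched_jobs. A condensed sketch of that wiring, assuming a hypothetical foo driver (the foo_* names and the chosen limits are placeholders, not part of this patch):

#include <linux/jiffies.h>
#include <drm/gpu_scheduler.h>

struct foo_job {
    struct drm_sched_job base;    /* embedded scheduler bookkeeping */
    /* driver private payload would follow */
};

static struct dma_fence *foo_dependency(struct drm_sched_job *sched_job,
                                        struct drm_sched_entity *s_entity)
{
    return NULL;    /* no extra dependencies in this sketch */
}

static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
{
    /* push the job to the hardware ring and return its hardware fence */
    return NULL;
}

static void foo_timedout_job(struct drm_sched_job *sched_job) { /* reset path */ }
static void foo_free_job(struct drm_sched_job *sched_job) { /* free the job */ }

static const struct drm_sched_backend_ops foo_sched_ops = {
    .dependency = foo_dependency,
    .run_job = foo_run_job,
    .timedout_job = foo_timedout_job,
    .free_job = foo_free_job,
};

static int foo_ring_init(struct drm_gpu_scheduler *sched,
                         struct drm_sched_entity *entity)
{
    int r;

    /* one scheduler per ring: 16 jobs in flight, no hang limit,
     * five second timeout */
    r = drm_sched_init(sched, &foo_sched_ops, 16, 0,
                       msecs_to_jiffies(5000), "foo_ring");
    if (r)
        return r;

    /* attach the entity to the NORMAL priority run queue of this ring */
    return drm_sched_entity_init(sched, entity,
                                 &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL],
                                 32, NULL);
}

Jobs would then be prepared with drm_sched_job_init(&job->base, sched, entity, owner) and handed over with drm_sched_entity_push_job(&job->base, entity); drm_sched_entity_fini() and drm_sched_fini() undo the setup on teardown.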

@ -31,14 +31,14 @@
 #include <drm/drmP.h>
 
 #undef TRACE_SYSTEM
-#define TRACE_SYSTEM gpu_sched
-#define TRACE_INCLUDE_FILE gpu_sched_trace
+#define TRACE_SYSTEM gpu_scheduler
+#define TRACE_INCLUDE_FILE gpu_scheduler_trace
 
-TRACE_EVENT(amd_sched_job,
-        TP_PROTO(struct amd_sched_job *sched_job, struct amd_sched_entity *entity),
+TRACE_EVENT(drm_sched_job,
+        TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
         TP_ARGS(sched_job, entity),
         TP_STRUCT__entry(
-            __field(struct amd_sched_entity *, entity)
+            __field(struct drm_sched_entity *, entity)
             __field(struct dma_fence *, fence)
             __field(const char *, name)
             __field(uint64_t, id)
@ -61,8 +61,8 @@ TRACE_EVENT(amd_sched_job,
         __entry->job_count, __entry->hw_job_count)
 );
 
-TRACE_EVENT(amd_sched_process_job,
-        TP_PROTO(struct amd_sched_fence *fence),
+TRACE_EVENT(drm_sched_process_job,
+        TP_PROTO(struct drm_sched_fence *fence),
         TP_ARGS(fence),
         TP_STRUCT__entry(
             __field(struct dma_fence *, fence)

@ -21,10 +21,11 @@
  *
  */
 
-#ifndef AMD_SCHEDULER_SPSC_QUEUE_H_
-#define AMD_SCHEDULER_SPSC_QUEUE_H_
+#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_
+#define DRM_SCHEDULER_SPSC_QUEUE_H_
 
 #include <linux/atomic.h>
 #include <linux/preempt.h>
 
 /** SPSC lockless queue */
 
@ -118,4 +119,4 @@ static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
 
 
 
-#endif /* AMD_SCHEDULER_SPSC_QUEUE_H_ */
+#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */
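
The spsc_queue backing an entity's job_queue is a single-producer/single-consumer lockless queue: the submitting context pushes, only the scheduler thread peeks and pops. A rough usage sketch with the helpers this patch references (spsc_queue_push(), spsc_queue_peek(), spsc_queue_pop()); the foo_* wrappers are placeholders and queue initialisation is assumed to come from the header:

#include <linux/kernel.h>
#include <drm/spsc_queue.h>

struct foo_item {
    struct spsc_node node;    /* queue linkage embedded in the element */
    int payload;
};

/* producer side, e.g. the context submitting work */
static bool foo_produce(struct spsc_queue *queue, struct foo_item *item)
{
    /* true when the queue was empty before the push, i.e. the
     * consumer may have gone to sleep and needs a wake up */
    return spsc_queue_push(queue, &item->node);
}

/* consumer side, e.g. the scheduler thread */
static struct foo_item *foo_consume(struct spsc_queue *queue)
{
    struct spsc_node *node = spsc_queue_pop(queue);

    return node ? container_of(node, struct foo_item, node) : NULL;
}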