Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-01 10:45:49 +00:00)
dma-buf: drop excl_fence parameter from dma_resv_get_fences
Returning the exclusive fence separately is no longer used. Instead add a
write parameter to indicate the use case.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20211207123411.167006-4-christian.koenig@amd.com
commit 75ab2b3633
parent acde6234f6
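In caller terms the change is small. A minimal usage sketch of the reworked function (example_collect_fences and its resv argument are hypothetical placeholders, not part of this patch; the put/free obligations follow the kdoc and the selftest below):

static int example_collect_fences(struct dma_resv *resv)
{
	struct dma_fence **fences;
	unsigned int num_fences, i;
	int r;

	/* write = true: report all fences, not just the exclusive one. */
	r = dma_resv_get_fences(resv, true, &num_fences, &fences);
	if (r)
		return r;	/* only 0 or -ENOMEM per the kdoc */

	/* The array is krealloc'd for us and must be freed by the caller;
	 * every returned fence holds a reference we have to drop. */
	for (i = 0; i < num_fences; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return 0;
}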
@@ -542,57 +542,45 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  * dma_resv_get_fences - Get an object's shared and exclusive
  * fences without update side lock held
  * @obj: the reservation object
- * @fence_excl: the returned exclusive fence (or NULL)
- * @shared_count: the number of shared fences returned
- * @shared: the array of shared fence ptrs returned (array is krealloc'd to
- * the required size, and must be freed by caller)
+ * @write: true if we should return all fences
+ * @num_fences: the number of fences returned
+ * @fences: the array of fence ptrs returned (array is krealloc'd to the
+ * required size, and must be freed by caller)
  *
- * Retrieve all fences from the reservation object. If the pointer for the
- * exclusive fence is not specified the fence is put into the array of the
- * shared fences as well. Returns either zero or -ENOMEM.
+ * Retrieve all fences from the reservation object.
+ * Returns either zero or -ENOMEM.
  */
-int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
-			unsigned int *shared_count, struct dma_fence ***shared)
+int dma_resv_get_fences(struct dma_resv *obj, bool write,
+			unsigned int *num_fences, struct dma_fence ***fences)
 {
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
-	*shared_count = 0;
-	*shared = NULL;
+	*num_fences = 0;
+	*fences = NULL;
 
-	if (fence_excl)
-		*fence_excl = NULL;
-
-	dma_resv_iter_begin(&cursor, obj, true);
+	dma_resv_iter_begin(&cursor, obj, write);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 
 		if (dma_resv_iter_is_restarted(&cursor)) {
 			unsigned int count;
 
-			while (*shared_count)
-				dma_fence_put((*shared)[--(*shared_count)]);
+			while (*num_fences)
+				dma_fence_put((*fences)[--(*num_fences)]);
 
-			if (fence_excl)
-				dma_fence_put(*fence_excl);
-
-			count = cursor.shared_count;
-			count += fence_excl ? 0 : 1;
+			count = cursor.shared_count + 1;
 
 			/* Eventually re-allocate the array */
-			*shared = krealloc_array(*shared, count,
+			*fences = krealloc_array(*fences, count,
						 sizeof(void *),
						 GFP_KERNEL);
-			if (count && !*shared) {
+			if (count && !*fences) {
 				dma_resv_iter_end(&cursor);
 				return -ENOMEM;
 			}
 		}
 
-		dma_fence_get(fence);
-		if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
-			*fence_excl = fence;
-		else
-			(*shared)[(*shared_count)++] = fence;
+		(*fences)[(*num_fences)++] = dma_fence_get(fence);
 	}
 	dma_resv_iter_end(&cursor);
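The implementation above builds on the unlocked fence iterator. A condensed sketch of that pattern with the new parameter name (obj and write are the same parameters as in the hunk; the comments paraphrase what the restart check is for):

	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, write);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor)) {
			/* The object changed under us: drop everything
			 * collected so far and resize the array to
			 * cursor.shared_count + 1 entries. */
		}
		/* Take an own reference with dma_fence_get() before
		 * storing the fence anywhere. */
	}
	dma_resv_iter_end(&cursor);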
@@ -275,7 +275,7 @@ static int test_shared_for_each_unlocked(void *arg)
 
 static int test_get_fences(void *arg, bool shared)
 {
-	struct dma_fence *f, *excl = NULL, **fences = NULL;
+	struct dma_fence *f, **fences = NULL;
 	struct dma_resv resv;
 	int r, i;
 
@@ -304,35 +304,19 @@ static int test_get_fences(void *arg, bool shared)
 	}
 	dma_resv_unlock(&resv);
 
-	r = dma_resv_get_fences(&resv, &excl, &i, &fences);
+	r = dma_resv_get_fences(&resv, shared, &i, &fences);
 	if (r) {
 		pr_err("get_fences failed\n");
 		goto err_free;
 	}
 
-	if (shared) {
-		if (excl != NULL) {
-			pr_err("get_fences returned unexpected excl fence\n");
-			goto err_free;
-		}
-		if (i != 1 || fences[0] != f) {
-			pr_err("get_fences returned unexpected shared fence\n");
-			goto err_free;
-		}
-	} else {
-		if (excl != f) {
-			pr_err("get_fences returned unexpected excl fence\n");
-			goto err_free;
-		}
-		if (i != 0) {
-			pr_err("get_fences returned unexpected shared fence\n");
-			goto err_free;
-		}
+	if (i != 1 || fences[0] != f) {
+		pr_err("get_fences returned unexpected fence\n");
+		goto err_free;
 	}
 
 	dma_fence_signal(f);
 err_free:
-	dma_fence_put(excl);
 	while (i--)
 		dma_fence_put(fences[i]);
 	kfree(fences);
@@ -200,8 +200,10 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
 
-	r = dma_resv_get_fences(new_abo->tbo.base.resv, NULL,
-				&work->shared_count, &work->shared);
+	/* TODO: Unify this with other drivers */
+	r = dma_resv_get_fences(new_abo->tbo.base.resv, true,
+				&work->shared_count,
+				&work->shared);
 	if (unlikely(r != 0)) {
 		DRM_ERROR("failed to get fences for buffer\n");
 		goto unpin;
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 	unsigned count;
 	int r;
 
-	r = dma_resv_get_fences(resv, NULL, &count, &fences);
+	r = dma_resv_get_fences(resv, true, &count, &fences);
 	if (r)
 		goto fallback;
 
@@ -189,8 +189,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 			continue;
 
 		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
-			ret = dma_resv_get_fences(robj, NULL,
-						  &bo->nr_shared,
+			ret = dma_resv_get_fences(robj, true, &bo->nr_shared,
						  &bo->shared);
 			if (ret)
 				return ret;
@@ -458,8 +458,8 @@ void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
 void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
-			unsigned *pshared_count, struct dma_fence ***pshared);
+int dma_resv_get_fences(struct dma_resv *obj, bool write,
+			unsigned int *num_fences, struct dma_fence ***fences);
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
 long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
 			    unsigned long timeout);
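What the new write flag selects, as a short sketch (the two calls are alternatives, not a sequence; resv, count and fences are placeholders, and the write == false reading is inferred from dma_resv_iter_begin() in the first hunk rather than stated by the patch):

	struct dma_fence **fences;
	unsigned int count;
	int r;

	/* All fences on the object, shared and exclusive alike (what a
	 * new writer would have to wait for). */
	r = dma_resv_get_fences(resv, true, &count, &fences);

	/* Only the exclusive (write) fence, as an array of zero or one
	 * entries (what a new reader would have to wait for). */
	r = dma_resv_get_fences(resv, false, &count, &fences);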