Stephen Rothwell 2024-12-20 12:14:10 +11:00
commit 75ff0b4c3d
5 changed files with 49 additions and 22 deletions

View File

@@ -9,7 +9,6 @@
 #include "etnaviv_gem.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_mmu.h"
-#include "etnaviv_perfmon.h"
 
 #define SUBALLOC_SIZE SZ_512K
 #define SUBALLOC_GRANULE SZ_4K
@@ -100,7 +99,7 @@ int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc,
 		mutex_unlock(&suballoc->lock);
 		ret = wait_event_interruptible_timeout(suballoc->free_event,
 						       suballoc->free_space,
-						       msecs_to_jiffies(10 * 1000));
+						       secs_to_jiffies(10));
 		if (!ret) {
 			dev_err(suballoc->dev,
 				"Timeout waiting for cmdbuf space\n");

View File

@@ -488,7 +488,16 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
 	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
 };
 
-DEFINE_DRM_GEM_FOPS(fops);
+static void etnaviv_show_fdinfo(struct drm_printer *p, struct drm_file *file)
+{
+	drm_show_memory_stats(p, file);
+}
+
+static const struct file_operations fops = {
+	.owner = THIS_MODULE,
+	DRM_GEM_FOPS,
+	.show_fdinfo = drm_show_fdinfo,
+};
 
 static const struct drm_driver etnaviv_drm_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_RENDER,
@@ -498,6 +507,7 @@ static const struct drm_driver etnaviv_drm_driver = {
 #ifdef CONFIG_DEBUG_FS
 	.debugfs_init = etnaviv_debugfs_init,
 #endif
+	.show_fdinfo = etnaviv_show_fdinfo,
 	.ioctls = etnaviv_ioctls,
 	.num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
 	.fops = &fops,
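
The two hunks above wire etnaviv into the common DRM fdinfo path: the core drm_show_fdinfo() file operation prints the generic drm-* keys and then calls the driver's drm_driver.show_fdinfo hook, which here simply delegates to drm_show_memory_stats(). A rough userspace sketch of how such per-client stats are read back, assuming a render node at /dev/dri/renderD128 (the exact key set printed depends on kernel and driver; error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64], line[256];
	int fd = open("/dev/dri/renderD128", O_RDWR);
	FILE *f;

	if (fd < 0)
		return 1;

	/* Each open DRM fd exposes its stats under /proc/<pid>/fdinfo/<fd>. */
	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
	f = fopen(path, "r");
	if (f) {
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* includes the drm-* keys */
		fclose(f);
	}
	close(fd);
	return 0;
}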

View File

@@ -342,6 +342,7 @@ void *etnaviv_gem_vmap(struct drm_gem_object *obj)
 static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
 {
 	struct page **pages;
+	pgprot_t prot;
 
 	lockdep_assert_held(&obj->lock);
@@ -349,8 +350,19 @@ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
 	if (IS_ERR(pages))
 		return NULL;
 
-	return vmap(pages, obj->base.size >> PAGE_SHIFT,
-			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+	switch (obj->flags & ETNA_BO_CACHE_MASK) {
+	case ETNA_BO_CACHED:
+		prot = PAGE_KERNEL;
+		break;
+	case ETNA_BO_UNCACHED:
+		prot = pgprot_noncached(PAGE_KERNEL);
+		break;
+	case ETNA_BO_WC:
+	default:
+		prot = pgprot_writecombine(PAGE_KERNEL);
+	}
+
+	return vmap(pages, obj->base.size >> PAGE_SHIFT, VM_MAP, prot);
 }
 
 static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
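
ETNA_BO_CACHED, ETNA_BO_UNCACHED and ETNA_BO_WC are buffer-allocation flags from the etnaviv UAPI (drm/etnaviv_drm.h); with the change above, the kernel-side vmap() honours the caching mode the buffer was created with instead of always mapping write-combined. A hedged userspace sketch of allocating a cached BO (header path and error handling simplified; fd is assumed to be an open etnaviv render node):

#include <sys/ioctl.h>
#include <drm/etnaviv_drm.h>	/* may be <etnaviv_drm.h> with libdrm's include path */

/* Allocate a BO whose kernel-side mappings will use cached (PAGE_KERNEL) protections. */
static int alloc_cached_bo(int fd, __u64 size, __u32 *handle)
{
	struct drm_etnaviv_gem_new req = {
		.size = size,
		.flags = ETNA_BO_CACHED,	/* cached CPU view; ETNA_BO_WC is the more common choice */
	};
	int ret = ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_NEW, &req);

	if (!ret)
		*handle = req.handle;
	return ret;
}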
@@ -528,6 +540,17 @@ void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
 	mutex_unlock(&priv->gem_lock);
 }
 
+static enum drm_gem_object_status etnaviv_gem_status(struct drm_gem_object *obj)
+{
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+	enum drm_gem_object_status status = 0;
+
+	if (etnaviv_obj->pages)
+		status |= DRM_GEM_OBJECT_RESIDENT;
+
+	return status;
+}
+
 static const struct vm_operations_struct vm_ops = {
 	.fault = etnaviv_gem_fault,
 	.open = drm_gem_vm_open,
@@ -541,6 +564,7 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
 	.get_sg_table = etnaviv_gem_prime_get_sg_table,
 	.vmap = etnaviv_gem_prime_vmap,
 	.mmap = etnaviv_gem_mmap,
+	.status = etnaviv_gem_status,
 	.vm_ops = &vm_ops,
 };
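
The new .status callback is what lets drm_show_memory_stats() classify each GEM object for fdinfo: reporting DRM_GEM_OBJECT_RESIDENT whenever backing pages exist makes the object count toward the resident total. A generic sketch of the same hook for a hypothetical driver that also tracks a purgeable state (struct example_bo and its fields are invented for illustration and are not etnaviv code):

#include <linux/container_of.h>
#include <drm/drm_gem.h>

/* Hypothetical driver-private BO wrapper, for illustration only. */
struct example_bo {
	struct drm_gem_object base;
	struct page **pages;	/* non-NULL once backing pages are pinned */
	bool purgeable;		/* e.g. set via a madvise-style ioctl */
};

static enum drm_gem_object_status example_gem_status(struct drm_gem_object *obj)
{
	struct example_bo *bo = container_of(obj, struct example_bo, base);
	enum drm_gem_object_status status = 0;

	if (bo->pages)
		status |= DRM_GEM_OBJECT_RESIDENT;
	if (bo->purgeable)
		status |= DRM_GEM_OBJECT_PURGEABLE;

	return status;
}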

View File

@@ -44,9 +44,7 @@ struct etnaviv_gem_object {
 	u32 flags;
 
 	struct list_head gem_node;
 	struct etnaviv_gpu *gpu; /* non-null if active */
-	atomic_t gpu_active;
-	u32 access;
 
 	struct page **pages;
 	struct sg_table *sgt;

View File

@@ -19,12 +19,6 @@ static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
 	size_t unmapped_page, unmapped = 0;
 	size_t pgsize = SZ_4K;
 
-	if (!IS_ALIGNED(iova | size, pgsize)) {
-		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
-		       iova, size, pgsize);
-		return;
-	}
-
 	while (unmapped < size) {
 		unmapped_page = context->global->ops->unmap(context, iova,
 							    pgsize);
@@ -45,12 +39,6 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
 	size_t orig_size = size;
 	int ret = 0;
 
-	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
-		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
-		       iova, &paddr, size, pgsize);
-		return -EINVAL;
-	}
-
 	while (size) {
 		ret = context->global->ops->map(context, iova, paddr, pgsize,
 						prot);
@@ -82,11 +70,19 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
 		return -EINVAL;
 
 	for_each_sgtable_dma_sg(sgt, sg, i) {
-		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
-		unsigned int da_len = sg_dma_len(sg) + sg->offset;
+		phys_addr_t pa = sg_dma_address(sg);
+		unsigned int da_len = sg_dma_len(sg);
 		unsigned int bytes = min_t(unsigned int, da_len, va_len);
 
-		VERB("map[%d]: %08x %pap(%x)", i, iova, &pa, bytes);
+		VERB("map[%d]: %08x %pap(%x)", i, da, &pa, bytes);
+
+		if (!IS_ALIGNED(iova | pa | bytes, SZ_4K)) {
+			dev_err(context->global->dev,
+				"unaligned: iova 0x%x pa %pa size 0x%x\n",
+				iova, &pa, bytes);
+			ret = -EINVAL;
+			goto fail;
+		}
 
 		ret = etnaviv_context_map(context, da, pa, bytes, prot);
 		if (ret)
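
In the hunk above the per-page alignment checks move out of etnaviv_context_map()/etnaviv_context_unmap() and into the scatterlist walk, where a single IS_ALIGNED(iova | pa | bytes, SZ_4K) covers all three values at once: OR-ing them together preserves any non-zero low-order bits, so the combined value is 4 KiB aligned only if every operand is. A small standalone sketch of that property (the local macros mirror the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

#define SZ_4K 0x1000u
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	uint64_t iova = 0x10000000, pa = 0x80001000, bytes = 0x2000;

	printf("all aligned: %d\n", IS_ALIGNED(iova | pa | bytes, SZ_4K));	/* prints 1 */
	pa += 0x10;	/* misalign just one operand */
	printf("all aligned: %d\n", IS_ALIGNED(iova | pa | bytes, SZ_4K));	/* prints 0 */
	return 0;
}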