mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-11 16:29:05 +00:00
drm: use anon-inode instead of relying on cdevs
DRM drivers share a common address_space across all character-devices of a single DRM device. This allows simple buffer eviction and mapping-control. However, DRM core currently waits for the first ->open() on any char-dev to mark the underlying inode as backing inode of the device. This delayed initialization causes ugly conditions all over the place:

	if (dev->dev_mapping)
		do_sth();

To avoid delayed initialization and to stop reusing the inode of the char-dev, we allocate an anonymous inode for each DRM device and reset filp->f_mapping to it on ->open().

Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
This commit is contained in:
parent 31bbe16f6d
commit 6796cb16c0
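The hunks below call two new helpers, drm_fs_inode_new() and drm_fs_inode_free(), whose bodies are not part of the quoted hunks. A minimal sketch of how such a per-device anonymous inode can be obtained follows, assuming a kernel-internal pseudo-filesystem pinned with simple_pin_fs() and an inode from alloc_anon_inode(); the mount callback, the "drm" fs name, the magic number and the static variables are illustrative assumptions, not necessarily the exact code this commit adds.

/*
 * Sketch only: a per-device anonymous inode backed by a kernel-internal
 * pseudo-filesystem. Names, magic and statics are illustrative.
 */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/module.h>

static struct vfsmount *drm_fs_mnt;	/* pinned pseudo-fs mount */
static int drm_fs_cnt;			/* pin refcount */

static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	/* no on-disk backing; just a pseudo super-block to hang inodes off */
	return mount_pseudo(fs_type, "drm:", NULL, NULL, 0x010203ff);
}

static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.mount		= drm_fs_mount,
	.kill_sb	= kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	/* keep the pseudo-fs mounted as long as at least one inode exists */
	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0)
		return ERR_PTR(r);

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}

drm_dev_alloc() stores the returned inode in dev->anon_inode (see the drm_dev_alloc() hunk below), and drm_open() simply points filp->f_mapping at dev->anon_inode->i_mapping, so every char-dev of a DRM device shares one address_space from the moment the device is allocated rather than from the first open.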
@@ -324,7 +324,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
 	}
 
 	astbo->bo.bdev = &ast->ttm.bdev;
-	astbo->bo.bdev->dev_mapping = dev->dev_mapping;
+	astbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping;
 
 	ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
@@ -359,7 +359,7 @@ static int bochs_bo_create(struct drm_device *dev, int size, int align,
 	}
 
 	bochsbo->bo.bdev = &bochs->ttm.bdev;
-	bochsbo->bo.bdev->dev_mapping = dev->dev_mapping;
+	bochsbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping;
 
 	bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
@@ -329,7 +329,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
 	}
 
 	cirrusbo->bo.bdev = &cirrus->ttm.bdev;
-	cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
+	cirrusbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping;
 
 	cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
@@ -84,8 +84,6 @@ int drm_open(struct inode *inode, struct file *filp)
 	struct drm_minor *minor;
 	int retcode = 0;
 	int need_setup = 0;
-	struct address_space *old_mapping;
-	struct address_space *old_imapping;
 
 	minor = idr_find(&drm_minors_idr, minor_id);
 	if (!minor)
@@ -99,16 +97,9 @@ int drm_open(struct inode *inode, struct file *filp)
 	if (!dev->open_count++)
 		need_setup = 1;
-	mutex_lock(&dev->struct_mutex);
-	old_imapping = inode->i_mapping;
-	old_mapping = dev->dev_mapping;
-	if (old_mapping == NULL)
-		dev->dev_mapping = &inode->i_data;
-	/* ihold ensures nobody can remove inode with our i_data */
-	ihold(container_of(dev->dev_mapping, struct inode, i_data));
-	inode->i_mapping = dev->dev_mapping;
-	filp->f_mapping = dev->dev_mapping;
-	mutex_unlock(&dev->struct_mutex);
+
+	/* share address_space across all char-devs of a single device */
+	filp->f_mapping = dev->anon_inode->i_mapping;
 
 	retcode = drm_open_helper(inode, filp, dev);
 	if (retcode)
@@ -121,12 +112,6 @@ int drm_open(struct inode *inode, struct file *filp)
 	return 0;
 
 err_undo:
-	mutex_lock(&dev->struct_mutex);
-	filp->f_mapping = old_imapping;
-	inode->i_mapping = old_imapping;
-	iput(container_of(dev->dev_mapping, struct inode, i_data));
-	dev->dev_mapping = old_mapping;
-	mutex_unlock(&dev->struct_mutex);
 	dev->open_count--;
 	return retcode;
 }
@@ -434,7 +419,6 @@ int drm_lastclose(struct drm_device * dev)
 
 	drm_legacy_dma_takedown(dev);
 
-	dev->dev_mapping = NULL;
 	mutex_unlock(&dev->struct_mutex);
 
 	drm_legacy_dev_reinit(dev);
@@ -549,9 +533,6 @@ int drm_release(struct inode *inode, struct file *filp)
 		}
 	}
 
-	BUG_ON(dev->dev_mapping == NULL);
-	iput(container_of(dev->dev_mapping, struct inode, i_data));
-
 	/* drop the reference held my the file priv */
 	if (file_priv->master)
 		drm_master_put(&file_priv->master);
@@ -526,8 +526,15 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
 	mutex_init(&dev->struct_mutex);
 	mutex_init(&dev->ctxlist_mutex);
 
-	if (drm_ht_create(&dev->map_hash, 12))
+	dev->anon_inode = drm_fs_inode_new();
+	if (IS_ERR(dev->anon_inode)) {
+		ret = PTR_ERR(dev->anon_inode);
+		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
 		goto err_free;
+	}
+
+	if (drm_ht_create(&dev->map_hash, 12))
+		goto err_inode;
 
 	ret = drm_ctxbitmap_init(dev);
 	if (ret) {
@@ -549,6 +556,8 @@ err_ctxbitmap:
 	drm_ctxbitmap_cleanup(dev);
 err_ht:
 	drm_ht_remove(&dev->map_hash);
+err_inode:
+	drm_fs_inode_free(dev->anon_inode);
 err_free:
 	kfree(dev);
 	return NULL;
@@ -576,6 +585,7 @@ void drm_dev_free(struct drm_device *dev)
 
 	drm_ctxbitmap_cleanup(dev);
 	drm_ht_remove(&dev->map_hash);
+	drm_fs_inode_free(dev->anon_inode);
 
 	kfree(dev->devname);
 	kfree(dev);
@@ -1508,7 +1508,8 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 	if (!obj->fault_mappable)
 		return;
 
-	drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
+	drm_vma_node_unmap(&obj->base.vma_node,
+			   obj->base.dev->anon_inode->i_mapping);
 	obj->fault_mappable = false;
 }
 
@@ -324,7 +324,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
 	}
 
 	mgabo->bo.bdev = &mdev->ttm.bdev;
-	mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
+	mgabo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping;
 
 	mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
@@ -228,7 +228,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 	struct nouveau_bo *nvbo = NULL;
 	int ret = 0;
 
-	drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
+	drm->ttm.bdev.dev_mapping = drm->dev->anon_inode->i_mapping;
 
 	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
 		NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
@@ -153,24 +153,24 @@ static struct {
 static void evict_entry(struct drm_gem_object *obj,
 		enum tiler_fmt fmt, struct usergart_entry *entry)
 {
-	if (obj->dev->dev_mapping) {
-		struct omap_gem_object *omap_obj = to_omap_bo(obj);
-		int n = usergart[fmt].height;
-		size_t size = PAGE_SIZE * n;
-		loff_t off = mmap_offset(obj) +
-				(entry->obj_pgoff << PAGE_SHIFT);
-		const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
-		if (m > 1) {
-			int i;
-			/* if stride > than PAGE_SIZE then sparse mapping: */
-			for (i = n; i > 0; i--) {
-				unmap_mapping_range(obj->dev->dev_mapping,
-						off, PAGE_SIZE, 1);
-				off += PAGE_SIZE * m;
-			}
-		} else {
-			unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
-		}
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	int n = usergart[fmt].height;
+	size_t size = PAGE_SIZE * n;
+	loff_t off = mmap_offset(obj) +
+			(entry->obj_pgoff << PAGE_SHIFT);
+	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+
+	if (m > 1) {
+		int i;
+		/* if stride > than PAGE_SIZE then sparse mapping: */
+		for (i = n; i > 0; i--) {
+			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
+					off, PAGE_SIZE, 1);
+			off += PAGE_SIZE * m;
+		}
+	} else {
+		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
+				off, size, 1);
 	}
 
 	entry->obj = NULL;
@@ -82,8 +82,7 @@ int qxl_bo_create(struct qxl_device *qdev,
 	enum ttm_bo_type type;
 	int r;
 
-	if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
-		qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
+	qdev->mman.bdev.dev_mapping = qdev->ddev->anon_inode->i_mapping;
 	if (kernel)
 		type = ttm_bo_type_kernel;
 	else
@@ -518,8 +518,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
 		 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
 	DRM_INFO("qxl: %uM of Surface memory size\n",
 		 (unsigned)qdev->surfaceram_size / (1024 * 1024));
-	if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
-		qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
+	qdev->mman.bdev.dev_mapping = qdev->ddev->anon_inode->i_mapping;
 	r = qxl_ttm_debugfs_init(qdev);
 	if (r) {
 		DRM_ERROR("Failed to init debugfs\n");
@@ -145,7 +145,7 @@ int radeon_bo_create(struct radeon_device *rdev,
 
 	size = ALIGN(size, PAGE_SIZE);
 
-	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
+	rdev->mman.bdev.dev_mapping = rdev->ddev->anon_inode->i_mapping;
 	if (kernel) {
 		type = ttm_bo_type_kernel;
 	} else if (sg) {
@@ -745,7 +745,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
 	}
 	DRM_INFO("radeon: %uM of GTT memory ready.\n",
 		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
-	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
+	rdev->mman.bdev.dev_mapping = rdev->ddev->anon_inode->i_mapping;
 
 	r = radeon_ttm_debugfs_init(rdev);
 	if (r) {
@@ -969,7 +969,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 		goto out_no_shman;
 
 	file_priv->driver_priv = vmw_fp;
-	dev_priv->bdev.dev_mapping = dev->dev_mapping;
+	dev_priv->bdev.dev_mapping = dev->anon_inode->i_mapping;
 
 	return 0;
 
@@ -1183,7 +1183,7 @@ struct drm_device {
 	struct drm_sg_mem *sg;	/**< Scatter gather memory */
 	unsigned int num_crtcs;	/**< Number of CRTCs on this device */
 	void *dev_private;		/**< device private data */
-	struct address_space *dev_mapping;
+	struct inode *anon_inode;
 	struct drm_sigdata sigdata;	   /**< For block_all_signals */
 	sigset_t sigmask;
 