drm-fixes for 6.2-rc3
drivers:
 - i915-gvt fixes
 - amdgpu/kfd fixes
 - panfrost bo refcounting fix
 - meson afbc corruption fix
 - imx plane width fix

core:
 - drm/sched fixes
 - drm/mm kunit test fix
 - dma-buf export error handling fixes

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEb4nG6jLu8Y5XI+PfTA9ye/CYqnEFAmO4qI0ACgkQTA9ye/CY
qnGr8hAAklp255Xvu+ZgqpZ7HNC7ARk9vmhpkksqgF5CD0AyHxmZiHsen+nNJXxs
CuaIEBxbylJTDV+YZrSGXibU79JcyoIo3HyEH0yST1mkXsb/hrTfgWtrXheMiMKM
Bbx0aqycd9nA0I+djOBoZT8A7hlixrs4kopO4I728DAK0pTXcT5eM+zQw2SLvP6g
RYiEuilpqf+AAo1JJMzCZ913nqvX/ZWndFWqc9iK/GWm56LrrtQT6EZ0EJRhTcN+
yQjj9YczdqRJ2EXV5O9M3NjNCnJf3Bu/v18yKAt+8qcn6isgFHzwnU76mCXoBZLk
BWP1mws4n6l5r9sKPK8IesnjjGCsDk6qWdJJlWMEYECjMpEWUWRf+q4m3B3FljIi
UfUZ0y1v3YphCmyhCkyzApGPcDPMIPUvlGtb/Zz/bCHF0WlGp1oR1ujfz26JflHD
IAsjNNUay2LUgHd+7qjrBa0eSn3cN/0IKiNxxGvzq2uio6a1mlp3NHR3zFTa+/ZS
aLBQth8S9Ttr30pPDshGaIngjvo91Vqc+GlcwFnuCkM+ZwaADkDfNvl6O32vXvkO
SPKhqCxMa+/d2RxfyFahp7YLehQO6S0Ifr/EO7+063k8cRU9D2T5Fv2aPGd7HXXb
Np6f4iG7xXwTDYvh5PjfJ8LL9HAI3dVBnO4NS82lB4DPVVJQ2Tw=
=F/VA
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2023-01-06' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Daniel Vetter:
 "Still not much, but more than last week. Dave should be back next
  week from the beaching.

  drivers:
   - i915-gvt fixes
   - amdgpu/kfd fixes
   - panfrost bo refcounting fix
   - meson afbc corruption fix
   - imx plane width fix

  core:
   - drm/sched fixes
   - drm/mm kunit test fix
   - dma-buf export error handling fixes"

* tag 'drm-fixes-2023-01-06' of git://anongit.freedesktop.org/drm/drm:
  Revert "drm/amd/display: Enable Freesync Video Mode by default"
  drm/i915/gvt: fix double free bug in split_2MB_gtt_entry
  drm/i915/gvt: use atomic operations to change the vGPU status
  drm/i915/gvt: fix vgpu debugfs clean in remove
  drm/i915/gvt: fix gvt debugfs destroy
  drm/i915: unpin on error in intel_vgpu_shadow_mm_pin()
  drm/amd/display: Uninitialized variables causing 4k60 UCLK to stay at DPM1 and not DPM0
  drm/amdkfd: Fix kernel warning during topology setup
  drm/scheduler: Fix lockup in drm_sched_entity_kill()
  drm/imx: ipuv3-plane: Fix overlay plane width
  drm/scheduler: Fix lockup in drm_sched_entity_kill()
  drm/virtio: Fix memory leak in virtio_gpu_object_create()
  drm/meson: Reduce the FIFO lines held when AFBC is not used
  drm/tests: reduce drm_mm_test stack usage
  drm/panfrost: Fix GEM handle creation ref-counting
  drm/plane-helper: Add the missing declaration of drm_atomic_state
  dma-buf: fix dma_buf_export init order v2
commit 0a71553536
--- a/drivers/dma-buf/dma-buf-sysfs-stats.c
+++ b/drivers/dma-buf/dma-buf-sysfs-stats.c
@@ -168,14 +168,11 @@ void dma_buf_uninit_sysfs_statistics(void)
 	kset_unregister(dma_buf_stats_kset);
 }
 
-int dma_buf_stats_setup(struct dma_buf *dmabuf)
+int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
 {
 	struct dma_buf_sysfs_entry *sysfs_entry;
 	int ret;
 
-	if (!dmabuf || !dmabuf->file)
-		return -EINVAL;
-
 	if (!dmabuf->exp_name) {
 		pr_err("exporter name must not be empty if stats needed\n");
 		return -EINVAL;
@@ -192,7 +189,7 @@ int dma_buf_stats_setup(struct dma_buf *dmabuf)
 
 	/* create the directory for buffer stats */
 	ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL,
-				   "%lu", file_inode(dmabuf->file)->i_ino);
+				   "%lu", file_inode(file)->i_ino);
 	if (ret)
 		goto err_sysfs_dmabuf;
 
--- a/drivers/dma-buf/dma-buf-sysfs-stats.h
+++ b/drivers/dma-buf/dma-buf-sysfs-stats.h
@@ -13,7 +13,7 @@
 int dma_buf_init_sysfs_statistics(void);
 void dma_buf_uninit_sysfs_statistics(void);
 
-int dma_buf_stats_setup(struct dma_buf *dmabuf);
+int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file);
 
 void dma_buf_stats_teardown(struct dma_buf *dmabuf);
 #else
@@ -25,7 +25,7 @@ static inline int dma_buf_init_sysfs_statistics(void)
 
 static inline void dma_buf_uninit_sysfs_statistics(void) {}
 
-static inline int dma_buf_stats_setup(struct dma_buf *dmabuf)
+static inline int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
 {
 	return 0;
 }
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -95,10 +95,11 @@ static int dma_buf_file_release(struct inode *inode, struct file *file)
 		return -EINVAL;
 
 	dmabuf = file->private_data;
-
-	mutex_lock(&db_list.lock);
-	list_del(&dmabuf->list_node);
-	mutex_unlock(&db_list.lock);
+	if (dmabuf) {
+		mutex_lock(&db_list.lock);
+		list_del(&dmabuf->list_node);
+		mutex_unlock(&db_list.lock);
+	}
 
 	return 0;
 }
@@ -528,17 +529,17 @@ static inline int is_dma_buf_file(struct file *file)
 	return file->f_op == &dma_buf_fops;
 }
 
-static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
+static struct file *dma_buf_getfile(size_t size, int flags)
 {
 	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
-	struct file *file;
 	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
+	struct file *file;
 
 	if (IS_ERR(inode))
 		return ERR_CAST(inode);
 
-	inode->i_size = dmabuf->size;
-	inode_set_bytes(inode, dmabuf->size);
+	inode->i_size = size;
+	inode_set_bytes(inode, size);
 
 	/*
 	 * The ->i_ino acquired from get_next_ino() is not unique thus
@@ -552,8 +553,6 @@ static struct file *dma_buf_getfile(size_t size, int flags)
 				 flags, &dma_buf_fops);
 	if (IS_ERR(file))
 		goto err_alloc_file;
-	file->private_data = dmabuf;
-	file->f_path.dentry->d_fsdata = dmabuf;
 
 	return file;
 
@@ -619,19 +618,11 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	size_t alloc_size = sizeof(struct dma_buf);
 	int ret;
 
-	if (!exp_info->resv)
-		alloc_size += sizeof(struct dma_resv);
-	else
-		/* prevent &dma_buf[1] == dma_buf->resv */
-		alloc_size += 1;
-
-	if (WARN_ON(!exp_info->priv
-		    || !exp_info->ops
-		    || !exp_info->ops->map_dma_buf
-		    || !exp_info->ops->unmap_dma_buf
-		    || !exp_info->ops->release)) {
+	if (WARN_ON(!exp_info->priv || !exp_info->ops
+		    || !exp_info->ops->map_dma_buf
+		    || !exp_info->ops->unmap_dma_buf
+		    || !exp_info->ops->release))
 		return ERR_PTR(-EINVAL);
-	}
 
 	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
 		    (exp_info->ops->pin || exp_info->ops->unpin)))
@@ -643,10 +634,21 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	if (!try_module_get(exp_info->owner))
 		return ERR_PTR(-ENOENT);
 
+	file = dma_buf_getfile(exp_info->size, exp_info->flags);
+	if (IS_ERR(file)) {
+		ret = PTR_ERR(file);
+		goto err_module;
+	}
+
+	if (!exp_info->resv)
+		alloc_size += sizeof(struct dma_resv);
+	else
+		/* prevent &dma_buf[1] == dma_buf->resv */
+		alloc_size += 1;
 	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
 	if (!dmabuf) {
 		ret = -ENOMEM;
-		goto err_module;
+		goto err_file;
 	}
 
 	dmabuf->priv = exp_info->priv;
@@ -658,43 +660,35 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	init_waitqueue_head(&dmabuf->poll);
 	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
 	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
+	INIT_LIST_HEAD(&dmabuf->attachments);
 
 	if (!resv) {
-		resv = (struct dma_resv *)&dmabuf[1];
-		dma_resv_init(resv);
+		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
+		dma_resv_init(dmabuf->resv);
+	} else {
+		dmabuf->resv = resv;
 	}
-	dmabuf->resv = resv;
 
-	file = dma_buf_getfile(dmabuf, exp_info->flags);
-	if (IS_ERR(file)) {
-		ret = PTR_ERR(file);
+	ret = dma_buf_stats_setup(dmabuf, file);
+	if (ret)
 		goto err_dmabuf;
-	}
 
+	file->private_data = dmabuf;
+	file->f_path.dentry->d_fsdata = dmabuf;
 	dmabuf->file = file;
 
-	INIT_LIST_HEAD(&dmabuf->attachments);
-
 	mutex_lock(&db_list.lock);
 	list_add(&dmabuf->list_node, &db_list.head);
 	mutex_unlock(&db_list.lock);
 
-	ret = dma_buf_stats_setup(dmabuf);
-	if (ret)
-		goto err_sysfs;
-
 	return dmabuf;
 
-err_sysfs:
-	/*
-	 * Set file->f_path.dentry->d_fsdata to NULL so that when
-	 * dma_buf_release() gets invoked by dentry_ops, it exits
-	 * early before calling the release() dma_buf op.
-	 */
-	file->f_path.dentry->d_fsdata = NULL;
-	fput(file);
 err_dmabuf:
+	if (!resv)
+		dma_resv_fini(dmabuf->resv);
 	kfree(dmabuf);
+err_file:
+	fput(file);
 err_module:
 	module_put(exp_info->owner);
 	return ERR_PTR(ret);
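The core of the dma-buf fix is ordering: the struct file is now created first from nothing but size and flags, the dma_buf is allocated and initialized next, sysfs stats are wired up, and only then is the buffer published (file->private_data, d_fsdata, the global db_list). Each error label unwinds exactly the steps completed before it, so dma_buf_release() can no longer run against a half-initialized object. A minimal userspace sketch of the same acquire-in-order/unwind-in-reverse idea (all names invented for illustration, not kernel code):

/* Sketch of the acquire-in-order, unwind-in-reverse pattern used above. */
#include <stdio.h>
#include <stdlib.h>

struct export_ctx { char *file; char *buf; };

static int export_object(struct export_ctx *ctx, size_t size)
{
        int ret;

        ctx->file = malloc(size);       /* step 1: acquired before the main object */
        if (!ctx->file) {
                ret = -1;
                goto err_out;
        }

        ctx->buf = malloc(size);        /* step 2: the main object */
        if (!ctx->buf) {
                ret = -2;
                goto err_file;          /* only step 1 needs unwinding */
        }

        /* step 3: side effects (sysfs entry, global list) come last, so a
         * failure can never leave a half-published object behind */
        return 0;

err_file:
        free(ctx->file);
err_out:
        return ret;
}

int main(void)
{
        struct export_ctx ctx = { 0 };

        if (export_object(&ctx, 64) == 0) {
                puts("export succeeded");
                free(ctx.buf);
                free(ctx.file);
        }
        return 0;
}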
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -195,6 +195,7 @@ extern int amdgpu_emu_mode;
 extern uint amdgpu_smu_memory_pool_size;
 extern int amdgpu_smu_pptable_id;
 extern uint amdgpu_dc_feature_mask;
+extern uint amdgpu_freesync_vid_mode;
 extern uint amdgpu_dc_debug_mask;
 extern uint amdgpu_dc_visual_confirm;
 extern uint amdgpu_dm_abm_level;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -181,6 +181,7 @@ int amdgpu_mes_kiq;
 int amdgpu_noretry = -1;
 int amdgpu_force_asic_type = -1;
 int amdgpu_tmz = -1; /* auto */
+uint amdgpu_freesync_vid_mode;
 int amdgpu_reset_method = -1; /* auto */
 int amdgpu_num_kcq = -1;
 int amdgpu_smartshift_bias;
@@ -879,6 +880,32 @@ module_param_named(backlight, amdgpu_backlight, bint, 0444);
 MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)");
 module_param_named(tmz, amdgpu_tmz, int, 0444);
 
+/**
+ * DOC: freesync_video (uint)
+ * Enable the optimization to adjust front porch timing to achieve seamless
+ * mode change experience when setting a freesync supported mode for which full
+ * modeset is not needed.
+ *
+ * The Display Core will add a set of modes derived from the base FreeSync
+ * video mode into the corresponding connector's mode list based on commonly
+ * used refresh rates and VRR range of the connected display, when users enable
+ * this feature. From the userspace perspective, they can see a seamless mode
+ * change experience when the change between different refresh rates under the
+ * same resolution. Additionally, userspace applications such as Video playback
+ * can read this modeset list and change the refresh rate based on the video
+ * frame rate. Finally, the userspace can also derive an appropriate mode for a
+ * particular refresh rate based on the FreeSync Mode and add it to the
+ * connector's mode list.
+ *
+ * Note: This is an experimental feature.
+ *
+ * The default value: 0 (off).
+ */
+MODULE_PARM_DESC(
+	freesync_video,
+	"Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
+
 /**
  * DOC: reset_method (int)
  * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
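Usage note: with the revert applied the knob exists again but stays conservative. Per the MODULE_PARM_DESC above it defaults to 0 (off), is read-only at runtime (permission 0444), and can be enabled at boot with amdgpu.freesync_video=1 on the kernel command line (or freesync_video=1 as an amdgpu module option).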
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -801,7 +801,7 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
 
 		p2plink->attr.name = "properties";
 		p2plink->attr.mode = KFD_SYSFS_FILE_MODE;
-		sysfs_attr_init(&iolink->attr);
+		sysfs_attr_init(&p2plink->attr);
 		ret = sysfs_create_file(p2plink->kobj, &p2plink->attr);
 		if (ret < 0)
 			return ret;
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5835,7 +5835,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		 */
 		DRM_DEBUG_DRIVER("No preferred mode found\n");
 	} else {
-		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
+		recalculate_timing = amdgpu_freesync_vid_mode &&
+				     is_freesync_video_mode(&mode, aconnector);
 		if (recalculate_timing) {
 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
 			drm_mode_copy(&saved_mode, &mode);
@@ -6986,7 +6987,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
 	struct amdgpu_dm_connector *amdgpu_dm_connector =
 			to_amdgpu_dm_connector(connector);
 
-	if (!edid)
+	if (!(amdgpu_freesync_vid_mode && edid))
 		return;
 
 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@ -8850,7 +8851,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 	 * TODO: Refactor this function to allow this check to work
 	 * in all conditions.
 	 */
-	if (dm_new_crtc_state->stream &&
+	if (amdgpu_freesync_vid_mode &&
+	    dm_new_crtc_state->stream &&
 	    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
 		goto skip_modeset;
 
@@ -8885,7 +8887,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 		if (!dm_old_crtc_state->stream)
 			goto skip_modeset;
 
-		if (dm_new_crtc_state->stream &&
+		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
 		    is_timing_unchanged_for_freesync(new_crtc_state,
 						     old_crtc_state)) {
 			new_crtc_state->mode_changed = false;
@@ -8897,7 +8899,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 			set_freesync_fixed_config(dm_new_crtc_state);
 
 			goto skip_modeset;
-		} else if (aconnector &&
+		} else if (amdgpu_freesync_vid_mode && aconnector &&
 			   is_freesync_video_mode(&new_crtc_state->mode,
 						  aconnector)) {
 			struct drm_display_mode *high_mode;
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -6257,12 +6257,12 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurfaces,
 	double SwathSizePerSurfaceC[DC__NUM_DPP__MAX];
 	bool NotEnoughDETSwathFillLatencyHiding = false;
 
-	/* calculate sum of single swath size for all pipes in bytes*/
+	/* calculate sum of single swath size for all pipes in bytes */
 	for (k = 0; k < NumberOfActiveSurfaces; k++) {
-		SwathSizePerSurfaceY[k] += SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
+		SwathSizePerSurfaceY[k] = SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
 
 		if (SwathHeightC[k] != 0)
-			SwathSizePerSurfaceC[k] += SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k];
+			SwathSizePerSurfaceC[k] = SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k];
 		else
 			SwathSizePerSurfaceC[k] = 0;
 
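The bug here is that SwathSizePerSurfaceY[] and SwathSizePerSurfaceC[] are local arrays that were accumulated with += before ever being written, so the "sum" started from indeterminate stack contents and could make the latency-hiding check pass or fail at random (the reported 4k60 UCLK-stuck-at-DPM1 symptom). Assigning with = on the single write per surface fixes it. A tiny standalone sketch of the bug class (illustrative names only):

/* Sketch: "+=" on an uninitialized local accumulates from garbage. */
#include <stdio.h>

int main(void)
{
        double swath_size[4];   /* uninitialized, like the DML locals */
        int k;

        for (k = 0; k < 4; k++) {
                /* BUG (don't do this): swath_size[k] += 256.0 * (k + 1);
                 * would read whatever happened to be on the stack. */
                swath_size[k] = 256.0 * (k + 1);  /* FIX: assign on first write */
        }

        for (k = 0; k < 4; k++)
                printf("surface %d: %.0f bytes\n", k, swath_size[k]);
        return 0;
}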
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -151,6 +151,22 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
 			vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
 			"0x%llx\n");
 
+static int vgpu_status_get(void *data, u64 *val)
+{
+	struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+
+	*val = 0;
+
+	if (test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+		*val |= (1 << INTEL_VGPU_STATUS_ATTACHED);
+	if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
+		*val |= (1 << INTEL_VGPU_STATUS_ACTIVE);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vgpu_status_fops, vgpu_status_get, NULL, "0x%llx\n");
+
 /**
  * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
  * @vgpu: a vGPU
@@ -162,11 +178,12 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
 	snprintf(name, 16, "vgpu%d", vgpu->id);
 	vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
 
-	debugfs_create_bool("active", 0444, vgpu->debugfs, &vgpu->active);
 	debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu,
 			    &vgpu_mmio_diff_fops);
 	debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
 			    &vgpu_scan_nonprivbb_fops);
+	debugfs_create_file("status", 0644, vgpu->debugfs, vgpu,
+			    &vgpu_status_fops);
 }
 
 /**
@@ -175,8 +192,13 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
  */
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
 {
-	debugfs_remove_recursive(vgpu->debugfs);
-	vgpu->debugfs = NULL;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_minor *minor = gvt->gt->i915->drm.primary;
+
+	if (minor->debugfs_root && gvt->debugfs_root) {
+		debugfs_remove_recursive(vgpu->debugfs);
+		vgpu->debugfs = NULL;
+	}
 }
 
 /**
@@ -199,6 +221,10 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt)
  */
 void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
 {
-	debugfs_remove_recursive(gvt->debugfs_root);
-	gvt->debugfs_root = NULL;
+	struct drm_minor *minor = gvt->gt->i915->drm.primary;
+
+	if (minor->debugfs_root) {
+		debugfs_remove_recursive(gvt->debugfs_root);
+		gvt->debugfs_root = NULL;
+	}
 }
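For reference, the new per-vGPU "status" node replaces the old boolean "active" debugfs file. Assuming the usual GVT debugfs layout under the DRM minor (something like /sys/kernel/debug/dri/0/gvt/vgpu1/status — the exact path is an assumption, not spelled out in this diff), reading it returns a hex mask formatted with "0x%llx": bit 0 set means the vGPU is attached, bit 1 set means it is active.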
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -134,7 +134,8 @@ static void dmabuf_gem_object_free(struct kref *kref)
 	struct list_head *pos;
 	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
 
-	if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
+	if (vgpu && test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status) &&
+	    !list_empty(&vgpu->dmabuf_obj_list_head)) {
 		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
 			dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
 			if (dmabuf_obj == obj) {
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -55,7 +55,7 @@ static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
 	int idx;
 	bool ret;
 
-	if (!vgpu->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
 		return false;
 
 	idx = srcu_read_lock(&kvm->srcu);
@@ -1178,7 +1178,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
 	if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
 		return 0;
 
-	if (!vgpu->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
 		return -EINVAL;
 	pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
 	if (is_error_noslot_pfn(pfn))
@@ -1209,10 +1209,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
 		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
 						   PAGE_SIZE, &dma_addr);
-		if (ret) {
-			ppgtt_invalidate_spt(spt);
-			return ret;
-		}
+		if (ret)
+			goto err;
 		sub_se.val64 = se->val64;
 
 		/* Copy the PAT field from PDE. */
@@ -1231,6 +1229,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 	ops->set_pfn(se, sub_spt->shadow_page.mfn);
 	ppgtt_set_shadow_entry(spt, se, index);
 	return 0;
+err:
+	/* Cancel the existing addess mappings of DMA addr. */
+	for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
+		gvt_vdbg_mm("invalidate 4K entry\n");
+		ppgtt_invalidate_pte(sub_spt, &sub_se);
+	}
+	/* Release the new allocated spt. */
+	trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
+			 sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
+	ppgtt_free_spt(sub_spt);
+	return ret;
 }
 
 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -172,13 +172,18 @@ struct intel_vgpu_submission {
 
 #define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
 
+enum {
+	INTEL_VGPU_STATUS_ATTACHED = 0,
+	INTEL_VGPU_STATUS_ACTIVE,
+	INTEL_VGPU_STATUS_NR_BITS,
+};
+
 struct intel_vgpu {
 	struct vfio_device vfio_device;
 	struct intel_gvt *gvt;
 	struct mutex vgpu_lock;
 	int id;
-	bool active;
-	bool attached;
+	DECLARE_BITMAP(status, INTEL_VGPU_STATUS_NR_BITS);
 	bool pv_notified;
 	bool failsafe;
 	unsigned int resetting_eng;
@@ -467,7 +472,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 
 #define for_each_active_vgpu(gvt, vgpu, id) \
 	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
-		for_each_if(vgpu->active)
+		for_each_if(test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
 
 static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
 					    u32 offset, u32 val, bool low)
@@ -725,7 +730,7 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch(
 static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
 				     void *buf, unsigned long len)
 {
-	if (!vgpu->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
 		return -ESRCH;
 	return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
 }
@@ -743,7 +748,7 @@ static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
 static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
 				      unsigned long gpa, void *buf, unsigned long len)
 {
-	if (!vgpu->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
 		return -ESRCH;
 	return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
 }
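Replacing the two independent bools with a single DECLARE_BITMAP lets the driver flip and test vGPU state with the kernel's atomic set_bit()/clear_bit()/test_bit() helpers instead of racy read-modify-write on plain booleans. A userspace sketch of the same idea with C11 atomics (the kernel helpers operate on unsigned long arrays, but the semantics shown here are analogous):

/* Userspace analogue of the atomic status bitmap; names are illustrative. */
#include <stdatomic.h>
#include <stdio.h>

enum {
        VGPU_STATUS_ATTACHED = 0,
        VGPU_STATUS_ACTIVE,
};

static atomic_ulong status;

static void status_set(int bit)   { atomic_fetch_or(&status, 1UL << bit); }
static void status_clear(int bit) { atomic_fetch_and(&status, ~(1UL << bit)); }
static int  status_test(int bit)  { return (atomic_load(&status) >> bit) & 1; }

int main(void)
{
        /* With two plain bools, concurrent writers could tear or reorder the
         * attached/active transitions; a single atomic word cannot. */
        status_set(VGPU_STATUS_ATTACHED);
        status_set(VGPU_STATUS_ACTIVE);
        printf("attached=%d active=%d\n",
               status_test(VGPU_STATUS_ATTACHED), status_test(VGPU_STATUS_ACTIVE));
        status_clear(VGPU_STATUS_ACTIVE);
        printf("attached=%d active=%d\n",
               status_test(VGPU_STATUS_ATTACHED), status_test(VGPU_STATUS_ACTIVE));
        return 0;
}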
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -433,7 +433,7 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
 	 * enabled by guest. so if msi_trigger is null, success is still
 	 * returned and don't inject interrupt into guest.
 	 */
-	if (!vgpu->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
 		return -ESRCH;
 	if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
 		return -EFAULT;
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -638,7 +638,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
 
 	mutex_lock(&vgpu->gvt->lock);
 	for_each_active_vgpu(vgpu->gvt, itr, id) {
-		if (!itr->attached)
+		if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, itr->status))
 			continue;
 
 		if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
@@ -655,9 +655,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
 {
 	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
 
-	if (vgpu->attached)
-		return -EEXIST;
-
 	if (!vgpu->vfio_device.kvm ||
 	    vgpu->vfio_device.kvm->mm != current->mm) {
 		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
@@ -667,14 +664,14 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
 	if (__kvmgt_vgpu_exist(vgpu))
 		return -EEXIST;
 
-	vgpu->attached = true;
-
 	vgpu->track_node.track_write = kvmgt_page_track_write;
 	vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
 	kvm_get_kvm(vgpu->vfio_device.kvm);
 	kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
 					 &vgpu->track_node);
 
+	set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
 	debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
 			     &vgpu->nr_cache_entries);
 
@@ -698,11 +695,10 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
 {
 	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
 
-	if (!vgpu->attached)
-		return;
-
 	intel_gvt_release_vgpu(vgpu);
 
+	clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
 	debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
 
 	kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
@@ -718,8 +714,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
 	vgpu->dma_addr_cache = RB_ROOT;
 
 	intel_vgpu_release_msi_eventfd_ctx(vgpu);
-
-	vgpu->attached = false;
 }
 
 static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -1512,9 +1506,6 @@ static void intel_vgpu_remove(struct mdev_device *mdev)
 {
 	struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);
 
-	if (WARN_ON_ONCE(vgpu->attached))
-		return;
-
 	vfio_unregister_group_dev(&vgpu->vfio_device);
 	vfio_put_device(&vgpu->vfio_device);
 }
@@ -1559,7 +1550,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
 	struct kvm_memory_slot *slot;
 	int idx;
 
-	if (!info->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
 		return -ESRCH;
 
 	idx = srcu_read_lock(&kvm->srcu);
@@ -1589,8 +1580,8 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
 	struct kvm_memory_slot *slot;
 	int idx;
 
-	if (!info->attached)
-		return 0;
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
+		return -ESRCH;
 
 	idx = srcu_read_lock(&kvm->srcu);
 	slot = gfn_to_memslot(kvm, gfn);
@@ -1668,7 +1659,7 @@ int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 	struct gvt_dma *entry;
 	int ret;
 
-	if (!vgpu->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
 		return -EINVAL;
 
 	mutex_lock(&vgpu->cache_lock);
@@ -1714,8 +1705,8 @@ int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
 	struct gvt_dma *entry;
 	int ret = 0;
 
-	if (!vgpu->attached)
-		return -ENODEV;
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+		return -EINVAL;
 
 	mutex_lock(&vgpu->cache_lock);
 	entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
@@ -1742,7 +1733,7 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
 {
 	struct gvt_dma *entry;
 
-	if (!vgpu->attached)
+	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
 		return;
 
 	mutex_lock(&vgpu->cache_lock);
@@ -1778,7 +1769,7 @@ static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
 	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
 		if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
 				       (void *)&gvt->service_request)) {
-			if (vgpu->active)
+			if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
 				intel_vgpu_emulate_vblank(vgpu);
 		}
 	}
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -695,6 +695,7 @@ intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
 
 	if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
 	    !workload->shadow_mm->ppgtt_mm.shadowed) {
+		intel_vgpu_unpin_mm(workload->shadow_mm);
 		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
 		return -EINVAL;
 	}
@@ -865,7 +866,8 @@ pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
 		goto out;
 	}
 
-	if (!scheduler->current_vgpu->active ||
+	if (!test_bit(INTEL_VGPU_STATUS_ACTIVE,
+		      scheduler->current_vgpu->status) ||
 	    list_empty(workload_q_head(scheduler->current_vgpu, engine)))
 		goto out;
 
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -166,9 +166,7 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
  */
 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
 {
-	mutex_lock(&vgpu->vgpu_lock);
-	vgpu->active = true;
-	mutex_unlock(&vgpu->vgpu_lock);
+	set_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
 }
 
 /**
@@ -183,7 +181,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 {
 	mutex_lock(&vgpu->vgpu_lock);
 
-	vgpu->active = false;
+	clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
 
 	if (atomic_read(&vgpu->submission.running_workload_num)) {
 		mutex_unlock(&vgpu->vgpu_lock);
@@ -228,7 +226,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct drm_i915_private *i915 = gvt->gt->i915;
 
-	drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
+	drm_WARN(&i915->drm, test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status),
+		 "vGPU is still active!\n");
 
 	/*
 	 * remove idr first so later clean can judge if need to stop
@@ -285,8 +284,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
 	if (ret)
 		goto out_free_vgpu;
 
-	vgpu->active = false;
-
+	clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
 	return vgpu;
 
 out_free_vgpu:
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -614,6 +614,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
 		break;
 	}
 
+	if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_BG)
+		width = ipu_src_rect_width(new_state);
+	else
+		width = drm_rect_width(&new_state->src) >> 16;
+
 	eba = drm_plane_state_to_eba(new_state, 0);
 
 	/*
@@ -622,8 +627,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
 	 */
 	if (ipu_state->use_pre) {
 		axi_id = ipu_chan_assign_axi_id(ipu_plane->dma);
-		ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id,
-					  ipu_src_rect_width(new_state),
+		ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, width,
 					  drm_rect_height(&new_state->src) >> 16,
 					  fb->pitches[0], fb->format->format,
 					  fb->modifier, &eba);
@@ -678,9 +682,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
 		break;
 	}
 
-	ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8));
+	ipu_dmfc_config_wait4eot(ipu_plane->dmfc, width);
 
-	width = ipu_src_rect_width(new_state);
 	height = drm_rect_height(&new_state->src) >> 16;
 	info = drm_format_info(fb->format->format);
 	ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0],
@@ -744,8 +747,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
 	ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16);
 
 	ipu_cpmem_zero(ipu_plane->alpha_ch);
-	ipu_cpmem_set_resolution(ipu_plane->alpha_ch,
-				 ipu_src_rect_width(new_state),
+	ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width,
 				 drm_rect_height(&new_state->src) >> 16);
 	ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8);
 	ipu_cpmem_set_high_priority(ipu_plane->alpha_ch);
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -436,15 +436,14 @@ void meson_viu_init(struct meson_drm *priv)
 
 	/* Initialize OSD1 fifo control register */
 	reg = VIU_OSD_DDR_PRIORITY_URGENT |
-		VIU_OSD_HOLD_FIFO_LINES(31) |
 		VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */
 		VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */
 		VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */
 
 	if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
-		reg |= VIU_OSD_BURST_LENGTH_32;
+		reg |= (VIU_OSD_BURST_LENGTH_32 | VIU_OSD_HOLD_FIFO_LINES(31));
 	else
-		reg |= VIU_OSD_BURST_LENGTH_64;
+		reg |= (VIU_OSD_BURST_LENGTH_64 | VIU_OSD_HOLD_FIFO_LINES(4));
 
 	writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
 	writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -82,6 +82,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
 	struct panfrost_gem_object *bo;
 	struct drm_panfrost_create_bo *args = data;
 	struct panfrost_gem_mapping *mapping;
+	int ret;
 
 	if (!args->size || args->pad ||
 	    (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
@@ -92,21 +93,29 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
 	    !(args->flags & PANFROST_BO_NOEXEC))
 		return -EINVAL;
 
-	bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
-					     &args->handle);
+	bo = panfrost_gem_create(dev, args->size, args->flags);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
+	ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
+	if (ret)
+		goto out;
+
 	mapping = panfrost_gem_mapping_get(bo, priv);
-	if (!mapping) {
-		drm_gem_object_put(&bo->base.base);
-		return -EINVAL;
+	if (mapping) {
+		args->offset = mapping->mmnode.start << PAGE_SHIFT;
+		panfrost_gem_mapping_put(mapping);
+	} else {
+		/* This can only happen if the handle from
+		 * drm_gem_handle_create() has already been guessed and freed
+		 * by user space
+		 */
+		ret = -EINVAL;
 	}
 
-	args->offset = mapping->mmnode.start << PAGE_SHIFT;
-	panfrost_gem_mapping_put(mapping);
-
-	return 0;
+out:
+	drm_gem_object_put(&bo->base.base);
+	return ret;
 }
 
 /**
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -235,12 +235,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
 }
 
 struct panfrost_gem_object *
-panfrost_gem_create_with_handle(struct drm_file *file_priv,
-				struct drm_device *dev, size_t size,
-				u32 flags,
-				uint32_t *handle)
+panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
 {
-	int ret;
 	struct drm_gem_shmem_object *shmem;
 	struct panfrost_gem_object *bo;
 
@@ -256,16 +252,6 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
 	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
 	bo->is_heap = !!(flags & PANFROST_BO_HEAP);
 
-	/*
-	 * Allocate an id of idr table where the obj is registered
-	 * and handle has the id what user can see.
-	 */
-	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
-	/* drop reference from allocate - handle holds it now. */
-	drm_gem_object_put(&shmem->base);
-	if (ret)
-		return ERR_PTR(ret);
-
 	return bo;
 }
 
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -69,10 +69,7 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
 				   struct sg_table *sgt);
 
 struct panfrost_gem_object *
-panfrost_gem_create_with_handle(struct drm_file *file_priv,
-				struct drm_device *dev, size_t size,
-				u32 flags,
-				uint32_t *handle);
+panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags);
 
 int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
 void panfrost_gem_close(struct drm_gem_object *obj,
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -81,7 +81,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 	init_completion(&entity->entity_idle);
 
 	/* We start in an idle state. */
-	complete(&entity->entity_idle);
+	complete_all(&entity->entity_idle);
 
 	spin_lock_init(&entity->rq_lock);
 	spsc_queue_init(&entity->job_queue);
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -987,7 +987,7 @@ static int drm_sched_main(void *param)
 		sched_job = drm_sched_entity_pop_job(entity);
 
 		if (!sched_job) {
-			complete(&entity->entity_idle);
+			complete_all(&entity->entity_idle);
 			continue;
 		}
 
@@ -998,7 +998,7 @@ static int drm_sched_main(void *param)
 
 		trace_drm_run_job(sched_job, entity);
 		fence = sched->ops->run_job(sched_job);
-		complete(&entity->entity_idle);
+		complete_all(&entity->entity_idle);
 		drm_sched_fence_scheduled(s_fence);
 
 		if (!IS_ERR_OR_NULL(fence)) {
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -12,3 +12,5 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
 	drm_mm_test.o \
 	drm_plane_helper_test.o \
 	drm_rect_test.o
+
+CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -298,9 +298,9 @@ static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node,
 	return false;
 }
 
-static bool check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
-				     unsigned int count,
-				     u64 size)
+static bool noinline_for_stack check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
+							unsigned int count,
+							u64 size)
 {
 	const struct boundary {
 		u64 start, size;
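The kunit fix marks the helper noinline_for_stack: when several such helpers are inlined into a single test function, the compiler can keep each inlined copy's large locals live in one combined stack frame. Keeping the helper out of line confines its frame to the duration of the call. A sketch with the plain GCC attribute (assumed here as a stand-in for the kernel macro):

/* Sketch: keeping a big frame out of the caller. The kernel's
 * noinline_for_stack is essentially the attribute used below. */
#include <stdio.h>
#include <string.h>

__attribute__((noinline))
static int check_boundaries(int count)
{
        char scratch[512];      /* large frame, alive only inside this call */

        memset(scratch, 0, sizeof(scratch));
        return scratch[0] + count;
}

int main(void)
{
        int total = 0, i;

        /* If check_boundaries() were inlined here, its 512-byte frame (or
         * several copies of it) could stay live across the whole loop. */
        for (i = 0; i < 4; i++)
                total += check_boundaries(i);
        printf("total=%d\n", total);
        return 0;
}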
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -184,7 +184,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_object_array *objs = NULL;
 	struct drm_gem_shmem_object *shmem_obj;
 	struct virtio_gpu_object *bo;
-	struct virtio_gpu_mem_entry *ents;
+	struct virtio_gpu_mem_entry *ents = NULL;
 	unsigned int nents;
 	int ret;
 
@@ -210,7 +210,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 		ret = -ENOMEM;
 		objs = virtio_gpu_array_alloc(1);
 		if (!objs)
-			goto err_put_id;
+			goto err_free_entry;
 		virtio_gpu_array_add_obj(objs, &bo->base.base);
 
 		ret = virtio_gpu_array_lock_resv(objs);
@@ -239,6 +239,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 
 err_put_objs:
 	virtio_gpu_array_put_free(objs);
+err_free_entry:
+	kvfree(ents);
 err_put_id:
 	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
 err_free_gem:
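Initializing ents to NULL is what makes the new err_free_entry rung of the unwind ladder safe: kvfree(NULL) is a no-op, so any failure path can jump into the ladder and only what was actually allocated gets freed — previously, when virtio_gpu_array_alloc() failed, the already-allocated mem entries leaked. The same pattern in plain C, where free(NULL) is likewise a no-op (sketch, names invented):

/* Sketch of the NULL-initialized error ladder. */
#include <stdio.h>
#include <stdlib.h>

static int create_object(int fail_step)
{
        char *ents = NULL;      /* NULL-init makes a shared cleanup label safe */
        char *objs = NULL;
        int ret = 0;

        ents = malloc(64);
        if (!ents || fail_step == 1) {
                ret = -1;
                goto err_free;  /* objs is still NULL: free(objs) is a no-op */
        }

        objs = malloc(64);
        if (!objs || fail_step == 2) {
                ret = -2;
                goto err_free;  /* both pointers are freed exactly once */
        }

        free(objs);
        free(ents);
        return 0;

err_free:
        free(objs);     /* no-op when the failure came before its allocation */
        free(ents);
        return ret;
}

int main(void)
{
        printf("%d %d %d\n", create_object(0), create_object(1), create_object(2));
        return 0;
}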
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -26,6 +26,7 @@
 
 #include <linux/types.h>
 
+struct drm_atomic_state;
 struct drm_crtc;
 struct drm_framebuffer;
 struct drm_modeset_acquire_ctx;