drm fixes for 5.4-rc3

i915:
- execlist access fixes
- list deletion fix
- CML display fix
- HSW workaround extension to GT2
- chicken bit whitelist
- GGTT resume issue
- SKL GPU hangs for Vulkan compute

amdgpu:
- memory leak fix

panel:
- spi aliases

tc358767:
- bridge artifacts fix.

-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJdoAWcAAoJEAx081l5xIa++sAP/3gz9/er5CsAZxUU9HYTxmvK
b0B7RTAExKQreqiGZTbmIxnFZ4pDyJCSW+jxJzHe6gapZ1jqPoLI52qQvNpdm2Er
9Uhb41g/BeNbMsd8Xq3pfCKv/vzmWe1dh4vBumqJ8D4jROmu3G3CIvzqTvVbkvi+
8cOQYMLXvpb/mwvy8zGHc9YhfcLaeFTIXDi2r68lKRXGLJiDfK5sM+YSX2Bfw/WJ
1DTa+/eCN0+WLKQGfwrxM6RScpGWwZ7QvkPDWw3Pg0VYZ8XZ3GCM2j1rMf1U8982
Ylerc0+Vfio74yUnvcyOv0zYwTSQEh7r8n+thnqdFB2qD9WVyRD9OQhJcbbnGofm
m2kuwU8dtFk+G4jVBt7e8aXSTFe9MiLREsYcs5ji+gd+3Z0EfDfiYEeIxL9rpd6Z
k2qwCcv3ciEVedKF1+aIyz6y5DGlTS8U6rSOCXMuqfRb7BaTVVNWb8hD/hs4baUu
LURV7rjMK4EvzjYckXwDazDPQq6aFGv+WjF7Q6RWiqU7eNRiwSlk6CelqDsuE/JI
TTbKPeFnwRzgiXcS3Z9yG26DQZeQ5T2P6+uzB4k3dAGBHQJRFIWWyLEoOjN/mvWr
uf2ft0niQrnnagEQvPSiL2pt5pg5d/yx7J9Rl8cmgqqtMqezU26hj4pKR36/DBfm
H827IEzbooDCt9ocLaYR
=u4hb
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2019-10-11' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "The regular fixes pull for rc3. The i915 team found some fixes they
  (or I) missed for rc1, which is why this is a bit bigger than usual,
  otherwise there is a single amdgpu fix, some spi panel aliases, and a
  bridge fix.

  i915:
   - execlist access fixes
   - list deletion fix
   - CML display fix
   - HSW workaround extension to GT2
   - chicken bit whitelist
   - GGTT resume issue
   - SKL GPU hangs for Vulkan compute

  amdgpu:
   - memory leak fix

  panel:
   - spi aliases

  tc358767:
   - bridge artifacts fix"

* tag 'drm-fixes-2019-10-11' of git://anongit.freedesktop.org/drm/drm: (22 commits)
  drm/bridge: tc358767: fix max_tu_symbol value
  drm/i915/gt: execlists->active is serialised by the tasklet
  drm/i915/execlists: Protect peeking at execlists->active
  drm/i915: Fixup preempt-to-busy vs reset of a virtual request
  drm/i915: Only enqueue already completed requests
  drm/i915/execlists: Drop redundant list_del_init(&rq->sched.link)
  drm/i915/cml: Add second PCH ID for CMP
  drm/amdgpu: fix memory leak
  drm/panel: tpo-td043mtea1: Fix SPI alias
  drm/panel: tpo-td028ttec1: Fix SPI alias
  drm/panel: sony-acx565akm: Fix SPI alias
  drm/panel: nec-nl8048hl11: Fix SPI alias
  drm/panel: lg-lb035q02: Fix SPI alias
  drm/i915: Mark contents as dirty on a write fault
  drm/i915: Prevent bonded requests from overtaking each other on preemption
  drm/i915: Bump skl+ max plane width to 5k for linear/x-tiled
  drm/i915: Verify the engine after acquiring the active.lock
  drm/i915: Extend Haswell GT1 PSMI workaround to all
  drm/i915: Don't mix srcu tag and negative error codes
  drm/i915: Whitelist COMMON_SLICE_CHICKEN2
  ...
commit 9892f9f6cf
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -270,7 +270,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 
 	r = amdgpu_bo_create_list_entry_array(&args->in, &info);
 	if (r)
-		goto error_free;
+		return r;
 
 	switch (args->in.operation) {
 	case AMDGPU_BO_LIST_OP_CREATE:
@@ -283,8 +283,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 		r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
 		mutex_unlock(&fpriv->bo_list_lock);
 		if (r < 0) {
-			amdgpu_bo_list_put(list);
-			return r;
+			goto error_put_list;
 		}
 
 		handle = r;
@@ -306,9 +305,8 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 		mutex_unlock(&fpriv->bo_list_lock);
 
 		if (IS_ERR(old)) {
-			amdgpu_bo_list_put(list);
 			r = PTR_ERR(old);
-			goto error_free;
+			goto error_put_list;
 		}
 
 		amdgpu_bo_list_put(old);
@@ -325,8 +323,10 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 
 	return 0;
 
+error_put_list:
+	amdgpu_bo_list_put(list);
+
 error_free:
-	if (info)
-		kvfree(info);
+	kvfree(info);
 	return r;
 }
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -728,6 +728,8 @@ static int tc_set_video_mode(struct tc_data *tc,
 	int lower_margin = mode->vsync_start - mode->vdisplay;
 	int vsync_len = mode->vsync_end - mode->vsync_start;
 	u32 dp0_syncval;
+	u32 bits_per_pixel = 24;
+	u32 in_bw, out_bw;
 
 	/*
 	 * Recommended maximum number of symbols transferred in a transfer unit:
@@ -735,7 +737,10 @@ static int tc_set_video_mode(struct tc_data *tc,
 	 * (output active video bandwidth in bytes))
 	 * Must be less than tu_size.
 	 */
-	max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+
+	in_bw = mode->clock * bits_per_pixel / 8;
+	out_bw = tc->link.base.num_lanes * tc->link.base.rate;
+	max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw);
 
 	dev_dbg(tc->dev, "set mode %dx%d\n",
 		mode->hdisplay, mode->vdisplay);
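For reference, the new max_tu_symbol sizing above is a pure bandwidth ratio, so in_bw and out_bw only need to share a scale. A minimal standalone sketch of the same arithmetic, assuming the driver's TU_SIZE_RECOMMENDED of 63 and made-up 1080p/2-lane example numbers:

/* Hedged sketch of the max_tu_symbol computation; the mode clock,
 * bpp, lane count and per-lane rate below are hypothetical example
 * values, chosen only so in_bw and out_bw share the same scale. */
#include <stdio.h>

#define TU_SIZE_RECOMMENDED 63	/* assumed driver value */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int clock = 148500;	/* pixel clock, e.g. 1080p60, kHz */
	unsigned int bits_per_pixel = 24;
	unsigned int num_lanes = 2;
	unsigned int rate = 270000;	/* per-lane link rate, same scale */

	unsigned int in_bw = clock * bits_per_pixel / 8;
	unsigned int out_bw = num_lanes * rate;
	unsigned int max_tu_symbol =
		DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw);

	/* prints 52: comfortably below TU_SIZE_RECOMMENDED as required */
	printf("max_tu_symbol = %u\n", max_tu_symbol);
	return 0;
}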
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -3280,7 +3280,20 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb,
 	switch (fb->modifier) {
 	case DRM_FORMAT_MOD_LINEAR:
 	case I915_FORMAT_MOD_X_TILED:
-		return 4096;
+		/*
+		 * Validated limit is 4k, but has 5k should
+		 * work apart from the following features:
+		 * - Ytile (already limited to 4k)
+		 * - FP16 (already limited to 4k)
+		 * - render compression (already limited to 4k)
+		 * - KVMR sprite and cursor (don't care)
+		 * - horizontal panning (TODO verify this)
+		 * - pipe and plane scaling (TODO verify this)
+		 */
+		if (cpp == 8)
+			return 4096;
+		else
+			return 5120;
 	case I915_FORMAT_MOD_Y_TILED_CCS:
 	case I915_FORMAT_MOD_Yf_TILED_CCS:
 		/* FIXME AUX plane? */
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -245,11 +245,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 
 	wakeref = intel_runtime_pm_get(rpm);
 
-	srcu = intel_gt_reset_trylock(ggtt->vm.gt);
-	if (srcu < 0) {
-		ret = srcu;
+	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
+	if (ret)
 		goto err_rpm;
-	}
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
@@ -318,7 +316,11 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
 			   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
 
-	i915_vma_set_ggtt_write(vma);
+	if (write) {
+		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+		i915_vma_set_ggtt_write(vma);
+		obj->mm.dirty = true;
+	}
 
 err_fence:
 	i915_vma_unpin_fence(vma);
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -241,9 +241,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
 
-	i915_gem_restore_gtt_mappings(i915);
-	i915_gem_restore_fences(i915);
-
 	if (i915_gem_init_hw(i915))
 		goto err_wedged;
 
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -136,6 +136,20 @@ execlists_active(const struct intel_engine_execlists *execlists)
 	return READ_ONCE(*execlists->active);
 }
 
+static inline void
+execlists_active_lock_bh(struct intel_engine_execlists *execlists)
+{
+	local_bh_disable(); /* prevent local softirq and lock recursion */
+	tasklet_lock(&execlists->tasklet);
+}
+
+static inline void
+execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
+{
+	tasklet_unlock(&execlists->tasklet);
+	local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
+}
+
 struct i915_request *
 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
 
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1197,9 +1197,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 					 struct drm_printer *m)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	const struct intel_engine_execlists * const execlists =
-		&engine->execlists;
-	unsigned long flags;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
 	u64 addr;
 
 	if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
@@ -1281,7 +1279,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 				   idx, hws[idx * 2], hws[idx * 2 + 1]);
 		}
 
-		spin_lock_irqsave(&engine->active.lock, flags);
+		execlists_active_lock_bh(execlists);
 		for (port = execlists->active; (rq = *port); port++) {
 			char hdr[80];
 			int len;
@@ -1309,7 +1307,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 				   hwsp_seqno(rq));
 			print_request(m, rq, hdr);
 		}
-		spin_unlock_irqrestore(&engine->active.lock, flags);
+		execlists_active_unlock_bh(execlists);
 	} else if (INTEL_GEN(dev_priv) > 6) {
 		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
 			   ENGINE_READ(engine, RING_PP_DIR_BASE));
@@ -1440,8 +1438,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 	if (!intel_engine_supports_stats(engine))
 		return -ENODEV;
 
-	spin_lock_irqsave(&engine->active.lock, flags);
-	write_seqlock(&engine->stats.lock);
+	execlists_active_lock_bh(execlists);
+	write_seqlock_irqsave(&engine->stats.lock, flags);
 
 	if (unlikely(engine->stats.enabled == ~0)) {
 		err = -EBUSY;
@@ -1469,8 +1467,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 	}
 
 unlock:
-	write_sequnlock(&engine->stats.lock);
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	write_sequnlock_irqrestore(&engine->stats.lock, flags);
+	execlists_active_unlock_bh(execlists);
 
 	return err;
 }
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -631,7 +631,6 @@ execlists_schedule_out(struct i915_request *rq)
 	struct intel_engine_cs *cur, *old;
 
 	trace_i915_request_out(rq);
-	GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
 
 	old = READ_ONCE(ce->inflight);
 	do
@@ -797,6 +796,17 @@ static bool can_merge_rq(const struct i915_request *prev,
 	GEM_BUG_ON(prev == next);
 	GEM_BUG_ON(!assert_priority_queue(prev, next));
 
+	/*
+	 * We do not submit known completed requests. Therefore if the next
+	 * request is already completed, we can pretend to merge it in
+	 * with the previous context (and we will skip updating the ELSP
+	 * and tracking). Thus hopefully keeping the ELSP full with active
+	 * contexts, despite the best efforts of preempt-to-busy to confuse
+	 * us.
+	 */
+	if (i915_request_completed(next))
+		return true;
+
 	if (!can_merge_ctx(prev->hw_context, next->hw_context))
 		return false;
 
@@ -893,7 +903,7 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
 static struct i915_request *
 last_active(const struct intel_engine_execlists *execlists)
 {
-	struct i915_request * const *last = execlists->active;
+	struct i915_request * const *last = READ_ONCE(execlists->active);
 
 	while (*last && i915_request_completed(*last))
 		last++;
@@ -1172,21 +1182,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				continue;
 			}
 
-			if (i915_request_completed(rq)) {
-				ve->request = NULL;
-				ve->base.execlists.queue_priority_hint = INT_MIN;
-				rb_erase_cached(rb, &execlists->virtual);
-				RB_CLEAR_NODE(rb);
-
-				rq->engine = engine;
-				__i915_request_submit(rq);
-
-				spin_unlock(&ve->base.active.lock);
-
-				rb = rb_first_cached(&execlists->virtual);
-				continue;
-			}
-
 			if (last && !can_merge_rq(last, rq)) {
 				spin_unlock(&ve->base.active.lock);
 				return; /* leave this for another */
@@ -1237,11 +1232,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				GEM_BUG_ON(ve->siblings[0] != engine);
 			}
 
-			__i915_request_submit(rq);
-			if (!i915_request_completed(rq)) {
+			if (__i915_request_submit(rq)) {
 				submit = true;
 				last = rq;
 			}
+
+			/*
+			 * Hmm, we have a bunch of virtual engine requests,
+			 * but the first one was already completed (thanks
+			 * preempt-to-busy!). Keep looking at the veng queue
+			 * until we have no more relevant requests (i.e.
+			 * the normal submit queue has higher priority).
+			 */
+			if (!submit) {
+				spin_unlock(&ve->base.active.lock);
+				rb = rb_first_cached(&execlists->virtual);
+				continue;
+			}
 		}
 
 		spin_unlock(&ve->base.active.lock);
@@ -1254,8 +1261,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		int i;
 
 		priolist_for_each_request_consume(rq, rn, p, i) {
-			if (i915_request_completed(rq))
-				goto skip;
+			bool merge = true;
 
 			/*
 			 * Can we combine this request with the current port?
@@ -1296,14 +1302,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				    ctx_single_port_submission(rq->hw_context))
 					goto done;
 
-				*port = execlists_schedule_in(last, port - execlists->pending);
-				port++;
+				merge = false;
 			}
 
-			last = rq;
-			submit = true;
-skip:
-			__i915_request_submit(rq);
+			if (__i915_request_submit(rq)) {
+				if (!merge) {
+					*port = execlists_schedule_in(last, port - execlists->pending);
+					port++;
+					last = NULL;
+				}
+
+				GEM_BUG_ON(last &&
+					   !can_merge_ctx(last->hw_context,
+							  rq->hw_context));
+
+				submit = true;
+				last = rq;
+			}
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
@@ -1593,8 +1608,11 @@ static void process_csb(struct intel_engine_cs *engine)
 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
 {
 	lockdep_assert_held(&engine->active.lock);
-	if (!engine->execlists.pending[0])
+	if (!engine->execlists.pending[0]) {
+		rcu_read_lock(); /* protect peeking at execlists->active */
 		execlists_dequeue(engine);
+		rcu_read_unlock();
+	}
 }
 
 /*
@@ -2399,10 +2417,14 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
 
 static struct i915_request *active_request(struct i915_request *rq)
 {
-	const struct list_head * const list = &rq->timeline->requests;
 	const struct intel_context * const ce = rq->hw_context;
 	struct i915_request *active = NULL;
+	struct list_head *list;
 
+	if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
+		return rq;
+
+	list = &rq->timeline->requests;
 	list_for_each_entry_from_reverse(rq, list, link) {
 		if (i915_request_completed(rq))
 			break;
@@ -2565,7 +2587,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 		int i;
 
 		priolist_for_each_request_consume(rq, rn, p, i) {
-			list_del_init(&rq->sched.link);
 			__i915_request_submit(rq);
 			dma_fence_set_error(&rq->fence, -EIO);
 			i915_request_mark_complete(rq);
@@ -3631,18 +3652,22 @@ static void
 virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
 {
 	struct virtual_engine *ve = to_virtual_engine(rq->engine);
+	intel_engine_mask_t allowed, exec;
 	struct ve_bond *bond;
 
-	bond = virtual_find_bond(ve, to_request(signal)->engine);
-	if (bond) {
-		intel_engine_mask_t old, new, cmp;
-
-		cmp = READ_ONCE(rq->execution_mask);
-		do {
-			old = cmp;
-			new = cmp & bond->sibling_mask;
-		} while ((cmp = cmpxchg(&rq->execution_mask, old, new)) != old);
-	}
+	allowed = ~to_request(signal)->engine->mask;
+
+	bond = virtual_find_bond(ve, to_request(signal)->engine);
+	if (bond)
+		allowed &= bond->sibling_mask;
+
+	/* Restrict the bonded request to run on only the available engines */
+	exec = READ_ONCE(rq->execution_mask);
+	while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
+		;
+
+	/* Prevent the master from being re-run on the bonded engines */
+	to_request(signal)->execution_mask &= ~allowed;
 }
 
 struct intel_context *
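The try_cmpxchg() loop in virtual_bond_execute() above maps directly onto C11 atomics: on failure the expected value is reloaded with the current one, so the loop simply retries until the AND lands atomically. A minimal userspace sketch, with invented mask values:

/* Userspace C11 rendering of the try_cmpxchg() pattern above;
 * mask values are invented. atomic_compare_exchange_weak(),
 * like try_cmpxchg(), updates 'exec' on failure. */
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	_Atomic unsigned int execution_mask = 0x0f; /* engines 0-3 */
	unsigned int allowed = ~0x01u;		    /* all but engine 0 */

	unsigned int exec = atomic_load(&execution_mask);
	while (!atomic_compare_exchange_weak(&execution_mask, &exec,
					     exec & allowed))
		; /* exec was reloaded with the current value; retry */

	printf("execution_mask = %#x\n",	/* prints 0xe */
	       (unsigned int)atomic_load(&execution_mask));
	return 0;
}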
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -42,11 +42,10 @@ static void engine_skip_context(struct i915_request *rq)
 	struct intel_engine_cs *engine = rq->engine;
 	struct i915_gem_context *hung_ctx = rq->gem_context;
 
-	lockdep_assert_held(&engine->active.lock);
-
 	if (!i915_request_is_active(rq))
 		return;
 
+	lockdep_assert_held(&engine->active.lock);
 	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
 		if (rq->gem_context == hung_ctx)
 			i915_request_skip(rq, -EIO);
@@ -123,7 +122,6 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
 		  rq->fence.seqno,
 		  yesno(guilty));
 
-	lockdep_assert_held(&rq->engine->active.lock);
 	GEM_BUG_ON(i915_request_completed(rq));
 
 	if (guilty) {
@@ -1214,10 +1212,8 @@ out:
 	intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
 }
 
-int intel_gt_reset_trylock(struct intel_gt *gt)
+int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
 {
-	int srcu;
-
 	might_lock(&gt->reset.backoff_srcu);
 	might_sleep();
 
@@ -1232,10 +1228,10 @@ int intel_gt_reset_trylock(struct intel_gt *gt)
 
 		rcu_read_lock();
 	}
-	srcu = srcu_read_lock(&gt->reset.backoff_srcu);
+	*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
 	rcu_read_unlock();
 
-	return srcu;
+	return 0;
 }
 
 void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -38,7 +38,7 @@ int intel_engine_reset(struct intel_engine_cs *engine,
 
 void __i915_request_reset(struct i915_request *rq, bool guilty);
 
-int __must_check intel_gt_reset_trylock(struct intel_gt *gt);
+int __must_check intel_gt_reset_trylock(struct intel_gt *gt, int *srcu);
 void intel_gt_reset_unlock(struct intel_gt *gt, int tag);
 
 void intel_gt_set_wedged(struct intel_gt *gt);
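The API reshape above exists because an SRCU read-lock cookie is an arbitrary int that can collide with negative errno values, so returning the cookie directly ("Don't mix srcu tag and negative error codes") was ambiguous. A hypothetical userspace stand-in for the resulting 0/-errno-plus-out-parameter shape:

/* Illustrative only: the shape of the fixed API, not the driver's
 * code. The cookie is opaque and may look like an error, so it is
 * handed back through a pointer while the return carries 0/-errno. */
#include <errno.h>
#include <stdio.h>

static int fake_srcu_read_lock(void)
{
	return 42;	/* any int, including ones resembling -errno */
}

static int reset_trylock(int *srcu)
{
	/* a real implementation would return -EINTR if interrupted */
	*srcu = fake_srcu_read_lock();
	return 0;
}

int main(void)
{
	int srcu;
	int ret = reset_trylock(&srcu);

	if (ret)
		return 1;
	printf("locked, cookie = %d\n", srcu);
	return 0;
}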
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1573,7 +1573,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 	struct intel_engine_cs *engine = rq->engine;
 	enum intel_engine_id id;
 	const int num_engines =
-		IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
+		IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
 	bool force_restore = false;
 	int len;
 	u32 *cs;
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1063,6 +1063,9 @@ static void gen9_whitelist_build(struct i915_wa_list *w)
 
 	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
 	whitelist_reg(w, GEN8_HDC_CHICKEN1);
+
+	/* WaSendPushConstantsFromMMIO:skl,bxt */
+	whitelist_reg(w, COMMON_SLICE_CHICKEN2);
 }
 
 static void skl_whitelist_build(struct intel_engine_cs *engine)
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1924,6 +1924,11 @@ static int i915_drm_resume(struct drm_device *dev)
 	if (ret)
 		DRM_ERROR("failed to re-enable GGTT\n");
 
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	i915_gem_restore_gtt_mappings(dev_priv);
+	i915_gem_restore_fences(dev_priv);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+
 	intel_csr_ucode_resume(dev_priv);
 
 	i915_restore_state(dev_priv);
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -77,6 +77,12 @@ struct drm_i915_private;
 
 #define I915_GEM_IDLE_TIMEOUT (HZ / 5)
 
+static inline void tasklet_lock(struct tasklet_struct *t)
+{
+	while (!tasklet_trylock(t))
+		cpu_relax();
+}
+
 static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
 {
 	if (!atomic_fetch_inc(&t->count))
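tasklet_lock() above is just a trylock spun with cpu_relax(); it is what lets execlists_active_lock_bh() hold off the submission tasklet while another CPU peeks at the ELSP. A minimal userspace model of that helper, with atomic_flag standing in for the tasklet's RUN bit (everything here is illustrative):

/* Userspace model of the trylock-spin above; not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

#if defined(__x86_64__) || defined(__i386__)
#define cpu_relax() __builtin_ia32_pause()
#else
#define cpu_relax() do { } while (0)
#endif

struct tasklet { atomic_flag run; };

static int tasklet_trylock(struct tasklet *t)
{
	/* succeeds only if the flag (the RUN bit) was clear */
	return !atomic_flag_test_and_set(&t->run);
}

static void tasklet_lock(struct tasklet *t)
{
	while (!tasklet_trylock(t))
		cpu_relax();	/* busy-wait until the holder finishes */
}

static void tasklet_unlock(struct tasklet *t)
{
	atomic_flag_clear(&t->run);
}

int main(void)
{
	struct tasklet t = { ATOMIC_FLAG_INIT };

	tasklet_lock(&t);
	printf("trylock while held: %d\n", tasklet_trylock(&t));   /* 0 */
	tasklet_unlock(&t);
	printf("trylock after unlock: %d\n", tasklet_trylock(&t)); /* 1 */
	return 0;
}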
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -194,6 +194,27 @@ static void free_capture_list(struct i915_request *request)
 	}
 }
 
+static void remove_from_engine(struct i915_request *rq)
+{
+	struct intel_engine_cs *engine, *locked;
+
+	/*
+	 * Virtual engines complicate acquiring the engine timeline lock,
+	 * as their rq->engine pointer is not stable until under that
+	 * engine lock. The simple ploy we use is to take the lock then
+	 * check that the rq still belongs to the newly locked engine.
+	 */
+	locked = READ_ONCE(rq->engine);
+	spin_lock(&locked->active.lock);
+	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+		spin_unlock(&locked->active.lock);
+		spin_lock(&engine->active.lock);
+		locked = engine;
+	}
+	list_del(&rq->sched.link);
+	spin_unlock(&locked->active.lock);
+}
+
 static bool i915_request_retire(struct i915_request *rq)
 {
 	struct i915_active_request *active, *next;
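remove_from_engine() above relies on a lock-then-revalidate pattern: the request's engine pointer is only stable once its current owner's lock is held, so the code locks, re-reads, and chases on a mismatch. A userspace sketch of the same dance, with invented types:

/* Illustrative only: the lock-chasing pattern, not the driver's
 * types. rq->engine may change until the owning lock is held. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct engine { pthread_mutex_t lock; };
struct request { _Atomic(struct engine *) engine; };

static void remove_from_engine(struct request *rq)
{
	struct engine *engine, *locked;

	locked = atomic_load(&rq->engine);
	pthread_mutex_lock(&locked->lock);
	while ((engine = atomic_load(&rq->engine)) != locked) {
		/* the request migrated first: drop and chase the owner */
		pthread_mutex_unlock(&locked->lock);
		pthread_mutex_lock(&engine->lock);
		locked = engine;
	}
	/* rq->engine is now stable: safe to unlink from this engine */
	pthread_mutex_unlock(&locked->lock);
}

int main(void)
{
	struct engine e = { PTHREAD_MUTEX_INITIALIZER };
	struct request rq;

	atomic_init(&rq.engine, &e);
	remove_from_engine(&rq);
	puts("unlinked under a stable engine lock");
	return 0;
}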
@@ -259,9 +280,7 @@ static bool i915_request_retire(struct i915_request *rq)
 	 * request that we have removed from the HW and put back on a run
 	 * queue.
 	 */
-	spin_lock(&rq->engine->active.lock);
-	list_del(&rq->sched.link);
-	spin_unlock(&rq->engine->active.lock);
+	remove_from_engine(rq);
 
 	spin_lock(&rq->lock);
 	i915_request_mark_complete(rq);
@@ -358,9 +377,10 @@ __i915_request_await_execution(struct i915_request *rq,
 	return 0;
 }
 
-void __i915_request_submit(struct i915_request *request)
+bool __i915_request_submit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
+	bool result = false;
 
 	GEM_TRACE("%s fence %llx:%lld, current %d\n",
 		  engine->name,
@@ -370,6 +390,25 @@ bool __i915_request_submit(struct i915_request *request)
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->active.lock);
 
+	/*
+	 * With the advent of preempt-to-busy, we frequently encounter
+	 * requests that we have unsubmitted from HW, but left running
+	 * until the next ack and so have completed in the meantime. On
+	 * resubmission of that completed request, we can skip
+	 * updating the payload, and execlists can even skip submitting
+	 * the request.
+	 *
+	 * We must remove the request from the caller's priority queue,
+	 * and the caller must only call us when the request is in their
+	 * priority queue, under the active.lock. This ensures that the
+	 * request has *not* yet been retired and we can safely move
+	 * the request into the engine->active.list where it will be
+	 * dropped upon retiring. (Otherwise if resubmit a *retired*
+	 * request, this would be a horrible use-after-free.)
+	 */
+	if (i915_request_completed(request))
+		goto xfer;
+
 	if (i915_gem_context_is_banned(request->gem_context))
 		i915_request_skip(request, -EIO);
 
@@ -393,13 +432,18 @@ bool __i915_request_submit(struct i915_request *request)
 	    i915_sw_fence_signaled(&request->semaphore))
 		engine->saturated |= request->sched.semaphores;
 
-	/* We may be recursing from the signal callback of another i915 fence */
+	engine->emit_fini_breadcrumb(request,
+				     request->ring->vaddr + request->postfix);
+
+	trace_i915_request_execute(request);
+	engine->serial++;
+	result = true;
+
+xfer:	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
-	list_move_tail(&request->sched.link, &engine->active.requests);
-
-	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
-	set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
+	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
+		list_move_tail(&request->sched.link, &engine->active.requests);
 
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
@@ -410,12 +454,7 @@ bool __i915_request_submit(struct i915_request *request)
 
 	spin_unlock(&request->lock);
 
-	engine->emit_fini_breadcrumb(request,
-				     request->ring->vaddr + request->postfix);
-
-	engine->serial++;
-
-	trace_i915_request_execute(request);
+	return result;
 }
 
 void i915_request_submit(struct i915_request *request)
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -292,7 +292,7 @@ int i915_request_await_execution(struct i915_request *rq,
 
 void i915_request_add(struct i915_request *rq);
 
-void __i915_request_submit(struct i915_request *request);
+bool __i915_request_submit(struct i915_request *request);
 void i915_request_submit(struct i915_request *request);
 
 void i915_request_skip(struct i915_request *request, int error);
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -69,6 +69,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
 		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
 		return PCH_CNP;
 	case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+	case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
 		DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
 		WARN_ON(!IS_COFFEELAKE(dev_priv));
 		/* CometPoint is CNP Compatible */
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -41,6 +41,7 @@ enum intel_pch {
 #define INTEL_PCH_CNP_DEVICE_ID_TYPE		0xA300
 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE		0x9D80
 #define INTEL_PCH_CMP_DEVICE_ID_TYPE		0x0280
+#define INTEL_PCH_CMP2_DEVICE_ID_TYPE		0x0680
 #define INTEL_PCH_ICP_DEVICE_ID_TYPE		0x3480
 #define INTEL_PCH_MCC_DEVICE_ID_TYPE		0x4B00
 #define INTEL_PCH_MCC2_DEVICE_ID_TYPE		0x3880
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -118,6 +118,12 @@ static void pm_resume(struct drm_i915_private *i915)
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
 		intel_gt_sanitize(&i915->gt, false);
 		i915_gem_sanitize(i915);
+
+		mutex_lock(&i915->drm.struct_mutex);
+		i915_gem_restore_gtt_mappings(i915);
+		i915_gem_restore_fences(i915);
+		mutex_unlock(&i915->drm.struct_mutex);
+
 		i915_gem_resume(i915);
 	}
 }
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -220,9 +220,17 @@ static const struct of_device_id lb035q02_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, lb035q02_of_match);
 
+static const struct spi_device_id lb035q02_ids[] = {
+	{ "lb035q02", 0 },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, lb035q02_ids);
+
 static struct spi_driver lb035q02_driver = {
 	.probe		= lb035q02_probe,
 	.remove		= lb035q02_remove,
+	.id_table	= lb035q02_ids,
 	.driver		= {
 		.name	= "panel-lg-lb035q02",
 		.of_match_table = lb035q02_of_match,
@@ -231,7 +239,6 @@ static struct spi_driver lb035q02_driver = {
 
 module_spi_driver(lb035q02_driver);
 
-MODULE_ALIAS("spi:lgphilips,lb035q02");
 MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
 MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver");
 MODULE_LICENSE("GPL");
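Why these SPI-alias fixes drop MODULE_ALIAS("spi:vendor,part") in favour of a spi_device_id table: for DT-probed SPI devices the uevent modalias is reported as spi:<part> with the vendor prefix stripped from the compatible, so a vendor-prefixed alias can never match and module autoloading silently fails. A toy illustration of that stripping (hypothetical helper, not the kernel's implementation):

/* Toy demonstration of why "spi:lgphilips,lb035q02" never matched:
 * only the bare part name after the comma appears in the modalias.
 * This helper is invented for illustration. */
#include <stdio.h>
#include <string.h>

static void spi_uevent_modalias(const char *compatible,
				char *buf, size_t len)
{
	const char *comma = strchr(compatible, ',');

	snprintf(buf, len, "spi:%s", comma ? comma + 1 : compatible);
}

int main(void)
{
	char alias[64];

	spi_uevent_modalias("lgphilips,lb035q02", alias, sizeof(alias));
	printf("%s\n", alias);	/* spi:lb035q02 -> matches the id table */
	return 0;
}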
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -230,9 +230,17 @@ static const struct of_device_id nl8048_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, nl8048_of_match);
 
+static const struct spi_device_id nl8048_ids[] = {
+	{ "nl8048hl11", 0 },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, nl8048_ids);
+
 static struct spi_driver nl8048_driver = {
 	.probe		= nl8048_probe,
 	.remove		= nl8048_remove,
+	.id_table	= nl8048_ids,
 	.driver		= {
 		.name	= "panel-nec-nl8048hl11",
 		.pm	= &nl8048_pm_ops,
@@ -242,7 +250,6 @@ static struct spi_driver nl8048_driver = {
 
 module_spi_driver(nl8048_driver);
 
-MODULE_ALIAS("spi:nec,nl8048hl11");
 MODULE_AUTHOR("Erik Gilling <konkers@android.com>");
 MODULE_DESCRIPTION("NEC-NL8048HL11 Driver");
 MODULE_LICENSE("GPL");
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -684,9 +684,17 @@ static const struct of_device_id acx565akm_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, acx565akm_of_match);
 
+static const struct spi_device_id acx565akm_ids[] = {
+	{ "acx565akm", 0 },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, acx565akm_ids);
+
 static struct spi_driver acx565akm_driver = {
 	.probe		= acx565akm_probe,
 	.remove		= acx565akm_remove,
+	.id_table	= acx565akm_ids,
 	.driver		= {
 		.name	= "panel-sony-acx565akm",
 		.of_match_table = acx565akm_of_match,
@@ -695,7 +703,6 @@ static struct spi_driver acx565akm_driver = {
 
 module_spi_driver(acx565akm_driver);
 
-MODULE_ALIAS("spi:sony,acx565akm");
 MODULE_AUTHOR("Nokia Corporation");
 MODULE_DESCRIPTION("Sony ACX565AKM LCD Panel Driver");
 MODULE_LICENSE("GPL");
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -375,8 +375,7 @@ static const struct of_device_id td028ttec1_of_match[] = {
 MODULE_DEVICE_TABLE(of, td028ttec1_of_match);
 
 static const struct spi_device_id td028ttec1_ids[] = {
-	{ "tpo,td028ttec1", 0},
-	{ "toppoly,td028ttec1", 0 },
+	{ "td028ttec1", 0 },
 	{ /* sentinel */ }
 };
 
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -491,9 +491,17 @@ static const struct of_device_id td043mtea1_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, td043mtea1_of_match);
 
+static const struct spi_device_id td043mtea1_ids[] = {
+	{ "td043mtea1", 0 },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, td043mtea1_ids);
+
 static struct spi_driver td043mtea1_driver = {
 	.probe		= td043mtea1_probe,
 	.remove		= td043mtea1_remove,
+	.id_table	= td043mtea1_ids,
 	.driver		= {
 		.name	= "panel-tpo-td043mtea1",
 		.pm	= &td043mtea1_pm_ops,
@@ -503,7 +511,6 @@ static struct spi_driver td043mtea1_driver = {
 
 module_spi_driver(td043mtea1_driver);
 
-MODULE_ALIAS("spi:tpo,td043mtea1");
 MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>");
 MODULE_DESCRIPTION("TPO TD043MTEA1 Panel Driver");
 MODULE_LICENSE("GPL");