drm/i915/gt: Force pte cacheline to main memory

We have seen problems on tgl where a valid pte entry is not observed
when the iommu is enabled. Add heavy-handed flushing of entry
modifications: flush the cpu store, then the cacheline, and then the
write-combining buffer. This forces the pte out to main memory past
this point, regardless of any promises of coherency.
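
For illustration, a minimal sketch of the ordering this enforces,
written as a standalone helper (the name flush_pte_update() is made up
here; the real change is the write_pte() helper in the diff below, and
both rely on the kernel's x86 clflush() and wmb()):

	static __always_inline void
	flush_pte_update(u64 *pte, u64 val)
	{
		*pte = val;    /* store lands in the cpu cache */
		wmb();         /* order the store: cpu -> cache */
		clflush(pte);  /* write the cacheline back: cache -> memory */
		wmb();         /* make the writeback visible to all observers */
	}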

This is an evolution of an experimental patch from Chris Wilson that
added a wmb() for coherent partners; it adds a clflush() on top to
force the cache->memory step.

Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1840
Testcase: igt/gem_exec_fence/parallel
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20200511160803.15407-1-mika.kuoppala@linux.intel.com

@@ -389,6 +389,16 @@ static int gen8_ppgtt_alloc(struct i915_address_space *vm,
 	return err;
 }
 
+static __always_inline void
+write_pte(gen8_pte_t *pte, const gen8_pte_t val)
+{
+	/* Magic delays? Or can we refine these to flush all in one pass? */
+	*pte = val;
+	wmb(); /* cpu to cache */
+	clflush(pte); /* cache to memory */
+	wmb(); /* visible to all */
+}
+
 static __always_inline u64
 gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
 		      struct i915_page_directory *pdp,
@@ -405,7 +415,8 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
 	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
 	do {
 		GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
-		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
+		write_pte(&vaddr[gen8_pd_index(idx, 0)],
+			  pte_encode | iter->dma);
 
 		iter->dma += I915_GTT_PAGE_SIZE;
 		if (iter->dma >= iter->max) {
@@ -487,7 +498,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
 
 		do {
 			GEM_BUG_ON(iter->sg->length < page_size);
-			vaddr[index++] = encode | iter->dma;
+			write_pte(&vaddr[index++], encode | iter->dma);
 
 			start += page_size;
 			iter->dma += page_size;