powerpc/mm/radix: Update tlb functions to take a ric argument

Radix invalidate control (RIC) selects which caches the tlbie and tlbiel
instructions invalidate. When doing a PID flush, we currently flush
everything including the page walk cache; for an address range flush, we
flush only the TLB. Thread a ric argument through the flush helpers so
that the next patch can add support for flushing only the page walk
cache.
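
Concretely, ric is a two-bit field that the inline asm in this patch ORs
into the tlbie/tlbiel instruction word. Below is a minimal user-space
sketch of that packing, showing that only the RIC bits differ between the
three flush types; encode_tlbiel(), the example register numbers, and the
field shifts past bit 16 are assumptions for the demo, not kernel code.

/*
 * Illustrative sketch only, not kernel code: packs the same fields the
 * patch's ".long 0x7c000224 | ..." tlbiel encoding ORs together, so the
 * three RIC values can be compared.  encode_tlbiel(), the example
 * register numbers, and the shifts past bit 16 are assumptions.
 */
#include <stdio.h>

#define RIC_FLUSH_TLB 0	/* invalidate TLB entries only */
#define RIC_FLUSH_PWC 1	/* invalidate the page walk cache only */
#define RIC_FLUSH_ALL 2	/* invalidate both (today's PID-flush behaviour) */

static unsigned int encode_tlbiel(unsigned int rb_reg, unsigned int rs_reg,
				  unsigned int r, unsigned int prs,
				  unsigned int ric)
{
	return 0x7c000224 | (rb_reg << 11) | (r << 16) |
	       (prs << 17) | (ric << 18) | (rs_reg << 21);
}

int main(void)
{
	/* r = 1 (radix format), prs = 1 (process scoped), as in the patch */
	printf("RIC_FLUSH_TLB: 0x%08x\n", encode_tlbiel(4, 5, 1, 1, RIC_FLUSH_TLB));
	printf("RIC_FLUSH_PWC: 0x%08x\n", encode_tlbiel(4, 5, 1, 1, RIC_FLUSH_PWC));
	printf("RIC_FLUSH_ALL: 0x%08x\n", encode_tlbiel(4, 5, 1, 1, RIC_FLUSH_ALL));
	return 0;
}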

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Author: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Date: 2016-06-08 19:55:50 +05:30 (committed by Michael Ellerman)
commit 36194812a4, parent 8017ea35d3

@@ -18,16 +18,20 @@
 static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbiel_pid(unsigned long pid, int set)
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
+static inline void __tlbiel_pid(unsigned long pid, int set,
+				unsigned long ric)
 {
-	unsigned long rb,rs,ric,prs,r;
+	unsigned long rb,rs,prs,r;
 
 	rb = PPC_BIT(53); /* IS = 1 */
 	rb |= set << PPC_BITLSHIFT(51);
 	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
 	prs = 1; /* process scoped */
 	r = 1; /* raidx format */
-	ric = 2; /* invalidate all the caches */
 
 	asm volatile("ptesync": : :"memory");
 	asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -39,25 +43,24 @@ static inline void __tlbiel_pid(unsigned long pid, int set)
 /*
  * We use 128 set in radix mode and 256 set in hpt mode.
  */
-static inline void _tlbiel_pid(unsigned long pid)
+static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 {
 	int set;
 
 	for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
-		__tlbiel_pid(pid, set);
+		__tlbiel_pid(pid, set, ric);
 	}
 	return;
 }
 
-static inline void _tlbie_pid(unsigned long pid)
+static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
 {
-	unsigned long rb,rs,ric,prs,r;
+	unsigned long rb,rs,prs,r;
 
 	rb = PPC_BIT(53); /* IS = 1 */
 	rs = pid << PPC_BITLSHIFT(31);
 	prs = 1; /* process scoped */
 	r = 1; /* raidx format */
-	ric = 2; /* invalidate all the caches */
 
 	asm volatile("ptesync": : :"memory");
 	asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -67,16 +70,15 @@ static inline void _tlbie_pid(unsigned long pid)
 }
 
 static inline void _tlbiel_va(unsigned long va, unsigned long pid,
-			      unsigned long ap)
+			      unsigned long ap, unsigned long ric)
 {
-	unsigned long rb,rs,ric,prs,r;
+	unsigned long rb,rs,prs,r;
 
 	rb = va & ~(PPC_BITMASK(52, 63));
 	rb |= ap << PPC_BITLSHIFT(58);
 	rs = pid << PPC_BITLSHIFT(31);
 	prs = 1; /* process scoped */
 	r = 1; /* raidx format */
-	ric = 0; /* no cluster flush yet */
 
 	asm volatile("ptesync": : :"memory");
 	asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -86,16 +88,15 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,
-			     unsigned long ap)
+			     unsigned long ap, unsigned long ric)
 {
-	unsigned long rb,rs,ric,prs,r;
+	unsigned long rb,rs,prs,r;
 
 	rb = va & ~(PPC_BITMASK(52, 63));
 	rb |= ap << PPC_BITLSHIFT(58);
 	rs = pid << PPC_BITLSHIFT(31);
 	prs = 1; /* process scoped */
 	r = 1; /* raidx format */
-	ric = 0; /* no cluster flush yet */
 
 	asm volatile("ptesync": : :"memory");
 	asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -122,7 +123,7 @@ void radix__local_flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 	pid = mm->context.id;
 	if (pid != MMU_NO_CONTEXT)
-		_tlbiel_pid(pid);
+		_tlbiel_pid(pid, RIC_FLUSH_ALL);
 	preempt_enable();
 }
 EXPORT_SYMBOL(radix__local_flush_tlb_mm);
@@ -135,7 +136,7 @@ void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 	preempt_disable();
 	pid = mm ? mm->context.id : 0;
 	if (pid != MMU_NO_CONTEXT)
-		_tlbiel_va(vmaddr, pid, ap);
+		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
 	preempt_enable();
 }
@@ -172,11 +173,11 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
 		if (lock_tlbie)
 			raw_spin_lock(&native_tlbie_lock);
-		_tlbie_pid(pid);
+		_tlbie_pid(pid, RIC_FLUSH_ALL);
 		if (lock_tlbie)
 			raw_spin_unlock(&native_tlbie_lock);
 	} else
-		_tlbiel_pid(pid);
+		_tlbiel_pid(pid, RIC_FLUSH_ALL);
 no_context:
 	preempt_enable();
 }
@@ -196,11 +197,11 @@ void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 		if (lock_tlbie)
 			raw_spin_lock(&native_tlbie_lock);
-		_tlbie_va(vmaddr, pid, ap);
+		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
 		if (lock_tlbie)
 			raw_spin_unlock(&native_tlbie_lock);
 	} else
-		_tlbiel_va(vmaddr, pid, ap);
+		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
 bail:
 	preempt_enable();
 }
@@ -224,7 +225,7 @@ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	if (lock_tlbie)
 		raw_spin_lock(&native_tlbie_lock);
-	_tlbie_pid(0);
+	_tlbie_pid(0, RIC_FLUSH_ALL);
 	if (lock_tlbie)
 		raw_spin_unlock(&native_tlbie_lock);
 }
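
With ric threaded through, a page-walk-cache-only flush becomes a one-line
variation on the PID flush above. A hypothetical sketch of what such a
caller could look like; the function name and signature are assumptions,
not taken from the follow-up patch:

/*
 * Hypothetical sketch of a PWC-only flush enabled by this patch, modeled
 * on radix__local_flush_tlb_mm() above.  The name and signature are
 * assumptions; only _tlbiel_pid() and RIC_FLUSH_PWC come from the patch.
 */
void radix__local_flush_tlb_pwc(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_PWC); /* flush PWC, leave TLB entries */
	preempt_enable();
}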