powerpc/mm: Refactor pte_update() on book3s/32
When CONFIG_PTE_64BIT is set, pte_update() operates on 'unsigned long long'.
When CONFIG_PTE_64BIT is not set, pte_update() operates on 'unsigned long'.

In asm/page.h, we have pte_basic_t which is 'unsigned long long' when
CONFIG_PTE_64BIT is set and 'unsigned long' otherwise.

Refactor pte_update() using pte_basic_t.

While we are at it, drop the comment on 44x which is not applicable to the
book3s version of pte_update().

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c78912bc8613fb249c3d80aeb1062796b5c49400.1589866984.git.christophe.leroy@csgroup.eu
commit 1c1bf29488
parent 2db99aeb63
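For reference, a minimal sketch of the pte_basic_t definition the commit
message describes (paraphrased from the commit message; the surrounding
context of asm/page.h is omitted):

/* Sketch: pte_basic_t tracks the PTE width selected by CONFIG_PTE_64BIT,
 * as stated in the commit message above. */
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
#else
typedef unsigned long pte_basic_t;
#endif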
arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -253,53 +253,35 @@ extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
  * and the PTE may be either 32 or 64 bit wide. In the later case,
  * when using atomic updates, only the low part of the PTE is
  * accessed atomically.
- *
- * In addition, on 44x, we also maintain a global flag indicating
- * that an executable user mapping was modified, which is needed
- * to properly flush the virtually tagged instruction cache of
- * those implementations.
  */
-#ifndef CONFIG_PTE_64BIT
-static inline unsigned long pte_update(pte_t *p,
-				       unsigned long clr,
-				       unsigned long set)
+static inline pte_basic_t pte_update(pte_t *p, unsigned long clr, unsigned long set)
 {
-	unsigned long old, tmp;
-
-	__asm__ __volatile__("\
-1:	lwarx	%0,0,%3\n\
-	andc	%1,%0,%4\n\
-	or	%1,%1,%5\n"
-"	stwcx.	%1,0,%3\n\
-	bne-	1b"
-	: "=&r" (old), "=&r" (tmp), "=m" (*p)
-	: "r" (p), "r" (clr), "r" (set), "m" (*p)
-	: "cc" );
-
-	return old;
-}
-#else /* CONFIG_PTE_64BIT */
-static inline unsigned long long pte_update(pte_t *p,
-					    unsigned long clr,
-					    unsigned long set)
-{
-	unsigned long long old;
+	pte_basic_t old;
 	unsigned long tmp;
 
-	__asm__ __volatile__("\
-1:	lwarx	%L0,0,%4\n\
-	lwzx	%0,0,%3\n\
-	andc	%1,%L0,%5\n\
-	or	%1,%1,%6\n"
-"	stwcx.	%1,0,%4\n\
-	bne-	1b"
+	__asm__ __volatile__(
+#ifndef CONFIG_PTE_64BIT
+"1:	lwarx	%0, 0, %3\n"
+"	andc	%1, %0, %4\n"
+#else
+"1:	lwarx	%L0, 0, %3\n"
+"	lwz	%0, -4(%3)\n"
+"	andc	%1, %L0, %4\n"
+#endif
+"	or	%1, %1, %5\n"
+"	stwcx.	%1, 0, %3\n"
+"	bne-	1b"
 	: "=&r" (old), "=&r" (tmp), "=m" (*p)
-	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
+#ifndef CONFIG_PTE_64BIT
+	: "r" (p),
+#else
+	: "b" ((unsigned long)(p) + 4),
+#endif
+	  "r" (clr), "r" (set), "m" (*p)
 	: "cc" );
 
 	return old;
 }
-#endif /* CONFIG_PTE_64BIT */
 
 /*
  * 2.6 calls this without flushing the TLB entry; this is wrong
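For readers less familiar with lwarx/stwcx., the loop in the unified
pte_update() above is a load-reserve/store-conditional read-modify-write:
load the word, clear the bits in 'clr', set the bits in 'set', retry if
another CPU modified the word in between, and return the old value. A
rough, hypothetical C11-atomics analogue of the 32-bit case (illustrative
only, not kernel code):

#include <stdatomic.h>

/* Illustrative analogue of the pte_update() loop above, using a C11
 * compare-and-swap in place of lwarx/stwcx.: atomically clear the
 * 'clr' bits and set the 'set' bits, returning the old value. */
static unsigned long word_update(_Atomic unsigned long *p,
				 unsigned long clr, unsigned long set)
{
	unsigned long old = atomic_load(p);

	/* On failure the CAS reloads 'old' from *p, so the new value
	 * is recomputed each pass, mirroring the stwcx./bne- retry. */
	while (!atomic_compare_exchange_weak(p, &old, (old & ~clr) | set))
		;
	return old;
}

In the CONFIG_PTE_64BIT case, only the low word of the PTE carries the
reservation: %3 points at (unsigned long)(p) + 4, lwarx/stwcx. operate
there, and the high word is read with a plain lwz at -4(%3), matching the
header comment that only the low part of the PTE is accessed atomically.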