Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
Synced 2025-01-06 05:02:31 +00:00
m68k: Pass a pointer to virt_to_pfn() virt_to_page()
Functions that work on a pointer to virtual memory, such as virt_to_pfn() and users of that function such as virt_to_page(), are supposed to be passed a pointer to virtual memory, ideally a (void *) or other pointer. However, since many architectures implement virt_to_pfn() as a macro, the function becomes polymorphic and accepts both an (unsigned long) and a (void *).

Fix up the offending calls in arch/m68k with explicit casts. The page table include <asm/pgtable.h> pulls in different variants of the defines depending on whether you build for classic m68k, ColdFire or Sun3, so fix all variants.

Delete the ColdFire pte_pagenr(), which was relying on unsigned long semantics from __pte_page().

Tested-by: Geert Uytterhoeven <geert@linux-m68k.org>
Reviewed-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
commit 8f24608772
parent 9b2d38b4e4
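The type-checking difference described in the commit message can be shown with a minimal standalone sketch in plain userspace C (not kernel code). The PAGE_SHIFT/PAGE_OFFSET values and the virt_to_pfn_macro() name are made-up stand-ins, and the pointer-typed virt_to_pfn() below merely approximates the direction of this cleanup; it is not the real m68k definition. The point is that the macro form silently accepts an unsigned long, while the pointer-typed form forces call sites to cast to (void *), which is exactly the pattern applied throughout the hunks below.

/*
 * Illustrative only: simplified stand-ins for the kernel's PAGE_SHIFT,
 * PAGE_OFFSET and virt_to_pfn(); compile and run as a normal C program.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xc0000000UL

/* Macro form: "polymorphic", any integer or pointer argument compiles. */
#define virt_to_pfn_macro(kaddr) \
	(((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT)

/* Pointer-typed form: passing a bare unsigned long is diagnosed by the compiler. */
static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long address = PAGE_OFFSET + 5 * (1UL << PAGE_SHIFT);

	/* Old style: an unsigned long slips through the macro unchecked. */
	printf("macro:  pfn %lu\n", virt_to_pfn_macro(address));

	/* Fixed call sites cast explicitly, as done in the diff below. */
	printf("inline: pfn %lu\n", virt_to_pfn((void *)address));

	return 0;
}

The ColdFire pte_pagenr() is deleted outright rather than cast because it leaned on __pte_page() having unsigned long semantics, which the switch to (void *) takes away.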
@@ -115,7 +115,7 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
 	pgd_val(*pgdp) = virt_to_phys(pmdp);
 }
 
-#define __pte_page(pte) ((unsigned long) (pte_val(pte) & PAGE_MASK))
+#define __pte_page(pte) ((void *) (pte_val(pte) & PAGE_MASK))
 #define pmd_page_vaddr(pmd) ((unsigned long) (pmd_val(pmd)))
 
 static inline int pte_none(pte_t pte)
@@ -134,7 +134,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 	pte_val(*ptep) = 0;
 }
 
-#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
 #define pte_page(pte) virt_to_page(__pte_page(pte))
 
 static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
@@ -91,7 +91,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pmd_set(pmdp,ptep) do {} while (0)
 
 #define __pte_page(pte) \
-((unsigned long) __va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
+(__va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
 
 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 {
@@ -111,7 +111,7 @@ static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *p
 
 #define pte_page(pte) virt_to_page(__pte_page(pte))
 #define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
-#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd))
+#define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd))
 
 
 static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }
@@ -69,7 +69,8 @@ void __init paging_init(void)
 
 	/* now change pg_table to kernel virtual addresses */
 	for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
-		pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+		pte_t pte = pfn_pte(virt_to_pfn((void *)address),
+				    PAGE_INIT);
 		if (address >= (unsigned long) high_memory)
 			pte_val(pte) = 0;
 
@@ -102,7 +102,7 @@ static struct list_head ptable_list[2] = {
 	LIST_HEAD_INIT(ptable_list[1]),
 };
 
-#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
+#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru))
 #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
 #define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
 
@@ -201,7 +201,7 @@ int free_pointer_table(void *table, int type)
 		list_del(dp);
 		mmu_page_dtor((void *)page);
 		if (type == TABLE_PTE)
-			pgtable_pte_page_dtor(virt_to_page(page));
+			pgtable_pte_page_dtor(virt_to_page((void *)page));
 		free_page (page);
 		return 1;
 	} else if (ptable_list[type].next != dp) {
@@ -75,7 +75,7 @@ void __init paging_init(void)
 	/* now change pg_table to kernel virtual addresses */
 	pg_table = (pte_t *) __va ((unsigned long) pg_table);
 	for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
-		pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+		pte_t pte = pfn_pte(virt_to_pfn((void *)address), PAGE_INIT);
 		if (address >= (unsigned long)high_memory)
 			pte_val (pte) = 0;
 		set_pte (pg_table, pte);
@@ -29,7 +29,7 @@ static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
 	j = *(volatile unsigned long *)kaddr;
 	*(volatile unsigned long *)kaddr = j;
 
-	ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL);
+	ptep = pfn_pte(virt_to_pfn((void *)kaddr), PAGE_KERNEL);
 	pte = pte_val(ptep);
 //	pr_info("dvma_remap: addr %lx -> %lx pte %08lx\n", kaddr, vaddr, pte);
 	if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
@@ -125,7 +125,7 @@ inline int dvma_map_cpu(unsigned long kaddr,
 	do {
 		pr_debug("mapping %08lx phys to %08lx\n",
 			 __pa(kaddr), vaddr);
-		set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
+		set_pte(pte, pfn_pte(virt_to_pfn((void *)kaddr),
 				     PAGE_KERNEL));
 		pte++;
 		kaddr += PAGE_SIZE;