Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-01-07 22:03:14 +00:00)
Merge git://git.kernel.org:/pub/scm/linux/kernel/git/davem/sparc

Pull sparc updates from David Miller:
 "Just some more random bits from Al, including a conversion over to
  generic extables"

* git://git.kernel.org:/pub/scm/linux/kernel/git/davem/sparc:
  sparc32: take ->thread.flags out
  sparc32: get rid of fake_swapper_regs
  sparc64: get rid of fake_swapper_regs
  sparc32: switch to generic extables
  sparc32: switch copy_user.S away from range exception table entries
  sparc32: get rid of range exception table entries in checksum_32.S
  sparc32: switch __bzero() away from range exception table entries
  sparc32: kill lookup_fault()
  sparc32: don't bother with lookup_fault() in __bzero()
This commit is contained in: commit 987a08741d
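For context before the diff: "generic extables" means the common two-word exception-table entries ({insn, fixup} address pairs) plus the stock search_exception_tables() lookup, in place of sparc32's private four-word "range" entries. A minimal C sketch of the dispatch the converted code performs on a kernel-mode fault — it mirrors the kernel_mna_trap_fault() and do_sparc_fault() hunks below, but the wrapper function name is illustrative, not kernel code:

	#include <linux/extable.h>	/* search_exception_tables() */
	#include <asm/ptrace.h>		/* struct pt_regs (sparc32: pc/npc) */

	/* Illustrative helper: apply a fixup for a faulting kernel PC.
	 * Returns 1 if a table entry was found and applied, 0 otherwise.
	 */
	static int sketch_fixup_exception(struct pt_regs *regs)
	{
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->pc);
		if (!entry)
			return 0;		/* no fixup: genuine kernel fault */
		regs->pc = entry->fixup;	/* resume at the fixup stub */
		regs->npc = regs->pc + 4;	/* keep the delay-slot npc sane */
		return 1;
	}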
@@ -8,7 +8,6 @@
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
-#include <asm/extable_64.h>
 #include <asm/spitfire.h>
 #include <asm/adi.h>
 

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_EXTABLE64_H
-#define __ASM_EXTABLE64_H
+#ifndef __ASM_EXTABLE_H
+#define __ASM_EXTABLE_H
 /*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is

@@ -50,16 +50,12 @@ struct thread_struct {
 	unsigned long fsr;
 	unsigned long fpqdepth;
 	struct fpq fpqueue[16];
-	unsigned long flags;
 	mm_segment_t current_ds;
 };
 
-#define SPARC_FLAG_KTHREAD	0x1	/* task is a kernel thread */
-#define SPARC_FLAG_UNALIGNED	0x2	/* is allowed to do unaligned accesses */
-
 #define INIT_THREAD { \
-	.flags = SPARC_FLAG_KTHREAD, \
 	.current_ds = KERNEL_DS, \
+	.kregs = (struct pt_regs *)(init_stack+THREAD_SIZE)-1 \
 }
 
 /* Do necessary setup to start up a newly executed thread. */

@@ -118,6 +118,7 @@ struct thread_info {
 	.task = &tsk, \
 	.current_ds = ASI_P, \
 	.preempt_count = INIT_PREEMPT_COUNT, \
+	.kregs = (struct pt_regs *)(init_stack+THREAD_SIZE)-1 \
 }
 
 /* how to get the thread information struct from C */

@@ -1,6 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef ___ASM_SPARC_UACCESS_H
 #define ___ASM_SPARC_UACCESS_H
+
+#include <asm/extable.h>
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/uaccess_64.h>
 #else

@@ -13,9 +13,6 @@
 
 #include <asm/processor.h>
 
-#define ARCH_HAS_SORT_EXTABLE
-#define ARCH_HAS_SEARCH_EXTABLE
-
 /* Sparc is not segmented, however we need to be able to fool access_ok()
  * when doing system calls from kernel mode legitimately.
  *

@@ -40,36 +37,6 @@
 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
 #define access_ok(addr, size) __access_ok((unsigned long)(addr), size)
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- *
- * There is a special way how to put a range of potentially faulting
- * insns (like twenty ldd/std's with now intervening other instructions)
- * You specify address of first in insn and 0 in fixup and in the next
- * exception_table_entry you specify last potentially faulting insn + 1
- * and in fixup the routine which should handle the fault.
- * That fixup code will get
- * (faulting_insn_address - first_insn_in_the_range_address)/4
- * in %g2 (ie. index of the faulting instruction in the range).
- */
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise. */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
-
 /* Uh, these should become the main single-value transfer routines..
  * They automatically use the right size if we just have the right
  * pointer type..
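The long comment removed above is the best description of the old scheme: a range was encoded as a {first_insn, 0} entry immediately followed by {last_insn + 4, handler}, and the handler received the faulting instruction's index within the range in %g2. A C sketch of that decoding, following the removed comment and the search_extables_range() deleted from mm/extable.c further down in this diff (the helper name here is illustrative):

	/* Decode a lookup hit under the old sparc32 range encoding (sketch). */
	static unsigned long old_range_fixup(const struct exception_table_entry *e,
					     unsigned long addr, unsigned long *g2)
	{
		if (e->fixup == 0) {			/* first half of a range pair */
			*g2 = (addr - e->insn) / 4;	/* index of the faulting insn */
			return (e + 1)->fixup;		/* handler in the pair's second half */
		}
		return e->fixup;			/* ordinary single-insn entry */
	}

The rest of the series replaces every range with one ordinary entry per faulting instruction, which is what allows the custom sort/search hooks to go.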
@@ -252,12 +219,7 @@ static inline unsigned long __clear_user(void __user *addr, unsigned long size)
 	unsigned long ret;
 
 	__asm__ __volatile__ (
-		".section __ex_table,#alloc\n\t"
-		".align 4\n\t"
-		".word 1f,3\n\t"
-		".previous\n\t"
 		"mov %2, %%o1\n"
-		"1:\n\t"
 		"call __bzero\n\t"
 		" mov %1, %%o0\n\t"
 		"mov %%o0, %0\n"

@@ -10,7 +10,6 @@
 #include <linux/string.h>
 #include <asm/asi.h>
 #include <asm/spitfire.h>
-#include <asm/extable_64.h>
 
 #include <asm/processor.h>
 

@@ -515,7 +515,7 @@ continue_boot:
 
 		/* I want a kernel stack NOW! */
 		set	init_thread_union, %g1
-		set	(THREAD_SIZE - STACKFRAME_SZ), %g2
+		set	(THREAD_SIZE - STACKFRAME_SZ - TRACEREG_SZ), %g2
 		add	%g1, %g2, %sp
 		mov	0, %fp		/* And for good luck */
 

@@ -706,7 +706,7 @@ tlb_fixup_done:
 	wr	%g0, ASI_P, %asi
 	mov	1, %g1
 	sllx	%g1, THREAD_SHIFT, %g1
-	sub	%g1, (STACKFRAME_SZ + STACK_BIAS), %g1
+	sub	%g1, (STACKFRAME_SZ + STACK_BIAS + TRACEREG_SZ), %g1
 	add	%g6, %g1, %sp
 
 	/* Set per-cpu pointer initially to zero, this makes

@@ -216,16 +216,6 @@ void flush_thread(void)
 		clear_thread_flag(TIF_USEDFPU);
 #endif
 	}
 
-	/* This task is no longer a kernel thread. */
-	if (current->thread.flags & SPARC_FLAG_KTHREAD) {
-		current->thread.flags &= ~SPARC_FLAG_KTHREAD;
-
-		/* We must fixup kregs as well. */
-		/* XXX This was not fixed for ti for a while, worked. Unused? */
-		current->thread.kregs = (struct pt_regs *)
-		    (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ));
-	}
 }
 
 static inline struct sparc_stackf __user *

@@ -313,7 +303,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
 		extern int nwindows;
 		unsigned long psr;
 		memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
-		p->thread.flags |= SPARC_FLAG_KTHREAD;
 		p->thread.current_ds = KERNEL_DS;
 		ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8);
 		childregs->u_regs[UREG_G1] = sp; /* function */

@@ -325,7 +314,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
 	}
 	memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ);
 	childregs->u_regs[UREG_FP] = sp;
-	p->thread.flags &= ~SPARC_FLAG_KTHREAD;
 	p->thread.current_ds = USER_DS;
 	ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
 	ti->kpsr = current->thread.fork_kpsr | PSR_PIL;

@@ -266,7 +266,6 @@ static __init void leon_patch(void)
 }
 
 struct tt_entry *sparc_ttable;
-static struct pt_regs fake_swapper_regs;
 
 /* Called from head_32.S - before we have setup anything
  * in the kernel. Be very careful with what you do here.

@@ -363,8 +362,6 @@ void __init setup_arch(char **cmdline_p)
 		(*(linux_dbvec->teach_debugger))();
 	}
 
-	init_task.thread.kregs = &fake_swapper_regs;
-
 	/* Run-time patch instructions to match the cpu model */
 	per_cpu_patch();
 

@@ -165,8 +165,6 @@ extern int root_mountflags;
 
 char reboot_command[COMMAND_LINE_SIZE];
 
-static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
-
 static void __init per_cpu_patch(void)
 {
 	struct cpuid_patch_entry *p;

@@ -661,8 +659,6 @@ void __init setup_arch(char **cmdline_p)
 	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
 #endif
 
-	task_thread_info(&init_task)->kregs = &fake_swapper_regs;
-
 #ifdef CONFIG_IP_PNP
 	if (!ic_set_manually) {
 		phandle chosen = prom_finddevice("/chosen");

@@ -16,6 +16,7 @@
 #include <linux/uaccess.h>
 #include <linux/smp.h>
 #include <linux/perf_event.h>
+#include <linux/extable.h>
 
 #include <asm/setup.h>
 

@@ -213,10 +214,10 @@ static inline int ok_for_kernel(unsigned int insn)
 
 static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 {
-	unsigned long g2 = regs->u_regs [UREG_G2];
-	unsigned long fixup = search_extables_range(regs->pc, &g2);
+	const struct exception_table_entry *entry;
 
-	if (!fixup) {
+	entry = search_exception_tables(regs->pc);
+	if (!entry) {
 		unsigned long address = compute_effective_address(regs, insn);
 		if(address < PAGE_SIZE) {
 			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");

@@ -232,9 +233,8 @@ static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 		die_if_kernel("Oops", regs);
 		/* Not reached */
 	}
-	regs->pc = fixup;
+	regs->pc = entry->fixup;
 	regs->npc = regs->pc + 4;
-	regs->u_regs [UREG_G2] = g2;
 }
 
 asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)

@@ -274,103 +274,9 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 	}
 }
 
-static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
-			      enum direction dir)
-{
-	unsigned int reg;
-	int size = ((insn >> 19) & 3) == 3 ? 8 : 4;
-
-	if ((regs->pc | regs->npc) & 3)
-		return 0;
-
-	/* Must access_ok() in all the necessary places. */
-#define WINREG_ADDR(regnum) \
-	((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum)))
-
-	reg = (insn >> 25) & 0x1f;
-	if (reg >= 16) {
-		if (!access_ok(WINREG_ADDR(reg - 16), size))
-			return -EFAULT;
-	}
-	reg = (insn >> 14) & 0x1f;
-	if (reg >= 16) {
-		if (!access_ok(WINREG_ADDR(reg - 16), size))
-			return -EFAULT;
-	}
-	if (!(insn & 0x2000)) {
-		reg = (insn & 0x1f);
-		if (reg >= 16) {
-			if (!access_ok(WINREG_ADDR(reg - 16), size))
-				return -EFAULT;
-		}
-	}
-#undef WINREG_ADDR
-	return 0;
-}
-
-static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
+asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 {
 	send_sig_fault(SIGBUS, BUS_ADRALN,
 		       (void __user *)safe_compute_effective_address(regs, insn),
 		       0, current);
 }
-
-asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
-{
-	enum direction dir;
-
-	if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
-	   (((insn >> 30) & 3) != 3))
-		goto kill_user;
-	dir = decode_direction(insn);
-	if(!ok_for_user(regs, insn, dir)) {
-		goto kill_user;
-	} else {
-		int err, size = decode_access_size(insn);
-		unsigned long addr;
-
-		if(floating_point_load_or_store_p(insn)) {
-			printk("User FPU load/store unaligned unsupported.\n");
-			goto kill_user;
-		}
-
-		addr = compute_effective_address(regs, insn);
-		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
-		switch(dir) {
-		case load:
-			err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
-							 regs),
-					  size, (unsigned long *) addr,
-					  decode_signedness(insn));
-			break;
-
-		case store:
-			err = do_int_store(((insn>>25)&0x1f), size,
-					   (unsigned long *) addr, regs);
-			break;
-
-		case both:
-			/*
-			 * This was supported in 2.4. However, we question
-			 * the value of SWAP instruction across word boundaries.
-			 */
-			printk("Unaligned SWAP unsupported.\n");
-			err = -EFAULT;
-			break;
-
-		default:
-			unaligned_panic("Impossible user unaligned trap.");
-			goto out;
-		}
-		if (err)
-			goto kill_user;
-		else
-			advance(regs);
-		goto out;
-	}
-
-kill_user:
-	user_mna_trap_fault(regs, insn);
-out:
-	;
-}

@@ -155,13 +155,6 @@ cpout: retl ! get outta here
 	.text; \
 	.align 4
 
-#define EXT(start,end) \
-	.section __ex_table,ALLOC; \
-	.align 4; \
-	.word start, 0, end, cc_fault; \
-	.text; \
-	.align 4
-
 /* This aligned version executes typically in 8.5 superscalar cycles, this
  * is the best I can do. I say 8.5 because the final add will pair with
  * the next ldd in the main unrolled loop. Thus the pipe is always full.

@@ -169,20 +162,20 @@ cpout: retl ! get outta here
 * please check the fixup code below as well.
 */
 #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
-	ldd	[src + off + 0x00], t0; \
-	ldd	[src + off + 0x08], t2; \
+	EX(ldd	[src + off + 0x00], t0); \
+	EX(ldd	[src + off + 0x08], t2); \
 	addxcc	t0, sum, sum; \
-	ldd	[src + off + 0x10], t4; \
+	EX(ldd	[src + off + 0x10], t4); \
 	addxcc	t1, sum, sum; \
-	ldd	[src + off + 0x18], t6; \
+	EX(ldd	[src + off + 0x18], t6); \
 	addxcc	t2, sum, sum; \
-	std	t0, [dst + off + 0x00]; \
+	EX(std	t0, [dst + off + 0x00]); \
 	addxcc	t3, sum, sum; \
-	std	t2, [dst + off + 0x08]; \
+	EX(std	t2, [dst + off + 0x08]); \
 	addxcc	t4, sum, sum; \
-	std	t4, [dst + off + 0x10]; \
+	EX(std	t4, [dst + off + 0x10]); \
 	addxcc	t5, sum, sum; \
-	std	t6, [dst + off + 0x18]; \
+	EX(std	t6, [dst + off + 0x18]); \
 	addxcc	t6, sum, sum; \
 	addxcc	t7, sum, sum;
 

@@ -191,39 +184,39 @@ cpout: retl ! get outta here
 * Viking MXCC into streaming mode. Ho hum...
 */
 #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
-	ldd	[src + off + 0x00], t0; \
-	ldd	[src + off + 0x08], t2; \
-	ldd	[src + off + 0x10], t4; \
-	ldd	[src + off + 0x18], t6; \
-	st	t0, [dst + off + 0x00]; \
+	EX(ldd	[src + off + 0x00], t0); \
+	EX(ldd	[src + off + 0x08], t2); \
+	EX(ldd	[src + off + 0x10], t4); \
+	EX(ldd	[src + off + 0x18], t6); \
+	EX(st	t0, [dst + off + 0x00]); \
 	addxcc	t0, sum, sum; \
-	st	t1, [dst + off + 0x04]; \
+	EX(st	t1, [dst + off + 0x04]); \
 	addxcc	t1, sum, sum; \
-	st	t2, [dst + off + 0x08]; \
+	EX(st	t2, [dst + off + 0x08]); \
 	addxcc	t2, sum, sum; \
-	st	t3, [dst + off + 0x0c]; \
+	EX(st	t3, [dst + off + 0x0c]); \
 	addxcc	t3, sum, sum; \
-	st	t4, [dst + off + 0x10]; \
+	EX(st	t4, [dst + off + 0x10]); \
 	addxcc	t4, sum, sum; \
-	st	t5, [dst + off + 0x14]; \
+	EX(st	t5, [dst + off + 0x14]); \
 	addxcc	t5, sum, sum; \
-	st	t6, [dst + off + 0x18]; \
+	EX(st	t6, [dst + off + 0x18]); \
 	addxcc	t6, sum, sum; \
-	st	t7, [dst + off + 0x1c]; \
+	EX(st	t7, [dst + off + 0x1c]); \
 	addxcc	t7, sum, sum;
 
 /* Yuck, 6 superscalar cycles... */
 #define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3) \
-	ldd	[src - off - 0x08], t0; \
-	ldd	[src - off - 0x00], t2; \
+	EX(ldd	[src - off - 0x08], t0); \
+	EX(ldd	[src - off - 0x00], t2); \
 	addxcc	t0, sum, sum; \
-	st	t0, [dst - off - 0x08]; \
+	EX(st	t0, [dst - off - 0x08]); \
 	addxcc	t1, sum, sum; \
-	st	t1, [dst - off - 0x04]; \
+	EX(st	t1, [dst - off - 0x04]); \
 	addxcc	t2, sum, sum; \
-	st	t2, [dst - off - 0x00]; \
+	EX(st	t2, [dst - off - 0x00]); \
 	addxcc	t3, sum, sum; \
-	st	t3, [dst - off + 0x04];
+	EX(st	t3, [dst - off + 0x04]);
 
 /* Handle the end cruft code out of band for better cache patterns. */
 cc_end_cruft:

@@ -331,7 +324,6 @@ __csum_partial_copy_sparc_generic:
 	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-10:	EXT(5b, 10b)			! note for exception handling
 	sub	%g1, 128, %g1		! detract from length
 	addx	%g0, %g7, %g7		! add in last carry bit
 	andcc	%g1, 0xffffff80, %g0	! more to csum?

@@ -356,8 +348,7 @@ cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
 	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
 	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
 	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
-12:	EXT(cctbl, 12b)			! note for exception table handling
-	addx	%g0, %g7, %g7
+12:	addx	%g0, %g7, %g7
 	andcc	%o3, 0xf, %g0		! check for low bits set
 ccte:	bne	cc_end_cruft		! something left, handle it out of band
 	 andcc	%o3, 8, %g0		! begin checks for that code

@@ -367,7 +358,6 @@ ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o
 	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-11:	EXT(ccdbl, 11b)			! note for exception table handling
 	sub	%g1, 128, %g1		! detract from length
 	addx	%g0, %g7, %g7		! add in last carry bit
 	andcc	%g1, 0xffffff80, %g0	! more to csum?

@@ -21,98 +21,134 @@
 /* Work around cpp -rob */
 #define ALLOC #alloc
 #define EXECINSTR #execinstr
 
+#define EX_ENTRY(l1, l2) \
+	.section __ex_table,ALLOC; \
+	.align 4; \
+	.word l1, l2; \
+	.text;
+
 #define EX(x,y,a,b) \
 98:	x,y; \
 	.section .fixup,ALLOC,EXECINSTR; \
 	.align 4; \
-99:	ba fixupretl; \
-	 a, b, %g3; \
-	.section __ex_table,ALLOC; \
-	.align 4; \
-	.word 98b, 99b; \
-	.text; \
-	.align 4
+99:	retl; \
+	 a, b, %o0; \
+	EX_ENTRY(98b, 99b)
 
 #define EX2(x,y,c,d,e,a,b) \
 98:	x,y; \
 	.section .fixup,ALLOC,EXECINSTR; \
 	.align 4; \
 99:	c, d, e; \
-	ba fixupretl; \
-	 a, b, %g3; \
-	.section __ex_table,ALLOC; \
-	.align 4; \
-	.word 98b, 99b; \
-	.text; \
-	.align 4
+	retl; \
+	 a, b, %o0; \
+	EX_ENTRY(98b, 99b)
 
 #define EXO2(x,y) \
 98:	x, y; \
-	.section __ex_table,ALLOC; \
-	.align 4; \
-	.word 98b, 97f; \
-	.text; \
-	.align 4
+	EX_ENTRY(98b, 97f)
 
-#define EXT(start,end,handler) \
-	.section __ex_table,ALLOC; \
-	.align 4; \
-	.word start, 0, end, handler; \
-	.text; \
-	.align 4
+#define LD(insn, src, offset, reg, label) \
+98:	insn [%src + (offset)], %reg; \
+	.section .fixup,ALLOC,EXECINSTR; \
+99:	ba label; \
+	 mov offset, %g5; \
+	EX_ENTRY(98b, 99b)
 
-/* Please do not change following macros unless you change logic used
- * in .fixup at the end of this file as well
- */
+#define ST(insn, dst, offset, reg, label) \
+98:	insn %reg, [%dst + (offset)]; \
+	.section .fixup,ALLOC,EXECINSTR; \
+99:	ba label; \
+	 mov offset, %g5; \
+	EX_ENTRY(98b, 99b)
 
 /* Both these macros have to start with exactly the same insn */
+/* left: g7 + (g1 % 128) - offset */
 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
-	ldd	[%src + (offset) + 0x00], %t0; \
-	ldd	[%src + (offset) + 0x08], %t2; \
-	ldd	[%src + (offset) + 0x10], %t4; \
-	ldd	[%src + (offset) + 0x18], %t6; \
-	st	%t0, [%dst + (offset) + 0x00]; \
-	st	%t1, [%dst + (offset) + 0x04]; \
-	st	%t2, [%dst + (offset) + 0x08]; \
-	st	%t3, [%dst + (offset) + 0x0c]; \
-	st	%t4, [%dst + (offset) + 0x10]; \
-	st	%t5, [%dst + (offset) + 0x14]; \
-	st	%t6, [%dst + (offset) + 0x18]; \
-	st	%t7, [%dst + (offset) + 0x1c];
+	LD(ldd, src, offset + 0x00, t0, bigchunk_fault) \
+	LD(ldd, src, offset + 0x08, t2, bigchunk_fault) \
+	LD(ldd, src, offset + 0x10, t4, bigchunk_fault) \
+	LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \
+	ST(st, dst, offset + 0x00, t0, bigchunk_fault) \
+	ST(st, dst, offset + 0x04, t1, bigchunk_fault) \
+	ST(st, dst, offset + 0x08, t2, bigchunk_fault) \
+	ST(st, dst, offset + 0x0c, t3, bigchunk_fault) \
+	ST(st, dst, offset + 0x10, t4, bigchunk_fault) \
+	ST(st, dst, offset + 0x14, t5, bigchunk_fault) \
+	ST(st, dst, offset + 0x18, t6, bigchunk_fault) \
+	ST(st, dst, offset + 0x1c, t7, bigchunk_fault)
 
+/* left: g7 + (g1 % 128) - offset */
 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
-	ldd	[%src + (offset) + 0x00], %t0; \
-	ldd	[%src + (offset) + 0x08], %t2; \
-	ldd	[%src + (offset) + 0x10], %t4; \
-	ldd	[%src + (offset) + 0x18], %t6; \
-	std	%t0, [%dst + (offset) + 0x00]; \
-	std	%t2, [%dst + (offset) + 0x08]; \
-	std	%t4, [%dst + (offset) + 0x10]; \
-	std	%t6, [%dst + (offset) + 0x18];
+	LD(ldd, src, offset + 0x00, t0, bigchunk_fault) \
+	LD(ldd, src, offset + 0x08, t2, bigchunk_fault) \
+	LD(ldd, src, offset + 0x10, t4, bigchunk_fault) \
+	LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \
+	ST(std, dst, offset + 0x00, t0, bigchunk_fault) \
+	ST(std, dst, offset + 0x08, t2, bigchunk_fault) \
+	ST(std, dst, offset + 0x10, t4, bigchunk_fault) \
+	ST(std, dst, offset + 0x18, t6, bigchunk_fault)
 
+	.section .fixup,#alloc,#execinstr
+bigchunk_fault:
+	sub	%g7, %g5, %o0
+	and	%g1, 127, %g1
+	retl
+	 add	%o0, %g1, %o0
+
+/* left: offset + 16 + (g1 % 16) */
 #define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
-	ldd	[%src - (offset) - 0x10], %t0; \
-	ldd	[%src - (offset) - 0x08], %t2; \
-	st	%t0, [%dst - (offset) - 0x10]; \
-	st	%t1, [%dst - (offset) - 0x0c]; \
-	st	%t2, [%dst - (offset) - 0x08]; \
-	st	%t3, [%dst - (offset) - 0x04];
+	LD(ldd, src, -(offset + 0x10), t0, lastchunk_fault) \
+	LD(ldd, src, -(offset + 0x08), t2, lastchunk_fault) \
+	ST(st, dst, -(offset + 0x10), t0, lastchunk_fault) \
+	ST(st, dst, -(offset + 0x0c), t1, lastchunk_fault) \
+	ST(st, dst, -(offset + 0x08), t2, lastchunk_fault) \
+	ST(st, dst, -(offset + 0x04), t3, lastchunk_fault)
 
+	.section .fixup,#alloc,#execinstr
+lastchunk_fault:
+	and	%g1, 15, %g1
+	retl
+	 sub	%g1, %g5, %o0
+
+/* left: o3 + (o2 % 16) - offset */
 #define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
-	lduh	[%src + (offset) + 0x00], %t0; \
-	lduh	[%src + (offset) + 0x02], %t1; \
-	lduh	[%src + (offset) + 0x04], %t2; \
-	lduh	[%src + (offset) + 0x06], %t3; \
-	sth	%t0, [%dst + (offset) + 0x00]; \
-	sth	%t1, [%dst + (offset) + 0x02]; \
-	sth	%t2, [%dst + (offset) + 0x04]; \
-	sth	%t3, [%dst + (offset) + 0x06];
+	LD(lduh, src, offset + 0x00, t0, halfchunk_fault) \
+	LD(lduh, src, offset + 0x02, t1, halfchunk_fault) \
+	LD(lduh, src, offset + 0x04, t2, halfchunk_fault) \
+	LD(lduh, src, offset + 0x06, t3, halfchunk_fault) \
+	ST(sth, dst, offset + 0x00, t0, halfchunk_fault) \
+	ST(sth, dst, offset + 0x02, t1, halfchunk_fault) \
+	ST(sth, dst, offset + 0x04, t2, halfchunk_fault) \
+	ST(sth, dst, offset + 0x06, t3, halfchunk_fault)
 
+/* left: o3 + (o2 % 16) + offset + 2 */
 #define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
-	ldub	[%src - (offset) - 0x02], %t0; \
-	ldub	[%src - (offset) - 0x01], %t1; \
-	stb	%t0, [%dst - (offset) - 0x02]; \
-	stb	%t1, [%dst - (offset) - 0x01];
+	LD(ldub, src, -(offset + 0x02), t0, halfchunk_fault) \
+	LD(ldub, src, -(offset + 0x01), t1, halfchunk_fault) \
+	ST(stb, dst, -(offset + 0x02), t0, halfchunk_fault) \
+	ST(stb, dst, -(offset + 0x01), t1, halfchunk_fault)
 
+	.section .fixup,#alloc,#execinstr
+halfchunk_fault:
+	and	%o2, 15, %o2
+	sub	%o3, %g5, %o3
+	retl
+	 add	%o2, %o3, %o0
+
+/* left: offset + 2 + (o2 % 2) */
+#define MOVE_LAST_SHORTCHUNK(src, dst, offset, t0, t1) \
+	LD(ldub, src, -(offset + 0x02), t0, last_shortchunk_fault) \
+	LD(ldub, src, -(offset + 0x01), t1, last_shortchunk_fault) \
+	ST(stb, dst, -(offset + 0x02), t0, last_shortchunk_fault) \
+	ST(stb, dst, -(offset + 0x01), t1, last_shortchunk_fault)
 
+	.section .fixup,#alloc,#execinstr
+last_shortchunk_fault:
+	and	%o2, 1, %o2
+	retl
+	 sub	%o2, %g5, %o0
+
 	.text
 	.align 4
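One point worth spelling out about the rewritten macros above: each load/store now carries its own two-word table entry (LD/ST emit it via EX_ENTRY), and each per-insn fixup stub branches to a shared label with the access offset preloaded into %g5, so the residual byte count is computed directly instead of being decoded from an instruction index in %g2. A C rendering of the bigchunk_fault arithmetic, following its code and the "left: g7 + (g1 % 128) - offset" comment — the function name and the register-role reading are inferred, not part of the patch:

	/* bigchunk_fault computes %o0 = (%g7 - %g5) + (%g1 & 127): the chunk
	 * counter minus the offset that faulted, plus the sub-128-byte tail
	 * of the original length.  Sketch only; the real code is the asm above.
	 */
	static unsigned long bigchunk_bytes_left(unsigned long g7, unsigned long g1,
						 unsigned long g5_offset)
	{
		return (g7 - g5_offset) + (g1 & 127);
	}

This is what makes the old EXT() range entries and the index-decoding ".fixup magic" removed further down unnecessary.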
@@ -182,8 +218,6 @@ __copy_user: /* %o0=dst %o1=src %o2=len */
 	MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
 	MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
 	MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-80:
-	EXT(5b, 80b, 50f)
 	subcc	%g7, 128, %g7
 	add	%o1, 128, %o1
 	bne	5b

@@ -201,7 +235,6 @@ __copy_user: /* %o0=dst %o1=src %o2=len */
 	jmpl	%o5 + %lo(copy_user_table_end), %g0
 	 add	%o0, %g7, %o0
 
-copy_user_table:
 	MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
 	MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
 	MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)

@@ -210,7 +243,6 @@ copy_user_table:
 	MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
 	MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
 copy_user_table_end:
-	EXT(copy_user_table, copy_user_table_end, 51f)
 	be	copy_user_last7
 	 andcc	%g1, 4, %g0
 

@@ -250,8 +282,6 @@ ldd_std:
 	MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
 	MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
 	MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-81:
-	EXT(ldd_std, 81b, 52f)
 	subcc	%g7, 128, %g7
 	add	%o1, 128, %o1
 	bne	ldd_std

@@ -290,8 +320,6 @@ cannot_optimize:
 10:
 	MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
 	MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
-82:
-	EXT(10b, 82b, 53f)
 	subcc	%o3, 0x10, %o3
 	add	%o1, 0x10, %o1
 	bne	10b

@@ -308,8 +336,6 @@ byte_chunk:
 	MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
 	MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
 	MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
-83:
-	EXT(byte_chunk, 83b, 54f)
 	subcc	%o3, 0x10, %o3
 	add	%o1, 0x10, %o1
 	bne	byte_chunk

@@ -325,16 +351,14 @@ short_end:
 	add	%o1, %o3, %o1
 	jmpl	%o5 + %lo(short_table_end), %g0
 	 andcc	%o2, 1, %g0
-84:
-	MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x08, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x06, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x04, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x02, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x00, g2, g3)
 short_table_end:
-	EXT(84b, short_table_end, 55f)
 	be	1f
 	 nop
 	EX(ldub [%o1], %g2, add %g0, 1)

@@ -363,123 +387,8 @@ short_aligned_end:
 	.section .fixup,#alloc,#execinstr
 	.align 4
 97:
-	mov	%o2, %g3
-fixupretl:
 	retl
-	 mov	%g3, %o0
+	 mov	%o2, %o0
 
-/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
-50:
-/* This magic counts how many bytes are left when crash in MOVE_BIGCHUNK
- * happens. This is derived from the amount ldd reads, st stores, etc.
- * x = g2 % 12;
- * g3 = g1 + g7 - ((g2 / 12) * 32 + (x < 4) ? 0 : (x - 4) * 4);
- * o0 += (g2 / 12) * 32;
- */
-	cmp	%g2, 12
-	add	%o0, %g7, %o0
-	bcs	1f
-	 cmp	%g2, 24
-	bcs	2f
-	 cmp	%g2, 36
-	bcs	3f
-	 nop
-	sub	%g2, 12, %g2
-	sub	%g7, 32, %g7
-3:	sub	%g2, 12, %g2
-	sub	%g7, 32, %g7
-2:	sub	%g2, 12, %g2
-	sub	%g7, 32, %g7
-1:	cmp	%g2, 4
-	bcs,a	60f
-	 clr	%g2
-	sub	%g2, 4, %g2
-	sll	%g2, 2, %g2
-60:	and	%g1, 0x7f, %g3
-	sub	%o0, %g7, %o0
-	add	%g3, %g7, %g3
-	ba	fixupretl
-	 sub	%g3, %g2, %g3
-51:
-/* i = 41 - g2; j = i % 6;
- * g3 = (g1 & 15) + (i / 6) * 16 + (j < 4) ? (j + 1) * 4 : 16;
- * o0 -= (i / 6) * 16 + 16;
- */
-	neg	%g2
-	and	%g1, 0xf, %g1
-	add	%g2, 41, %g2
-	add	%o0, %g1, %o0
-1:	cmp	%g2, 6
-	bcs,a	2f
-	 cmp	%g2, 4
-	add	%g1, 16, %g1
-	b	1b
-	 sub	%g2, 6, %g2
-2:	bcc,a	2f
-	 mov	16, %g2
-	inc	%g2
-	sll	%g2, 2, %g2
-2:	add	%g1, %g2, %g3
-	ba	fixupretl
-	 sub	%o0, %g3, %o0
-52:
-/* g3 = g1 + g7 - (g2 / 8) * 32 + (g2 & 4) ? (g2 & 3) * 8 : 0;
-   o0 += (g2 / 8) * 32 */
-	andn	%g2, 7, %g4
-	add	%o0, %g7, %o0
-	andcc	%g2, 4, %g0
-	and	%g2, 3, %g2
-	sll	%g4, 2, %g4
-	sll	%g2, 3, %g2
-	bne	60b
-	 sub	%g7, %g4, %g7
-	ba	60b
-	 clr	%g2
-53:
-/* g3 = o3 + (o2 & 15) - (g2 & 8) - (g2 & 4) ? (g2 & 3) * 2 : 0;
-   o0 += (g2 & 8) */
-	and	%g2, 3, %g4
-	andcc	%g2, 4, %g0
-	and	%g2, 8, %g2
-	sll	%g4, 1, %g4
-	be	1f
-	 add	%o0, %g2, %o0
-	add	%g2, %g4, %g2
-1:	and	%o2, 0xf, %g3
-	add	%g3, %o3, %g3
-	ba	fixupretl
-	 sub	%g3, %g2, %g3
-54:
-/* g3 = o3 + (o2 & 15) - (g2 / 4) * 2 - (g2 & 2) ? (g2 & 1) : 0;
-   o0 += (g2 / 4) * 2 */
-	srl	%g2, 2, %o4
-	and	%g2, 1, %o5
-	srl	%g2, 1, %g2
-	add	%o4, %o4, %o4
-	and	%o5, %g2, %o5
-	and	%o2, 0xf, %o2
-	add	%o0, %o4, %o0
-	sub	%o3, %o5, %o3
-	sub	%o2, %o4, %o2
-	ba	fixupretl
-	 add	%o2, %o3, %g3
-55:
-/* i = 27 - g2;
-   g3 = (o2 & 1) + i / 4 * 2 + !(i & 3);
-   o0 -= i / 4 * 2 + 1 */
-	neg	%g2
-	and	%o2, 1, %o2
-	add	%g2, 27, %g2
-	srl	%g2, 2, %o5
-	andcc	%g2, 3, %g0
-	mov	1, %g2
-	add	%o5, %o5, %o5
-	be,a	1f
-	 clr	%g2
-1:	add	%g2, %o5, %g3
-	sub	%o0, %g3, %o0
-	ba	fixupretl
-	 add	%g3, %o2, %g3
-
 	.globl __copy_user_end
 __copy_user_end:

@@ -19,7 +19,7 @@
 98:	x,y; \
 	.section .fixup,ALLOC,EXECINSTR; \
 	.align 4; \
-99:	ba 30f; \
+99:	retl; \
 	 a, b, %o0; \
 	.section __ex_table,ALLOC; \
 	.align 4; \

@@ -27,35 +27,44 @@
 	.text; \
 	.align 4
 
-#define EXT(start,end,handler) \
+#define STORE(source, base, offset, n) \
+98:	std source, [base + offset + n]; \
+	.section .fixup,ALLOC,EXECINSTR; \
+	.align 4; \
+99:	ba 30f; \
+	 sub %o3, n - offset, %o3; \
 	.section __ex_table,ALLOC; \
 	.align 4; \
-	.word start, 0, end, handler; \
+	.word 98b, 99b; \
 	.text; \
-	.align 4
+	.align 4;
 
+#define STORE_LAST(source, base, offset, n) \
+	EX(std source, [base - offset - n], \
+	   add %o1, offset + n);
+
 /* Please don't change these macros, unless you change the logic
  * in the .fixup section below as well.
  * Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
 #define ZERO_BIG_BLOCK(base, offset, source) \
-	std	source, [base + offset + 0x00]; \
-	std	source, [base + offset + 0x08]; \
-	std	source, [base + offset + 0x10]; \
-	std	source, [base + offset + 0x18]; \
-	std	source, [base + offset + 0x20]; \
-	std	source, [base + offset + 0x28]; \
-	std	source, [base + offset + 0x30]; \
-	std	source, [base + offset + 0x38];
+	STORE(source, base, offset, 0x00); \
+	STORE(source, base, offset, 0x08); \
+	STORE(source, base, offset, 0x10); \
+	STORE(source, base, offset, 0x18); \
+	STORE(source, base, offset, 0x20); \
+	STORE(source, base, offset, 0x28); \
+	STORE(source, base, offset, 0x30); \
+	STORE(source, base, offset, 0x38);
 
 #define ZERO_LAST_BLOCKS(base, offset, source) \
-	std	source, [base - offset - 0x38]; \
-	std	source, [base - offset - 0x30]; \
-	std	source, [base - offset - 0x28]; \
-	std	source, [base - offset - 0x20]; \
-	std	source, [base - offset - 0x18]; \
-	std	source, [base - offset - 0x10]; \
-	std	source, [base - offset - 0x08]; \
-	std	source, [base - offset - 0x00];
+	STORE_LAST(source, base, offset, 0x38); \
+	STORE_LAST(source, base, offset, 0x30); \
+	STORE_LAST(source, base, offset, 0x28); \
+	STORE_LAST(source, base, offset, 0x20); \
+	STORE_LAST(source, base, offset, 0x18); \
+	STORE_LAST(source, base, offset, 0x10); \
+	STORE_LAST(source, base, offset, 0x08); \
+	STORE_LAST(source, base, offset, 0x00);
 
 	.text
 	.align 4

@@ -68,8 +77,6 @@ __bzero_begin:
 	.globl	memset
 	EXPORT_SYMBOL(__bzero)
 	EXPORT_SYMBOL(memset)
-	.globl	__memset_start, __memset_end
-__memset_start:
 memset:
 	mov	%o0, %g1
 	mov	1, %g4

@@ -122,8 +129,6 @@ __bzero:
 	ZERO_BIG_BLOCK(%o0, 0x00, %g2)
 	subcc	%o3, 128, %o3
 	ZERO_BIG_BLOCK(%o0, 0x40, %g2)
-11:
-	EXT(10b, 11b, 20f)
 	bne	10b
 	 add	%o0, 128, %o0
 

@@ -138,11 +143,9 @@ __bzero:
 	jmp	%o4
 	 add	%o0, %o2, %o0
 
-12:
 	ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
 	ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
 13:
-	EXT(12b, 13b, 21f)
 	be	8f
 	 andcc	%o1, 4, %g0
 

@@ -182,37 +185,13 @@ __bzero:
 5:
 	retl
 	 clr	%o0
-__memset_end:
 
 	.section .fixup,#alloc,#execinstr
 	.align 4
-20:
-	cmp	%g2, 8
-	bleu	1f
-	 and	%o1, 0x7f, %o1
-	sub	%g2, 9, %g2
-	add	%o3, 64, %o3
-1:
-	sll	%g2, 3, %g2
-	add	%o3, %o1, %o0
-	b	30f
-	 sub	%o0, %g2, %o0
-21:
-	mov	8, %o0
-	and	%o1, 7, %o1
-	sub	%o0, %g2, %o0
-	sll	%o0, 3, %o0
-	b	30f
-	 add	%o0, %o1, %o0
 30:
-/* %o4 is faulting address, %o5 is %pc where fault occurred */
-	save	%sp, -104, %sp
-	mov	%i5, %o0
-	mov	%i7, %o1
-	call	lookup_fault
-	 mov	%i4, %o2
-	ret
-	 restore
+	and	%o1, 0x7f, %o1
+	retl
+	 add	%o3, %o1, %o0
 
 	.globl __bzero_end
 __bzero_end:

@@ -8,7 +8,7 @@ ccflags-y := -Werror
 obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o
 obj-y                   += fault_$(BITS).o
 obj-y                   += init_$(BITS).o
-obj-$(CONFIG_SPARC32)   += extable.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32)   += srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32)   += srmmu_access.o
 obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
 obj-$(CONFIG_SPARC32)   += leon_mm.o

@@ -1,107 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/sparc/mm/extable.c
- */
-
-#include <linux/module.h>
-#include <linux/extable.h>
-#include <linux/uaccess.h>
-
-void sort_extable(struct exception_table_entry *start,
-		  struct exception_table_entry *finish)
-{
-}
-
-/* Caller knows they are in a range if ret->fixup == 0 */
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *base,
-	       const size_t num,
-	       unsigned long value)
-{
-	int i;
-
-	/* Single insn entries are encoded as:
-	 * word 1: insn address
-	 * word 2: fixup code address
-	 *
-	 * Range entries are encoded as:
-	 * word 1: first insn address
-	 * word 2: 0
-	 * word 3: last insn address + 4 bytes
-	 * word 4: fixup code address
-	 *
-	 * Deleted entries are encoded as:
-	 * word 1: unused
-	 * word 2: -1
-	 *
-	 * See asm/uaccess.h for more details.
-	 */
-
-	/* 1. Try to find an exact match. */
-	for (i = 0; i < num; i++) {
-		if (base[i].fixup == 0) {
-			/* A range entry, skip both parts. */
-			i++;
-			continue;
-		}
-
-		/* A deleted entry; see trim_init_extable */
-		if (base[i].fixup == -1)
-			continue;
-
-		if (base[i].insn == value)
-			return &base[i];
-	}
-
-	/* 2. Try to find a range match. */
-	for (i = 0; i < (num - 1); i++) {
-		if (base[i].fixup)
-			continue;
-
-		if (base[i].insn <= value && base[i + 1].insn > value)
-			return &base[i];
-
-		i++;
-	}
-
-	return NULL;
-}
-
-#ifdef CONFIG_MODULES
-/* We could memmove them around; easier to mark the trimmed ones. */
-void trim_init_extable(struct module *m)
-{
-	unsigned int i;
-	bool range;
-
-	for (i = 0; i < m->num_exentries; i += range ? 2 : 1) {
-		range = m->extable[i].fixup == 0;
-
-		if (within_module_init(m->extable[i].insn, m)) {
-			m->extable[i].fixup = -1;
-			if (range)
-				m->extable[i+1].fixup = -1;
-		}
-		if (range)
-			i++;
-	}
-}
-#endif /* CONFIG_MODULES */
-
-/* Special extable search, which handles ranges. Returns fixup */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
-{
-	const struct exception_table_entry *entry;
-
-	entry = search_exception_tables(addr);
-	if (!entry)
-		return 0;
-
-	/* Inside range? Fix g2 and return correct fixup */
-	if (!entry->fixup) {
-		*g2 = (addr - entry->insn) / 4;
-		return (entry + 1)->fixup;
-	}
-
-	return entry->fixup;
-}
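With this file gone, sparc32 no longer overrides sort_extable()/search_extable() and falls through to the generic lib/extable.c implementation touched in the final hunks: entries are sorted once and then looked up by binary search on the instruction address. A simplified sketch of that comparator, adapted from the generic cmp_ex_search() shown below (direct field access substituted for the ex_to_insn() accessor; the _sketch name is not kernel code):

	/* bsearch comparator over a table sorted by insn address (sketch). */
	static int cmp_ex_search_sketch(const void *key, const void *elt)
	{
		const struct exception_table_entry *e = elt;
		unsigned long pc = *(const unsigned long *)key;

		if (pc < e->insn)
			return -1;
		if (pc > e->insn)
			return 1;
		return 0;
	}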
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/kdebug.h>
 #include <linux/uaccess.h>
+#include <linux/extable.h>
 
 #include <asm/page.h>
 #include <asm/openprom.h>

@@ -54,54 +55,6 @@ static void __noreturn unhandled_fault(unsigned long address,
 	die_if_kernel("Oops", regs);
 }
 
-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
-			    unsigned long address)
-{
-	struct pt_regs regs;
-	unsigned long g2;
-	unsigned int insn;
-	int i;
-
-	i = search_extables_range(ret_pc, &g2);
-	switch (i) {
-	case 3:
-		/* load & store will be handled by fixup */
-		return 3;
-
-	case 1:
-		/* store will be handled by fixup, load will bump out */
-		/* for _to_ macros */
-		insn = *((unsigned int *) pc);
-		if ((insn >> 21) & 1)
-			return 1;
-		break;
-
-	case 2:
-		/* load will be handled by fixup, store will bump out */
-		/* for _from_ macros */
-		insn = *((unsigned int *) pc);
-		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
-			return 2;
-		break;
-
-	default:
-		break;
-	}
-
-	memset(&regs, 0, sizeof(regs));
-	regs.pc = pc;
-	regs.npc = pc + 4;
-	__asm__ __volatile__(
-		"rd %%psr, %0\n\t"
-		"nop\n\t"
-		"nop\n\t"
-		"nop\n" : "=r" (regs.psr));
-	unhandled_fault(address, current, &regs);
-
-	/* Not reached */
-	return 0;
-}
-
 static inline void
 show_signal_msg(struct pt_regs *regs, int sig, int code,
 		unsigned long address, struct task_struct *tsk)

@@ -162,8 +115,6 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	struct vm_area_struct *vma;
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
-	unsigned int fixup;
-	unsigned long g2;
 	int from_user = !(regs->psr & PSR_PS);
 	int code;
 	vm_fault_t fault;

@@ -281,30 +232,19 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 
 /* Is this in ex_table? */
 no_context:
-	g2 = regs->u_regs[UREG_G2];
 	if (!from_user) {
-		fixup = search_extables_range(regs->pc, &g2);
-		/* Values below 10 are reserved for other things */
-		if (fixup > 10) {
-			extern const unsigned int __memset_start[];
-			extern const unsigned int __memset_end[];
+		const struct exception_table_entry *entry;
 
+		entry = search_exception_tables(regs->pc);
 #ifdef DEBUG_EXCEPTIONS
 		printk("Exception: PC<%08lx> faddr<%08lx>\n",
 		       regs->pc, address);
-		printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
-			regs->pc, fixup, g2);
+		printk("EX_TABLE: insn<%08lx> fixup<%08x>\n",
+			regs->pc, entry->fixup);
 #endif
-		if ((regs->pc >= (unsigned long)__memset_start &&
-		     regs->pc < (unsigned long)__memset_end)) {
-			regs->u_regs[UREG_I4] = address;
-			regs->u_regs[UREG_I5] = regs->pc;
-		}
-		regs->u_regs[UREG_G2] = g2;
-		regs->pc = fixup;
-		regs->npc = regs->pc + 4;
-		return;
-		}
+		regs->pc = entry->fixup;
+		regs->npc = regs->pc + 4;
+		return;
 	}
 
 	unhandled_fault(address, tsk, regs);

@@ -1,7 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* fault_32.c - visible as they are called from assembler */
-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
-			    unsigned long address);
 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 			       unsigned long address);
 

@@ -21,7 +21,6 @@ static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
 }
 #endif
 
-#ifndef ARCH_HAS_SORT_EXTABLE
 #ifndef ARCH_HAS_RELATIVE_EXTABLE
 #define swap_ex		NULL
 #else

@@ -88,9 +87,6 @@ void trim_init_extable(struct module *m)
 		m->num_exentries--;
 	}
 #endif /* CONFIG_MODULES */
-#endif /* !ARCH_HAS_SORT_EXTABLE */
-
-#ifndef ARCH_HAS_SEARCH_EXTABLE
 
 static int cmp_ex_search(const void *key, const void *elt)
 {

@@ -120,4 +116,3 @@ search_extable(const struct exception_table_entry *base,
 	return bsearch(&value, base, num,
 		       sizeof(struct exception_table_entry), cmp_ex_search);
 }
-#endif