Merge branch 'wip-mips-pm' of https://github.com/paulburton/linux into mips-for-linux-next
commit 2e2d663d2d
@@ -50,6 +50,8 @@ config MIPS
 	select CLONE_BACKWARDS
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_CC_STACKPROTECTOR
+	select CPU_PM if CPU_IDLE
+	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 
 menu "Machine selection"
 
@@ -2012,9 +2014,11 @@ config MIPS_CPS
 	depends on SYS_SUPPORTS_MIPS_CPS
 	select MIPS_CM
 	select MIPS_CPC
+	select MIPS_CPS_PM if HOTPLUG_CPU
 	select MIPS_GIC_IPI
 	select SMP
 	select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
+	select SYS_SUPPORTS_HOTPLUG_CPU
 	select SYS_SUPPORTS_SMP
 	select WEAK_ORDERING
 	help
@@ -2024,6 +2028,9 @@ config MIPS_CPS
 	  no external assistance. It is safe to enable this when hardware
 	  support is unavailable.
 
+config MIPS_CPS_PM
+	bool
+
 config MIPS_GIC_IPI
 	bool
 
@@ -2633,12 +2640,16 @@ endmenu
 config MIPS_EXTERNAL_TIMER
 	bool
 
-if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
 menu "CPU Power Management"
+
+if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
 source "drivers/cpufreq/Kconfig"
-endmenu
 endif
 
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
 source "net/Kconfig"
 
 source "drivers/Kconfig"
@@ -4,10 +4,9 @@ CONFIG_CPU_MIPS32_R2=y
 CONFIG_PAGE_SIZE_16KB=y
 CONFIG_MIPS_MT_SMP=y
 CONFIG_SCHED_SMT=y
-CONFIG_MIPS_CMP=y
+CONFIG_MIPS_CPS=y
 CONFIG_NR_CPUS=8
 CONFIG_HZ_100=y
-CONFIG_LOCALVERSION="cmp"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
@@ -5,10 +5,9 @@ CONFIG_CPU_MIPS32_3_5_FEATURES=y
 CONFIG_PAGE_SIZE_16KB=y
 CONFIG_MIPS_MT_SMP=y
 CONFIG_SCHED_SMT=y
-CONFIG_MIPS_CMP=y
+CONFIG_MIPS_CPS=y
 CONFIG_NR_CPUS=8
 CONFIG_HZ_100=y
-CONFIG_LOCALVERSION="cmp"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
@@ -113,6 +113,12 @@ unsigned long run_uncached(void *func);
 
 extern void *kmap_coherent(struct page *page, unsigned long addr);
 extern void kunmap_coherent(void);
+extern void *kmap_noncoherent(struct page *page, unsigned long addr);
+
+static inline void kunmap_noncoherent(void)
+{
+	kunmap_coherent();
+}
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 static inline void flush_kernel_dcache_page(struct page *page)
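
For context, a minimal sketch of how the new kmap_noncoherent()/kunmap_noncoherent() pair is used; this mirrors the pm-cps.c code later in this series, and the variable here is illustrative:

	u32 *var;	/* some kernel variable needing a non-coherent view */
	void *nc;

	nc = kmap_noncoherent(virt_to_page(var), (unsigned long)var);
	nc += (unsigned long)var & ~PAGE_MASK;
	/* ... access *(u32 *)nc while cache coherence is disabled ... */
	kunmap_noncoherent();
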
@@ -380,6 +380,7 @@ extern unsigned int gic_compare_int (void);
 extern cycle_t gic_read_count(void);
 extern cycle_t gic_read_compare(void);
 extern void gic_write_compare(cycle_t cnt);
+extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
 extern void gic_send_ipi(unsigned int intr);
 extern unsigned int plat_ipi_call_int_xlate(unsigned int);
 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
@@ -1,6 +1,7 @@
 #ifndef __ASM_IDLE_H
 #define __ASM_IDLE_H
 
+#include <linux/cpuidle.h>
 #include <linux/linkage.h>
 
 extern void (*cpu_wait)(void);
@@ -20,4 +21,17 @@ static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)
 	       addr < (unsigned long)__pastwait;
 }
 
+extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
+				   struct cpuidle_driver *drv, int index);
+
+#define MIPS_CPUIDLE_WAIT_STATE {\
+	.enter = mips_cpuidle_wait_enter,\
+	.exit_latency = 1,\
+	.target_residency = 1,\
+	.power_usage = UINT_MAX,\
+	.flags = CPUIDLE_FLAG_TIME_VALID,\
+	.name = "wait",\
+	.desc = "MIPS wait",\
+}
+
 #endif /* __ASM_IDLE_H */
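
A minimal sketch (not from this commit) of how MIPS_CPUIDLE_WAIT_STATE slots into a cpuidle driver's state table; the driver name here is hypothetical:

	static struct cpuidle_driver example_cps_idle_driver = {
		.name		= "example-cps-idle",	/* hypothetical */
		.owner		= THIS_MODULE,
		.states = {
			[0] = MIPS_CPUIDLE_WAIT_STATE,	/* plain wait */
		},
		.state_count	= 1,
	};
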
@@ -72,7 +72,12 @@ static inline bool mips_cpc_present(void)
 #define MIPS_CPC_COCB_OFS	0x4000
 
 /* Macros to ease the creation of register access functions */
 #define BUILD_CPC_R_(name, off)					\
+static inline u32 *addr_cpc_##name(void)			\
+{								\
+	return (u32 *)(mips_cpc_base + (off));			\
+}								\
+								\
 static inline u32 read_cpc_##name(void)			\
 {								\
 	return __raw_readl(mips_cpc_base + (off));		\
@@ -147,4 +152,31 @@ BUILD_CPC_Cx_RW(other, 0x10)
 #define CPC_Cx_OTHER_CORENUM_SHF	16
 #define CPC_Cx_OTHER_CORENUM_MSK	(_ULCAST_(0xff) << 16)
 
+#ifdef CONFIG_MIPS_CPC
+
+/**
+ * mips_cpc_lock_other - lock access to another core
+ * core: the other core to be accessed
+ *
+ * Call before operating upon a core via the 'other' register region in
+ * order to prevent the region being moved during access. Must be followed
+ * by a call to mips_cpc_unlock_other.
+ */
+extern void mips_cpc_lock_other(unsigned int core);
+
+/**
+ * mips_cpc_unlock_other - unlock access to another core
+ *
+ * Call after operating upon another core via the 'other' register region.
+ * Must be called after mips_cpc_lock_other.
+ */
+extern void mips_cpc_unlock_other(void);
+
+#else /* !CONFIG_MIPS_CPC */
+
+static inline void mips_cpc_lock_other(unsigned int core) { }
+static inline void mips_cpc_unlock_other(void) { }
+
+#endif /* !CONFIG_MIPS_CPC */
+
 #endif /* __MIPS_ASM_MIPS_CPC_H__ */
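
For context, a sketch of the intended calling pattern. The command write is illustrative only: the accessor name follows the BUILD_CPC_* conventions in this header, assuming a co_cmd register accessor and a power-up command exist:

	mips_cpc_lock_other(core);
	/* the 'other' region is now pinned to the target core */
	write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);	/* assumed accessor/command */
	mips_cpc_unlock_other();
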
@@ -36,6 +36,8 @@
 
 #define read_c0_tcbind()	__read_32bit_c0_register($2, 2)
 
+#define write_c0_tchalt(val)	__write_32bit_c0_register($2, 4, val)
+
 #define read_c0_tccontext()	__read_32bit_c0_register($2, 5)
 #define write_c0_tccontext(val)	__write_32bit_c0_register($2, 5, val)
 
@@ -27,11 +27,15 @@ do { \
 } while (0)
 
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+
+#define TLBMISS_HANDLER_RESTORE()					\
+	write_c0_xcontext((unsigned long) smp_processor_id() <<	\
+			  SMP_CPUID_REGSHIFT)
+
 #define TLBMISS_HANDLER_SETUP()						\
 	do {								\
 		TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);		\
-		write_c0_xcontext((unsigned long) smp_processor_id() <<	\
-				  SMP_CPUID_REGSHIFT);			\
+		TLBMISS_HANDLER_RESTORE();				\
 	} while (0)
 
 #else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/
@@ -43,9 +47,12 @@ do { \
 */
 extern unsigned long pgd_current[];
 
-#define TLBMISS_HANDLER_SETUP()						\
+#define TLBMISS_HANDLER_RESTORE()					\
 	write_c0_context((unsigned long) smp_processor_id() <<		\
-		SMP_CPUID_REGSHIFT);					\
+			 SMP_CPUID_REGSHIFT)
+
+#define TLBMISS_HANDLER_SETUP()						\
+	TLBMISS_HANDLER_RESTORE();					\
 	back_to_back_c0_hazard();					\
 	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
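
The split lets a resume path re-establish the CPU number in the Context/XContext register without redoing the full handler setup. In sketch form, a VPE coming back from a power-gated state (see pm.h and pm-cps.c below) only needs:

	/* after restoring the stack and static state on resume: */
	TLBMISS_HANDLER_RESTORE();	/* CPU id back into (X)Context */
	back_to_back_c0_hazard();
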
@@ -32,6 +32,8 @@ struct vm_area_struct;
 				 _page_cachable_default)
 #define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
 				 _PAGE_GLOBAL | _page_cachable_default)
+#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
 #define PAGE_USERIO	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
 				 _page_cachable_default)
 #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
arch/mips/include/asm/pm-cps.h (new file, 51 lines)
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_PM_CPS_H__
+#define __MIPS_ASM_PM_CPS_H__
+
+/*
+ * The CM & CPC can only handle coherence & power control on a per-core basis,
+ * thus in an MT system the VPEs within each core are coupled and can only
+ * enter or exit states requiring CM or CPC assistance in unison.
+ */
+#ifdef CONFIG_MIPS_MT
+# define coupled_coherence cpu_has_mipsmt
+#else
+# define coupled_coherence 0
+#endif
+
+/* Enumeration of possible PM states */
+enum cps_pm_state {
+	CPS_PM_NC_WAIT,		/* MIPS wait instruction, non-coherent */
+	CPS_PM_CLOCK_GATED,	/* Core clock gated */
+	CPS_PM_POWER_GATED,	/* Core power gated */
+	CPS_PM_STATE_COUNT,
+};
+
+/**
+ * cps_pm_support_state - determine whether the system supports a PM state
+ * @state: the state to test for support
+ *
+ * Returns true if the system supports the given state, otherwise false.
+ */
+extern bool cps_pm_support_state(enum cps_pm_state state);
+
+/**
+ * cps_pm_enter_state - enter a PM state
+ * @state: the state to enter
+ *
+ * Enter the given PM state. If coupled_coherence is non-zero then it is
+ * expected that this function be called at approximately the same time on
+ * each coupled CPU. Returns 0 on successful entry & exit, otherwise -errno.
+ */
+extern int cps_pm_enter_state(enum cps_pm_state state);
+
+#endif /* __MIPS_ASM_PM_CPS_H__ */
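
For context, a minimal sketch (not from this commit) of how a caller such as a cpuidle or hotplug path might drive this API:

	if (cps_pm_support_state(CPS_PM_POWER_GATED)) {
		int err = cps_pm_enter_state(CPS_PM_POWER_GATED);
		if (err)
			pr_warn("power gating failed: %d\n", err);
	}
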
arch/mips/include/asm/pm.h (new file, 159 lines)
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * PM helper macros for CPU power off (e.g. Suspend-to-RAM).
+ */
+
+#ifndef __ASM_PM_H
+#define __ASM_PM_H
+
+#ifdef __ASSEMBLY__
+
+#include <asm/asm-offsets.h>
+#include <asm/asm.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+
+/* Save CPU state to stack for suspend to RAM */
+.macro SUSPEND_SAVE_REGS
+	subu	sp, PT_SIZE
+	/* Call preserved GPRs */
+	LONG_S	$16, PT_R16(sp)
+	LONG_S	$17, PT_R17(sp)
+	LONG_S	$18, PT_R18(sp)
+	LONG_S	$19, PT_R19(sp)
+	LONG_S	$20, PT_R20(sp)
+	LONG_S	$21, PT_R21(sp)
+	LONG_S	$22, PT_R22(sp)
+	LONG_S	$23, PT_R23(sp)
+	LONG_S	$28, PT_R28(sp)
+	LONG_S	$30, PT_R30(sp)
+	LONG_S	$31, PT_R31(sp)
+	/* A couple of CP0 registers with space in pt_regs */
+	mfc0	k0, CP0_STATUS
+	LONG_S	k0, PT_STATUS(sp)
+.endm
+
+/* Restore CPU state from stack after resume from RAM */
+.macro RESUME_RESTORE_REGS_RETURN
+	.set	push
+	.set	noreorder
+	/* A couple of CP0 registers with space in pt_regs */
+	LONG_L	k0, PT_STATUS(sp)
+	mtc0	k0, CP0_STATUS
+	/* Call preserved GPRs */
+	LONG_L	$16, PT_R16(sp)
+	LONG_L	$17, PT_R17(sp)
+	LONG_L	$18, PT_R18(sp)
+	LONG_L	$19, PT_R19(sp)
+	LONG_L	$20, PT_R20(sp)
+	LONG_L	$21, PT_R21(sp)
+	LONG_L	$22, PT_R22(sp)
+	LONG_L	$23, PT_R23(sp)
+	LONG_L	$28, PT_R28(sp)
+	LONG_L	$30, PT_R30(sp)
+	LONG_L	$31, PT_R31(sp)
+	/* Pop and return */
+	jr	ra
+	 addiu	sp, PT_SIZE
+	.set	pop
+.endm
+
+/* Get address of static suspend state into t1 */
+.macro LA_STATIC_SUSPEND
+	la	t1, mips_static_suspend_state
+.endm
+
+/* Save important CPU state for early restoration to global data */
+.macro SUSPEND_SAVE_STATIC
+#ifdef CONFIG_EVA
+	/*
+	 * Segment configuration is saved in global data where it can be easily
+	 * reloaded without depending on the segment configuration.
+	 */
+	mfc0	k0, CP0_PAGEMASK, 2	/* SegCtl0 */
+	LONG_S	k0, SSS_SEGCTL0(t1)
+	mfc0	k0, CP0_PAGEMASK, 3	/* SegCtl1 */
+	LONG_S	k0, SSS_SEGCTL1(t1)
+	mfc0	k0, CP0_PAGEMASK, 4	/* SegCtl2 */
+	LONG_S	k0, SSS_SEGCTL2(t1)
+#endif
+	/* save stack pointer (pointing to GPRs) */
+	LONG_S	sp, SSS_SP(t1)
+.endm
+
+/* Restore important CPU state early from global data */
+.macro RESUME_RESTORE_STATIC
+#ifdef CONFIG_EVA
+	/*
+	 * Segment configuration must be restored prior to any access to
+	 * allocated memory, as it may reside outside of the legacy kernel
+	 * segments.
+	 */
+	LONG_L	k0, SSS_SEGCTL0(t1)
+	mtc0	k0, CP0_PAGEMASK, 2	/* SegCtl0 */
+	LONG_L	k0, SSS_SEGCTL1(t1)
+	mtc0	k0, CP0_PAGEMASK, 3	/* SegCtl1 */
+	LONG_L	k0, SSS_SEGCTL2(t1)
+	mtc0	k0, CP0_PAGEMASK, 4	/* SegCtl2 */
+	tlbw_use_hazard
+#endif
+	/* restore stack pointer (pointing to GPRs) */
+	LONG_L	sp, SSS_SP(t1)
+.endm
+
+/* flush caches to make sure context has reached memory */
+.macro SUSPEND_CACHE_FLUSH
+	.extern	__wback_cache_all
+	.set	push
+	.set	noreorder
+	la	t1, __wback_cache_all
+	LONG_L	t0, 0(t1)
+	jalr	t0
+	 nop
+	.set	pop
+.endm
+
+/* Save suspend state and flush data caches to RAM */
+.macro SUSPEND_SAVE
+	SUSPEND_SAVE_REGS
+	LA_STATIC_SUSPEND
+	SUSPEND_SAVE_STATIC
+	SUSPEND_CACHE_FLUSH
+.endm
+
+/* Restore saved state after resume from RAM and return */
+.macro RESUME_RESTORE_RETURN
+	LA_STATIC_SUSPEND
+	RESUME_RESTORE_STATIC
+	RESUME_RESTORE_REGS_RETURN
+.endm
+
+#else /* __ASSEMBLY__ */
+
+/**
+ * struct mips_static_suspend_state - Core saved CPU state across S2R.
+ * @segctl:	CP0 Segment control registers.
+ * @sp:		Stack frame where GP register context is saved.
+ *
+ * This structure contains minimal CPU state that must be saved in static kernel
+ * data in order to be able to restore the rest of the state. This includes
+ * segmentation configuration in the case of EVA being enabled, as they must be
+ * restored prior to any kmalloc'd memory being referenced (even the stack
+ * pointer).
+ */
+struct mips_static_suspend_state {
+#ifdef CONFIG_EVA
+	unsigned long segctl[3];
+#endif
+	unsigned long sp;
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_PM_HELPERS_H */
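
Design note on the code above: SUSPEND_CACHE_FLUSH calls through the __wback_cache_all function pointer (load, then jalr) rather than a direct call, so the flush routine used is whichever cache implementation was selected at boot for the running CPU.
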
@@ -13,17 +13,28 @@
 
 #ifndef __ASSEMBLY__
 
-struct boot_config {
-	unsigned int core;
-	unsigned int vpe;
+struct vpe_boot_config {
 	unsigned long pc;
 	unsigned long sp;
 	unsigned long gp;
 };
 
-extern struct boot_config mips_cps_bootcfg;
+struct core_boot_config {
+	atomic_t vpe_mask;
+	struct vpe_boot_config *vpe_config;
+};
+
+extern struct core_boot_config *mips_cps_core_bootcfg;
 
 extern void mips_cps_core_entry(void);
+extern void mips_cps_core_init(void);
+
+extern struct vpe_boot_config *mips_cps_boot_vpes(void);
+
+extern bool mips_cps_smp_in_use(void);
+
+extern void mips_cps_pm_save(void);
+extern void mips_cps_pm_restore(void);
 
 #else /* __ASSEMBLY__ */
 
@@ -46,6 +46,9 @@ extern int __cpu_logical_map[NR_CPUS];
 
 extern volatile cpumask_t cpu_callin_map;
 
+/* Mask of CPUs which are currently definitely operating coherently */
+extern cpumask_t cpu_coherent_mask;
+
 extern void asmlinkage smp_bootstrap(void);
 
 /*
@@ -74,6 +74,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
 #define Ip_u1u2(op)							\
 void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
 
+#define Ip_u2u1(op)							\
+void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
+
 #define Ip_u1s2(op)							\
 void ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
 
@@ -114,6 +117,7 @@ Ip_u2u1msbu3(_ext);
 Ip_u2u1msbu3(_ins);
 Ip_u1(_j);
 Ip_u1(_jal);
+Ip_u2u1(_jalr);
 Ip_u1(_jr);
 Ip_u2s3u1(_ld);
 Ip_u3u1u2(_ldx);
@@ -137,13 +141,16 @@ Ip_u2u1u3(_sra);
 Ip_u2u1u3(_srl);
 Ip_u3u1u2(_subu);
 Ip_u2s3u1(_sw);
+Ip_u1(_sync);
 Ip_u1(_syscall);
 Ip_0(_tlbp);
 Ip_0(_tlbr);
 Ip_0(_tlbwi);
 Ip_0(_tlbwr);
+Ip_u1(_wait);
 Ip_u3u1u2(_xor);
 Ip_u2u1u3(_xori);
+Ip_u2u1(_yield);
 
 
 /* Handle labels. */
@@ -264,6 +271,8 @@ void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
 		   unsigned int bit, int lid);
 void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
 		   unsigned int bit, int lid);
+void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1,
+		 unsigned int r2, int lid);
 void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
 void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
 void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
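
A hedged illustration of the new uasm ops in use; this mirrors how pm-cps.c below drives them, with buffer management deliberately simplified:

	u32 buf[8], *p = buf;

	uasm_i_sync(&p, 0);	/* via the new Ip_u1(_sync) */
	uasm_i_wait(&p, 0);	/* via the new Ip_u1(_wait) */
	uasm_i_jr(&p, 31);	/* return through ra (register 31) */
	uasm_i_nop(&p);
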
@@ -76,16 +76,17 @@ enum spec2_op {
 enum spec3_op {
 	ext_op, dextm_op, dextu_op, dext_op,
 	ins_op, dinsm_op, dinsu_op, dins_op,
-	lx_op = 0x0a, lwle_op = 0x19,
-	lwre_op = 0x1a, cachee_op = 0x1b,
-	sbe_op = 0x1c, she_op = 0x1d,
-	sce_op = 0x1e, swe_op = 0x1f,
-	bshfl_op = 0x20, swle_op = 0x21,
-	swre_op = 0x22, prefe_op = 0x23,
-	dbshfl_op = 0x24, lbue_op = 0x28,
-	lhue_op = 0x29, lbe_op = 0x2c,
-	lhe_op = 0x2d, lle_op = 0x2e,
-	lwe_op = 0x2f, rdhwr_op = 0x3b
+	yield_op = 0x09, lx_op = 0x0a,
+	lwle_op = 0x19, lwre_op = 0x1a,
+	cachee_op = 0x1b, sbe_op = 0x1c,
+	she_op = 0x1d, sce_op = 0x1e,
+	swe_op = 0x1f, bshfl_op = 0x20,
+	swle_op = 0x21, swre_op = 0x22,
+	prefe_op = 0x23, dbshfl_op = 0x24,
+	lbue_op = 0x28, lhue_op = 0x29,
+	lbe_op = 0x2c, lhe_op = 0x2d,
+	lle_op = 0x2e, lwe_op = 0x2f,
+	rdhwr_op = 0x3b
 };
 
 /*
@@ -127,7 +128,8 @@ enum bcop_op {
 enum cop0_coi_func {
 	tlbr_op = 0x01, tlbwi_op = 0x02,
 	tlbwr_op = 0x06, tlbp_op = 0x08,
-	rfe_op = 0x10, eret_op = 0x18
+	rfe_op = 0x10, eret_op = 0x18,
+	wait_op = 0x20,
 };
 
 /*
@@ -303,7 +305,9 @@ enum mm_32axf_minor_op {
 	mm_tlbwr_op = 0x0cd,
 	mm_jalrs_op = 0x13c,
 	mm_jalrshb_op = 0x17c,
+	mm_sync_op = 0x1ad,
 	mm_syscall_op = 0x22d,
+	mm_wait_op = 0x24d,
 	mm_eret_op = 0x3cd,
 };
 
@@ -105,6 +105,9 @@ obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
 obj-$(CONFIG_MIPS_CM)		+= mips-cm.o
 obj-$(CONFIG_MIPS_CPC)		+= mips-cpc.o
 
+obj-$(CONFIG_CPU_PM)		+= pm.o
+obj-$(CONFIG_MIPS_CPS_PM)	+= pm-cps.o
+
 #
 # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
 # safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/kbuild.h>
 #include <linux/suspend.h>
+#include <asm/pm.h>
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 #include <asm/smp-cps.h>
@@ -401,6 +402,20 @@ void output_pbe_defines(void)
 }
 #endif
 
+#ifdef CONFIG_CPU_PM
+void output_pm_defines(void)
+{
+	COMMENT(" PM offsets. ");
+#ifdef CONFIG_EVA
+	OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]);
+	OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]);
+	OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]);
+#endif
+	OFFSET(SSS_SP, mips_static_suspend_state, sp);
+	BLANK();
+}
+#endif
+
 void output_kvm_defines(void)
 {
 	COMMENT(" KVM/MIPS Specfic offsets. ");
@@ -469,10 +484,14 @@ void output_kvm_defines(void)
 void output_cps_defines(void)
 {
 	COMMENT(" MIPS CPS offsets. ");
-	OFFSET(BOOTCFG_CORE, boot_config, core);
-	OFFSET(BOOTCFG_VPE, boot_config, vpe);
-	OFFSET(BOOTCFG_PC, boot_config, pc);
-	OFFSET(BOOTCFG_SP, boot_config, sp);
-	OFFSET(BOOTCFG_GP, boot_config, gp);
+	OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask);
+	OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config);
+	DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config));
+
+	OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc);
+	OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp);
+	OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp);
+	DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config));
 }
 #endif
@@ -26,7 +26,7 @@ static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
 
 	cnt = gic_read_count();
 	cnt += (u64)delta;
-	gic_write_compare(cnt);
+	gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask));
 	res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
 	return res;
 }
@@ -73,7 +73,8 @@ int gic_clockevent_init(void)
 	cd = &per_cpu(gic_clockevent_device, cpu);
 
 	cd->name		= "MIPS GIC";
-	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_C3STOP;
 
 	clockevent_set_clock(cd, gic_frequency);
 
@@ -62,9 +62,6 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 		/* Clear Count/Compare Interrupt */
 		write_c0_compare(read_c0_compare());
 		cd = &per_cpu(mips_clockevent_device, cpu);
-#ifdef CONFIG_CEVT_GIC
-		if (!gic_present)
-#endif
 		cd->event_handler(cd);
 	}
 
@@ -182,7 +179,9 @@ int r4k_clockevent_init(void)
 	cd = &per_cpu(mips_clockevent_device, cpu);
 
 	cd->name		= "MIPS";
-	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_C3STOP |
+				  CLOCK_EVT_FEAT_PERCPU;
 
 	clockevent_set_clock(cd, mips_hpt_frequency);
 
@@ -197,9 +196,6 @@ int r4k_clockevent_init(void)
 	cd->set_mode		= mips_set_clock_mode;
 	cd->event_handler	= mips_event_handler;
 
-#ifdef CONFIG_CEVT_GIC
-	if (!gic_present)
-#endif
 	clockevents_register_device(cd);
 
 	if (cp0_timer_irq_installed)
@@ -14,19 +14,43 @@
 #include <asm/asmmacro.h>
 #include <asm/cacheops.h>
 #include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/pm.h>
 
 #define GCR_CL_COHERENCE_OFS	0x2008
+#define GCR_CL_ID_OFS		0x2028
+
+.extern mips_cm_base
+
+.set noreorder
+
+	/*
+	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
+	 * MT is not supported then branch to nomt.
+	 */
+	.macro	has_mt	dest, nomt
+	mfc0	\dest, CP0_CONFIG
+	bgez	\dest, \nomt
+	 mfc0	\dest, CP0_CONFIG, 1
+	bgez	\dest, \nomt
+	 mfc0	\dest, CP0_CONFIG, 2
+	bgez	\dest, \nomt
+	 mfc0	\dest, CP0_CONFIG, 3
+	andi	\dest, \dest, MIPS_CONF3_MT
+	beqz	\dest, \nomt
+	.endm
 
 .section .text.cps-vec
 .balign 0x1000
-.set noreorder
 
 LEAF(mips_cps_core_entry)
 	/*
-	 * These first 8 bytes will be patched by cps_smp_setup to load the
-	 * base address of the CM GCRs into register v1.
+	 * These first 12 bytes will be patched by cps_smp_setup to load the
+	 * base address of the CM GCRs into register v1 and the CCA to use into
+	 * register s0.
 	 */
 	.quad	0
+	.word	0
 
 	/* Check whether we're here due to an NMI */
 	mfc0	k0, CP0_STATUS
@@ -117,10 +141,11 @@ icache_done:
 	add	a0, a0, t0
 dcache_done:
 
-	/* Set Kseg0 cacheable, coherent, write-back, write-allocate */
+	/* Set Kseg0 CCA to that in s0 */
 	mfc0	t0, CP0_CONFIG
 	ori	t0, 0x7
-	xori	t0, 0x2
+	xori	t0, 0x7
+	or	t0, t0, s0
 	mtc0	t0, CP0_CONFIG
 	ehb
 
@@ -134,21 +159,24 @@ dcache_done:
 	jr	t0
 	 nop
 
-1:	/* We're up, cached & coherent */
+	/*
+	 * We're up, cached & coherent. Perform any further required core-level
+	 * initialisation.
+	 */
+1:	jal	mips_cps_core_init
+	 nop
 
 	/*
-	 * TODO: We should check the VPE number we intended to boot here, and
-	 * if non-zero we should start that VPE and stop this one. For
-	 * the moment this doesn't matter since CPUs are brought up
-	 * sequentially and in order, but once hotplug is implemented
-	 * this will need revisiting.
+	 * Boot any other VPEs within this core that should be online, and
+	 * deactivate this VPE if it should be offline.
 	 */
+	jal	mips_cps_boot_vpes
+	 nop
 
 	/* Off we go! */
-	la	t0, mips_cps_bootcfg
-	lw	t1, BOOTCFG_PC(t0)
-	lw	gp, BOOTCFG_GP(t0)
-	lw	sp, BOOTCFG_SP(t0)
+	lw	t1, VPEBOOTCFG_PC(v0)
+	lw	gp, VPEBOOTCFG_GP(v0)
+	lw	sp, VPEBOOTCFG_SP(v0)
 	jr	t1
 	 nop
 	END(mips_cps_core_entry)
@@ -189,3 +217,271 @@ LEAF(excep_ejtag)
 	jr	k0
 	 nop
 	END(excep_ejtag)
+
+LEAF(mips_cps_core_init)
+#ifdef CONFIG_MIPS_MT
+	/* Check that the core implements the MT ASE */
+	has_mt	t0, 3f
+	 nop
+
+	.set	push
+	.set	mt
+
+	/* Only allow 1 TC per VPE to execute... */
+	dmt
+
+	/* ...and for the moment only 1 VPE */
+	dvpe
+	la	t1, 1f
+	jr.hb	t1
+	 nop
+
+	/* Enter VPE configuration state */
+1:	mfc0	t0, CP0_MVPCONTROL
+	ori	t0, t0, MVPCONTROL_VPC
+	mtc0	t0, CP0_MVPCONTROL
+
+	/* Retrieve the number of VPEs within the core */
+	mfc0	t0, CP0_MVPCONF0
+	srl	t0, t0, MVPCONF0_PVPE_SHIFT
+	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
+	addi	t7, t0, 1
+
+	/* If there's only 1, we're done */
+	beqz	t0, 2f
+	 nop
+
+	/* Loop through each VPE within this core */
+	li	t5, 1
+
+1:	/* Operate on the appropriate TC */
+	mtc0	t5, CP0_VPECONTROL
+	ehb
+
+	/* Bind TC to VPE (1:1 TC:VPE mapping) */
+	mttc0	t5, CP0_TCBIND
+
+	/* Set exclusive TC, non-active, master */
+	li	t0, VPECONF0_MVP
+	sll	t1, t5, VPECONF0_XTC_SHIFT
+	or	t0, t0, t1
+	mttc0	t0, CP0_VPECONF0
+
+	/* Set TC non-active, non-allocatable */
+	mttc0	zero, CP0_TCSTATUS
+
+	/* Set TC halted */
+	li	t0, TCHALT_H
+	mttc0	t0, CP0_TCHALT
+
+	/* Next VPE */
+	addi	t5, t5, 1
+	slt	t0, t5, t7
+	bnez	t0, 1b
+	 nop
+
+	/* Leave VPE configuration state */
+2:	mfc0	t0, CP0_MVPCONTROL
+	xori	t0, t0, MVPCONTROL_VPC
+	mtc0	t0, CP0_MVPCONTROL
+
+3:	.set	pop
+#endif
+	jr	ra
+	 nop
+	END(mips_cps_core_init)
+
+LEAF(mips_cps_boot_vpes)
+	/* Retrieve CM base address */
+	la	t0, mips_cm_base
+	lw	t0, 0(t0)
+
+	/* Calculate a pointer to this cores struct core_boot_config */
+	lw	t0, GCR_CL_ID_OFS(t0)
+	li	t1, COREBOOTCFG_SIZE
+	mul	t0, t0, t1
+	la	t1, mips_cps_core_bootcfg
+	lw	t1, 0(t1)
+	addu	t0, t0, t1
+
+	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
+	has_mt	t6, 1f
+	 li	t9, 0
+
+	/* Find the number of VPEs present in the core */
+	mfc0	t1, CP0_MVPCONF0
+	srl	t1, t1, MVPCONF0_PVPE_SHIFT
+	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
+	addi	t1, t1, 1
+
+	/* Calculate a mask for the VPE ID from EBase.CPUNum */
+	clz	t1, t1
+	li	t2, 31
+	subu	t1, t2, t1
+	li	t2, 1
+	sll	t1, t2, t1
+	addiu	t1, t1, -1
+
+	/* Retrieve the VPE ID from EBase.CPUNum */
+	mfc0	t9, $15, 1
+	and	t9, t9, t1
+
+1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
+	li	t1, VPEBOOTCFG_SIZE
+	mul	v0, t9, t1
+	lw	t7, COREBOOTCFG_VPECONFIG(t0)
+	addu	v0, v0, t7
+
+#ifdef CONFIG_MIPS_MT
+
+	/* If the core doesn't support MT then return */
+	bnez	t6, 1f
+	 nop
+	jr	ra
+	 nop
+
+	.set	push
+	.set	mt
+
+1:	/* Enter VPE configuration state */
+	dvpe
+	la	t1, 1f
+	jr.hb	t1
+	 nop
+1:	mfc0	t1, CP0_MVPCONTROL
+	ori	t1, t1, MVPCONTROL_VPC
+	mtc0	t1, CP0_MVPCONTROL
+	ehb
+
+	/* Loop through each VPE */
+	lw	t6, COREBOOTCFG_VPEMASK(t0)
+	move	t8, t6
+	li	t5, 0
+
+	/* Check whether the VPE should be running. If not, skip it */
+1:	andi	t0, t6, 1
+	beqz	t0, 2f
+	 nop
+
+	/* Operate on the appropriate TC */
+	mfc0	t0, CP0_VPECONTROL
+	ori	t0, t0, VPECONTROL_TARGTC
+	xori	t0, t0, VPECONTROL_TARGTC
+	or	t0, t0, t5
+	mtc0	t0, CP0_VPECONTROL
+	ehb
+
+	/* Skip the VPE if its TC is not halted */
+	mftc0	t0, CP0_TCHALT
+	beqz	t0, 2f
+	 nop
+
+	/* Calculate a pointer to the VPEs struct vpe_boot_config */
+	li	t0, VPEBOOTCFG_SIZE
+	mul	t0, t0, t5
+	addu	t0, t0, t7
+
+	/* Set the TC restart PC */
+	lw	t1, VPEBOOTCFG_PC(t0)
+	mttc0	t1, CP0_TCRESTART
+
+	/* Set the TC stack pointer */
+	lw	t1, VPEBOOTCFG_SP(t0)
+	mttgpr	t1, sp
+
+	/* Set the TC global pointer */
+	lw	t1, VPEBOOTCFG_GP(t0)
+	mttgpr	t1, gp
+
+	/* Copy config from this VPE */
+	mfc0	t0, CP0_CONFIG
+	mttc0	t0, CP0_CONFIG
+
+	/* Ensure no software interrupts are pending */
+	mttc0	zero, CP0_CAUSE
+	mttc0	zero, CP0_STATUS
+
+	/* Set TC active, not interrupt exempt */
+	mftc0	t0, CP0_TCSTATUS
+	li	t1, ~TCSTATUS_IXMT
+	and	t0, t0, t1
+	ori	t0, t0, TCSTATUS_A
+	mttc0	t0, CP0_TCSTATUS
+
+	/* Clear the TC halt bit */
+	mttc0	zero, CP0_TCHALT
+
+	/* Set VPE active */
+	mftc0	t0, CP0_VPECONF0
+	ori	t0, t0, VPECONF0_VPA
+	mttc0	t0, CP0_VPECONF0
+
+	/* Next VPE */
+2:	srl	t6, t6, 1
+	addi	t5, t5, 1
+	bnez	t6, 1b
+	 nop
+
+	/* Leave VPE configuration state */
+	mfc0	t1, CP0_MVPCONTROL
+	xori	t1, t1, MVPCONTROL_VPC
+	mtc0	t1, CP0_MVPCONTROL
+	ehb
+	evpe
+
+	/* Check whether this VPE is meant to be running */
+	li	t0, 1
+	sll	t0, t0, t9
+	and	t0, t0, t8
+	bnez	t0, 2f
+	 nop
+
+	/* This VPE should be offline, halt the TC */
+	li	t0, TCHALT_H
+	mtc0	t0, CP0_TCHALT
+	la	t0, 1f
+1:	jr.hb	t0
+	 nop
+
+2:	.set	pop
+
+#endif /* CONFIG_MIPS_MT */
+
+	/* Return */
+	jr	ra
+	 nop
+	END(mips_cps_boot_vpes)
+
+#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
+
+	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
+	.macro	psstate	dest
+	.set	push
+	.set	noat
+	lw	$1, TI_CPU(gp)
+	sll	$1, $1, LONGLOG
+	la	\dest, __per_cpu_offset
+	addu	$1, $1, \dest
+	lw	$1, 0($1)
+	la	\dest, cps_cpu_state
+	addu	\dest, \dest, $1
+	.set	pop
+	.endm
+
+LEAF(mips_cps_pm_save)
+	/* Save CPU state */
+	SUSPEND_SAVE_REGS
+	psstate	t1
+	SUSPEND_SAVE_STATIC
+	jr	v0
+	 nop
+	END(mips_cps_pm_save)
+
+LEAF(mips_cps_pm_restore)
+	/* Restore CPU state */
+	psstate	t1
+	RESUME_RESTORE_STATIC
+	RESUME_RESTORE_REGS_RETURN
+	END(mips_cps_pm_restore)
+
+#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
@@ -236,3 +236,14 @@ void arch_cpu_idle(void)
 	else
 		local_irq_enable();
 }
+
+#ifdef CONFIG_CPU_IDLE
+
+int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
+			    struct cpuidle_driver *drv, int index)
+{
+	arch_cpu_idle();
+	return index;
+}
+
+#endif
@@ -54,6 +54,21 @@ void gic_write_compare(cycle_t cnt)
 				(int)(cnt & 0xffffffff));
 }
 
+void gic_write_cpu_compare(cycle_t cnt, int cpu)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
+	GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
+		 (int)(cnt >> 32));
+	GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
+		 (int)(cnt & 0xffffffff));
+
+	local_irq_restore(flags);
+}
+
 cycle_t gic_read_compare(void)
 {
 	unsigned int hi, lo;
@@ -9,12 +9,18 @@
  */
 
 #include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
 #include <asm/mips-cm.h>
 #include <asm/mips-cpc.h>
 
 void __iomem *mips_cpc_base;
 
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
+
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
+
 phys_t __weak mips_cpc_phys_base(void)
 {
 	u32 cpc_base;
@@ -39,6 +45,10 @@ phys_t __weak mips_cpc_phys_base(void)
 int mips_cpc_probe(void)
 {
 	phys_t addr;
+	unsigned cpu;
+
+	for_each_possible_cpu(cpu)
+		spin_lock_init(&per_cpu(cpc_core_lock, cpu));
 
 	addr = mips_cpc_phys_base();
 	if (!addr)
@@ -50,3 +60,21 @@ int mips_cpc_probe(void)
 
 	return 0;
 }
+
+void mips_cpc_lock_other(unsigned int core)
+{
+	unsigned curr_core;
+	preempt_disable();
+	curr_core = current_cpu_data.core;
+	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
+			  per_cpu(cpc_core_lock_flags, curr_core));
+	write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
+}
+
+void mips_cpc_unlock_other(void)
+{
+	unsigned curr_core = current_cpu_data.core;
+	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
+			       per_cpu(cpc_core_lock_flags, curr_core));
+	preempt_enable();
+}
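
Design note on the locking above: both the spinlock and the saved IRQ flags are per-core state indexed by current_cpu_data.core, and preemption is disabled across the lock/unlock pair so the caller cannot migrate to another core while it holds its core's 'other' region; the flags live in a per-CPU variable because spin_lock_irqsave cannot return them through this void API.
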
@ -0,0 +1,716 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (C) 2014 Imagination Technologies
|
||||||
|
* Author: Paul Burton <paul.burton@imgtec.com>
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License as published by the
|
||||||
|
* Free Software Foundation; either version 2 of the License, or (at your
|
||||||
|
* option) any later version.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <linux/init.h>
|
||||||
|
#include <linux/percpu.h>
|
||||||
|
#include <linux/slab.h>
|
||||||
|
|
||||||
|
#include <asm/asm-offsets.h>
|
||||||
|
#include <asm/cacheflush.h>
|
||||||
|
#include <asm/cacheops.h>
|
||||||
|
#include <asm/idle.h>
|
||||||
|
#include <asm/mips-cm.h>
|
||||||
|
#include <asm/mips-cpc.h>
|
||||||
|
#include <asm/mipsmtregs.h>
|
||||||
|
#include <asm/pm.h>
|
||||||
|
#include <asm/pm-cps.h>
|
||||||
|
#include <asm/smp-cps.h>
|
||||||
|
#include <asm/uasm.h>
|
||||||
|
|
||||||
|
/*
|
||||||
|
* cps_nc_entry_fn - type of a generated non-coherent state entry function
|
||||||
|
* @online: the count of online coupled VPEs
|
||||||
|
* @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
|
||||||
|
*
|
||||||
|
* The code entering & exiting non-coherent states is generated at runtime
|
||||||
|
* using uasm, in order to ensure that the compiler cannot insert a stray
|
||||||
|
* memory access at an unfortunate time and to allow the generation of optimal
|
||||||
|
* core-specific code particularly for cache routines. If coupled_coherence
|
||||||
|
* is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
|
||||||
|
* returns the number of VPEs that were in the wait state at the point this
|
||||||
|
* VPE left it. Returns garbage if coupled_coherence is zero or this is not
|
||||||
|
* the entry function for CPS_PM_NC_WAIT.
|
||||||
|
*/
|
||||||
|
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The entry point of the generated non-coherent idle state entry/exit
|
||||||
|
* functions. Actually per-core rather than per-CPU.
|
||||||
|
*/
|
||||||
|
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
|
||||||
|
nc_asm_enter);
|
||||||
|
|
||||||
|
/* Bitmap indicating which states are supported by the system */
|
||||||
|
DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Indicates the number of coupled VPEs ready to operate in a non-coherent
|
||||||
|
* state. Actually per-core rather than per-CPU.
|
||||||
|
*/
|
||||||
|
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
|
||||||
|
static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
|
||||||
|
|
||||||
|
/* Indicates online CPUs coupled with the current CPU */
|
||||||
|
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Used to synchronize entry to deep idle states. Actually per-core rather
|
||||||
|
* than per-CPU.
|
||||||
|
*/
|
||||||
|
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
|
||||||
|
|
||||||
|
/* Saved CPU state across the CPS_PM_POWER_GATED state */
|
||||||
|
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);
|
||||||
|
|
||||||
|
/* A somewhat arbitrary number of labels & relocs for uasm */
|
||||||
|
static struct uasm_label labels[32] __initdata;
|
||||||
|
static struct uasm_reloc relocs[32] __initdata;
|
||||||
|
|
||||||
|
/* CPU dependant sync types */
|
||||||
|
static unsigned stype_intervention;
|
||||||
|
static unsigned stype_memory;
|
||||||
|
static unsigned stype_ordering;
|
||||||
|
|
||||||
|
enum mips_reg {
|
||||||
|
zero, at, v0, v1, a0, a1, a2, a3,
|
||||||
|
t0, t1, t2, t3, t4, t5, t6, t7,
|
||||||
|
s0, s1, s2, s3, s4, s5, s6, s7,
|
||||||
|
t8, t9, k0, k1, gp, sp, fp, ra,
|
||||||
|
};
|
||||||
|
|
||||||
|
bool cps_pm_support_state(enum cps_pm_state state)
|
||||||
|
{
|
||||||
|
return test_bit(state, state_support);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void coupled_barrier(atomic_t *a, unsigned online)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* This function is effectively the same as
|
||||||
|
* cpuidle_coupled_parallel_barrier, which can't be used here since
|
||||||
|
* there's no cpuidle device.
|
||||||
|
*/
|
||||||
|
|
||||||
|
if (!coupled_coherence)
|
||||||
|
return;
|
||||||
|
|
||||||
|
smp_mb__before_atomic_inc();
|
||||||
|
atomic_inc(a);
|
||||||
|
|
||||||
|
while (atomic_read(a) < online)
|
||||||
|
cpu_relax();
|
||||||
|
|
||||||
|
if (atomic_inc_return(a) == online * 2) {
|
||||||
|
atomic_set(a, 0);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
while (atomic_read(a) > online)
|
||||||
|
cpu_relax();
|
||||||
|
}
|
||||||
|
|
||||||
|
int cps_pm_enter_state(enum cps_pm_state state)
|
||||||
|
{
|
||||||
|
unsigned cpu = smp_processor_id();
|
||||||
|
unsigned core = current_cpu_data.core;
|
||||||
|
unsigned online, left;
|
||||||
|
cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
|
||||||
|
u32 *core_ready_count, *nc_core_ready_count;
|
||||||
|
void *nc_addr;
|
||||||
|
cps_nc_entry_fn entry;
|
||||||
|
struct core_boot_config *core_cfg;
|
||||||
|
struct vpe_boot_config *vpe_cfg;
|
||||||
|
|
||||||
|
/* Check that there is an entry function for this state */
|
||||||
|
entry = per_cpu(nc_asm_enter, core)[state];
|
||||||
|
if (!entry)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
/* Calculate which coupled CPUs (VPEs) are online */
|
||||||
|
#ifdef CONFIG_MIPS_MT
|
||||||
|
if (cpu_online(cpu)) {
|
||||||
|
cpumask_and(coupled_mask, cpu_online_mask,
|
||||||
|
&cpu_sibling_map[cpu]);
|
||||||
|
online = cpumask_weight(coupled_mask);
|
||||||
|
cpumask_clear_cpu(cpu, coupled_mask);
|
||||||
|
} else
|
||||||
|
#endif
|
||||||
|
{
|
||||||
|
cpumask_clear(coupled_mask);
|
||||||
|
online = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Setup the VPE to run mips_cps_pm_restore when started again */
|
||||||
|
if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
|
||||||
|
core_cfg = &mips_cps_core_bootcfg[core];
|
||||||
|
vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id];
|
||||||
|
vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
|
||||||
|
vpe_cfg->gp = (unsigned long)current_thread_info();
|
||||||
|
vpe_cfg->sp = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Indicate that this CPU might not be coherent */
|
||||||
|
cpumask_clear_cpu(cpu, &cpu_coherent_mask);
|
||||||
|
smp_mb__after_clear_bit();
|
||||||
|
|
||||||
|
/* Create a non-coherent mapping of the core ready_count */
|
||||||
|
core_ready_count = per_cpu(ready_count, core);
|
||||||
|
nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
|
||||||
|
(unsigned long)core_ready_count);
|
||||||
|
nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
|
||||||
|
nc_core_ready_count = nc_addr;
|
||||||
|
|
||||||
|
/* Ensure ready_count is zero-initialised before the assembly runs */
|
||||||
|
ACCESS_ONCE(*nc_core_ready_count) = 0;
|
||||||
|
coupled_barrier(&per_cpu(pm_barrier, core), online);
|
||||||
|
|
||||||
|
/* Run the generated entry code */
|
||||||
|
left = entry(online, nc_core_ready_count);
|
||||||
|
|
||||||
|
/* Remove the non-coherent mapping of ready_count */
|
||||||
|
kunmap_noncoherent();
|
||||||
|
|
||||||
|
/* Indicate that this CPU is definitely coherent */
|
||||||
|
cpumask_set_cpu(cpu, &cpu_coherent_mask);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If this VPE is the first to leave the non-coherent wait state then
|
||||||
|
* it needs to wake up any coupled VPEs still running their wait
|
||||||
|
* instruction so that they return to cpuidle, which can then complete
|
||||||
|
* coordination between the coupled VPEs & provide the governor with
|
||||||
|
* a chance to reflect on the length of time the VPEs were in the
|
||||||
|
* idle state.
|
||||||
|
*/
|
||||||
|
if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
|
||||||
|
arch_send_call_function_ipi_mask(coupled_mask);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
|
||||||
|
struct uasm_reloc **pr,
|
||||||
|
const struct cache_desc *cache,
|
||||||
|
unsigned op, int lbl)
|
||||||
|
{
|
||||||
|
unsigned cache_size = cache->ways << cache->waybit;
|
||||||
|
unsigned i;
|
||||||
|
const unsigned unroll_lines = 32;
|
||||||
|
|
||||||
|
/* If the cache isn't present this function has it easy */
|
||||||
|
if (cache->flags & MIPS_CACHE_NOT_PRESENT)
|
||||||
|
return;
|
||||||
|
|
||||||
|
/* Load base address */
|
||||||
|
UASM_i_LA(pp, t0, (long)CKSEG0);
|
||||||
|
|
||||||
|
/* Calculate end address */
|
||||||
|
if (cache_size < 0x8000)
|
||||||
|
uasm_i_addiu(pp, t1, t0, cache_size);
|
||||||
|
else
|
||||||
|
UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));
|
||||||
|
|
||||||
|
/* Start of cache op loop */
|
||||||
|
uasm_build_label(pl, *pp, lbl);
|
||||||
|
|
||||||
|
/* Generate the cache ops */
|
||||||
|
for (i = 0; i < unroll_lines; i++)
|
||||||
|
uasm_i_cache(pp, op, i * cache->linesz, t0);
|
||||||
|
|
||||||
|
/* Update the base address */
|
||||||
|
uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
|
||||||
|
|
||||||
|
/* Loop if we haven't reached the end address yet */
|
||||||
|
uasm_il_bne(pp, pr, t0, t1, lbl);
|
||||||
|
uasm_i_nop(pp);
|
||||||
|
}

static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
                                    struct uasm_reloc **pr,
                                    const struct cpuinfo_mips *cpu_info,
                                    int lbl)
{
        unsigned i, fsb_size = 8;
        unsigned num_loads = (fsb_size * 3) / 2;
        unsigned line_stride = 2;
        unsigned line_size = cpu_info->dcache.linesz;
        unsigned perf_counter, perf_event;
        unsigned revision = cpu_info->processor_id & PRID_REV_MASK;

        /*
         * Determine whether this CPU requires an FSB flush, and if so which
         * performance counter/event reflect stalls due to a full FSB.
         */
        switch (__get_cpu_type(cpu_info->cputype)) {
        case CPU_INTERAPTIV:
                perf_counter = 1;
                perf_event = 51;
                break;

        case CPU_PROAPTIV:
                /* Newer proAptiv cores don't require this workaround */
                if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
                        return 0;

                /* On older ones it's unavailable */
                return -1;

        /* CPUs which do not require the workaround */
        case CPU_P5600:
                return 0;

        default:
                WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n");
                return -1;
        }

        /*
         * Ensure that the fill/store buffer (FSB) is not holding the results
         * of a prefetch, since if it is then the CPC sequencer may become
         * stuck in the D3 (ClrBus) state whilst entering a low power state.
         */

        /* Preserve perf counter setup */
        uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
        uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */

        /* Setup perf counter to count FSB full pipeline stalls */
        uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
        uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
        uasm_i_ehb(pp);
        uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
        uasm_i_ehb(pp);

        /* Base address for loads */
        UASM_i_LA(pp, t0, (long)CKSEG0);

        /* Start of clear loop */
        uasm_build_label(pl, *pp, lbl);

        /* Perform some loads to fill the FSB */
        for (i = 0; i < num_loads; i++)
                uasm_i_lw(pp, zero, i * line_size * line_stride, t0);

        /*
         * Invalidate the new D-cache entries so that the cache will need
         * refilling (via the FSB) if the loop is executed again.
         */
        for (i = 0; i < num_loads; i++) {
                uasm_i_cache(pp, Hit_Invalidate_D,
                             i * line_size * line_stride, t0);
                uasm_i_cache(pp, Hit_Writeback_Inv_SD,
                             i * line_size * line_stride, t0);
        }

        /* Completion barrier */
        uasm_i_sync(pp, stype_memory);
        uasm_i_ehb(pp);

        /* Check whether the pipeline stalled due to the FSB being full */
        uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */

        /* Loop if it didn't */
        uasm_il_beqz(pp, pr, t1, lbl);
        uasm_i_nop(pp);

        /* Restore perf counter 1. The count may well now be wrong... */
        uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
        uasm_i_ehb(pp);
        uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
        uasm_i_ehb(pp);

        return 0;
}
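The `(perf_event << 5) | 0xf` value written to PerfCtl packs the event number together with the mode-enable bits, so the counter runs in every privilege mode. A hedged sketch of that encoding (bit names assumed from the MIPS32 PerfCtl layout; verify against the core's SUM before relying on it):

        #include <stdint.h>

        #define PERFCTL_EXL      (1u << 0)  /* count while EXL is set */
        #define PERFCTL_K        (1u << 1)  /* count in kernel mode */
        #define PERFCTL_S        (1u << 2)  /* count in supervisor mode */
        #define PERFCTL_U        (1u << 3)  /* count in user mode */
        #define PERFCTL_EVENT(e) (((e) & 0x7f) << 5)

        /* Event 51 on interAptiv counts FSB-full pipeline stalls; the result
         * equals (51 << 5) | 0xf, the constant built by the code above. */
        static inline uint32_t perfctl_fsb_stalls(void)
        {
                return PERFCTL_EVENT(51) |
                       PERFCTL_EXL | PERFCTL_K | PERFCTL_S | PERFCTL_U;
        }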

static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
                                       struct uasm_reloc **pr,
                                       unsigned r_addr, int lbl)
{
        uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
        uasm_build_label(pl, *pp, lbl);
        uasm_i_ll(pp, t1, 0, r_addr);
        uasm_i_or(pp, t1, t1, t0);
        uasm_i_sc(pp, t1, 0, r_addr);
        uasm_il_beqz(pp, pr, t1, lbl);
        uasm_i_nop(pp);
}
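The ll/or/sc/beqz sequence emitted here is the classic load-linked/store-conditional retry loop. A C11 analogue, illustrative only (compare_exchange plays the role of the sc success check):

        #include <stdatomic.h>
        #include <stdint.h>

        /* Atomically set the top bit of a 32-bit word, retrying until the
         * update lands, just as the generated ll/sc loop does. */
        static void set_top_bit(_Atomic uint32_t *word)
        {
                uint32_t old = atomic_load(word);

                /* 'old' is refreshed on failure; retry as ll/sc would */
                while (!atomic_compare_exchange_weak(word, &old,
                                                     old | 0x80000000u))
                        ;
        }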

static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        u32 *buf, *p;
        const unsigned r_online = a0;
        const unsigned r_nc_count = a1;
        const unsigned r_pcohctl = t7;
        const unsigned max_instrs = 256;
        unsigned cpc_cmd;
        int err;
        enum {
                lbl_incready = 1,
                lbl_poll_cont,
                lbl_secondary_hang,
                lbl_disable_coherence,
                lbl_flush_fsb,
                lbl_invicache,
                lbl_flushdcache,
                lbl_hang,
                lbl_set_cont,
                lbl_secondary_cont,
                lbl_decready,
        };

        /* Allocate a buffer to hold the generated code */
        p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
        if (!buf)
                return NULL;

        /* Clear labels & relocs ready for (re)use */
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
                /*
                 * Save CPU state. Note the non-standard calling convention
                 * with the return address placed in v0 to avoid clobbering
                 * the ra register before it is saved.
                 */
                UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
                uasm_i_jalr(&p, v0, t0);
                uasm_i_nop(&p);
        }

        /*
         * Load addresses of required CM & CPC registers. This is done early
         * because they're needed in both the enable & disable coherence steps
         * but in the coupled case the enable step will only run on one VPE.
         */
        UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());

        if (coupled_coherence) {
                /* Increment ready_count */
                uasm_i_sync(&p, stype_ordering);
                uasm_build_label(&l, p, lbl_incready);
                uasm_i_ll(&p, t1, 0, r_nc_count);
                uasm_i_addiu(&p, t2, t1, 1);
                uasm_i_sc(&p, t2, 0, r_nc_count);
                uasm_il_beqz(&p, &r, t2, lbl_incready);
                uasm_i_addiu(&p, t1, t1, 1);

                /* Ordering barrier */
                uasm_i_sync(&p, stype_ordering);

                /*
                 * If this is the last VPE to become ready for non-coherence
                 * then it should branch below.
                 */
                uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
                uasm_i_nop(&p);

                if (state < CPS_PM_POWER_GATED) {
                        /*
                         * Otherwise this is not the last VPE to become ready
                         * for non-coherence. It needs to wait until coherence
                         * has been disabled before proceeding, which it will do
                         * by polling for the top bit of ready_count being set.
                         */
                        uasm_i_addiu(&p, t1, zero, -1);
                        uasm_build_label(&l, p, lbl_poll_cont);
                        uasm_i_lw(&p, t0, 0, r_nc_count);
                        uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
                        uasm_i_ehb(&p);
                        uasm_i_yield(&p, zero, t1);
                        uasm_il_b(&p, &r, lbl_poll_cont);
                        uasm_i_nop(&p);
                } else {
                        /*
                         * The core will lose power & this VPE will not continue
                         * so it can simply halt here.
                         */
                        uasm_i_addiu(&p, t0, zero, TCHALT_H);
                        uasm_i_mtc0(&p, t0, 2, 4);
                        uasm_build_label(&l, p, lbl_secondary_hang);
                        uasm_il_b(&p, &r, lbl_secondary_hang);
                        uasm_i_nop(&p);
                }
        }

        /*
         * This is the point of no return - this VPE will now proceed to
         * disable coherence. At this point we *must* be sure that no other
         * VPE within the core will interfere with the L1 dcache.
         */
        uasm_build_label(&l, p, lbl_disable_coherence);

        /* Invalidate the L1 icache */
        cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
                              Index_Invalidate_I, lbl_invicache);

        /* Writeback & invalidate the L1 dcache */
        cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
                              Index_Writeback_Inv_D, lbl_flushdcache);

        /* Completion barrier */
        uasm_i_sync(&p, stype_memory);
        uasm_i_ehb(&p);

        /*
         * Disable all but self interventions. The load from COHCTL is defined
         * by the interAptiv & proAptiv SUMs as ensuring that the operation
         * resulting from the preceding store is complete.
         */
        uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
        uasm_i_sw(&p, t0, 0, r_pcohctl);
        uasm_i_lw(&p, t0, 0, r_pcohctl);

        /* Sync to ensure previous interventions are complete */
        uasm_i_sync(&p, stype_intervention);
        uasm_i_ehb(&p);

        /* Disable coherence */
        uasm_i_sw(&p, zero, 0, r_pcohctl);
        uasm_i_lw(&p, t0, 0, r_pcohctl);

        if (state >= CPS_PM_CLOCK_GATED) {
                err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
                                        lbl_flush_fsb);
                if (err)
                        goto out_err;

                /* Determine the CPC command to issue */
                switch (state) {
                case CPS_PM_CLOCK_GATED:
                        cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
                        break;
                case CPS_PM_POWER_GATED:
                        cpc_cmd = CPC_Cx_CMD_PWRDOWN;
                        break;
                default:
                        BUG();
                        goto out_err;
                }

                /* Issue the CPC command */
                UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
                uasm_i_addiu(&p, t1, zero, cpc_cmd);
                uasm_i_sw(&p, t1, 0, t0);

                if (state == CPS_PM_POWER_GATED) {
                        /* If anything goes wrong just hang */
                        uasm_build_label(&l, p, lbl_hang);
                        uasm_il_b(&p, &r, lbl_hang);
                        uasm_i_nop(&p);

                        /*
                         * There's no point generating more code, the core is
                         * powered down & if powered back up will run from the
                         * reset vector not from here.
                         */
                        goto gen_done;
                }

                /* Completion barrier */
                uasm_i_sync(&p, stype_memory);
                uasm_i_ehb(&p);
        }

        if (state == CPS_PM_NC_WAIT) {
                /*
                 * At this point it is safe for all VPEs to proceed with
                 * execution. This VPE will set the top bit of ready_count
                 * to indicate to the other VPEs that they may continue.
                 */
                if (coupled_coherence)
                        cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
                                            lbl_set_cont);

                /*
                 * VPEs which did not disable coherence will continue
                 * executing, after coherence has been disabled, from this
                 * point.
                 */
                uasm_build_label(&l, p, lbl_secondary_cont);

                /* Now perform our wait */
                uasm_i_wait(&p, 0);
        }

        /*
         * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
         * will run this. The first will actually re-enable coherence & the
         * rest will just be performing a rather unusual nop.
         */
        uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
        uasm_i_sw(&p, t0, 0, r_pcohctl);
        uasm_i_lw(&p, t0, 0, r_pcohctl);

        /* Completion barrier */
        uasm_i_sync(&p, stype_memory);
        uasm_i_ehb(&p);

        if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
                /* Decrement ready_count */
                uasm_build_label(&l, p, lbl_decready);
                uasm_i_sync(&p, stype_ordering);
                uasm_i_ll(&p, t1, 0, r_nc_count);
                uasm_i_addiu(&p, t2, t1, -1);
                uasm_i_sc(&p, t2, 0, r_nc_count);
                uasm_il_beqz(&p, &r, t2, lbl_decready);
                uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);

                /* Ordering barrier */
                uasm_i_sync(&p, stype_ordering);
        }

        if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
                /*
                 * At this point it is safe for all VPEs to proceed with
                 * execution. This VPE will set the top bit of ready_count
                 * to indicate to the other VPEs that they may continue.
                 */
                cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

                /*
                 * This core will be reliant upon another core sending a
                 * power-up command to the CPC in order to resume operation.
                 * Thus an arbitrary VPE can't trigger the core leaving the
                 * idle state and the one that disables coherence might as well
                 * be the one to re-enable it. The rest will continue from here
                 * after that has been done.
                 */
                uasm_build_label(&l, p, lbl_secondary_cont);

                /* Ordering barrier */
                uasm_i_sync(&p, stype_ordering);
        }

        /* The core is coherent, time to return to C code */
        uasm_i_jr(&p, ra);
        uasm_i_nop(&p);

gen_done:
        /* Ensure the code didn't exceed the resources allocated for it */
        BUG_ON((p - buf) > max_instrs);
        BUG_ON((l - labels) > ARRAY_SIZE(labels));
        BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

        /* Patch branch offsets */
        uasm_resolve_relocs(relocs, labels);

        /* Flush the icache */
        local_flush_icache_range((unsigned long)buf, (unsigned long)p);

        return buf;
out_err:
        kfree(buf);
        return NULL;
}

static int __init cps_gen_core_entries(unsigned cpu)
{
        enum cps_pm_state state;
        unsigned core = cpu_data[cpu].core;
        unsigned dlinesz = cpu_data[cpu].dcache.linesz;
        void *entry_fn, *core_rc;

        for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
                if (per_cpu(nc_asm_enter, core)[state])
                        continue;
                if (!test_bit(state, state_support))
                        continue;

                entry_fn = cps_gen_entry_code(cpu, state);
                if (!entry_fn) {
                        pr_err("Failed to generate core %u state %u entry\n",
                               core, state);
                        clear_bit(state, state_support);
                }

                per_cpu(nc_asm_enter, core)[state] = entry_fn;
        }

        if (!per_cpu(ready_count, core)) {
                core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
                if (!core_rc) {
                        pr_err("Failed to allocate core %u ready_count\n", core);
                        return -ENOMEM;
                }
                per_cpu(ready_count_alloc, core) = core_rc;

                /* Ensure ready_count is aligned to a cacheline boundary */
                core_rc += dlinesz - 1;
                core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
                per_cpu(ready_count, core) = core_rc;
        }

        return 0;
}
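The ready_count pointer arithmetic above is the usual align-up idiom: over-allocate by one cache line, then round the pointer up to a line boundary so the counter never shares a line with unrelated data. A userspace sketch of the same trick (hypothetical helper, assuming linesz is a power of two):

        #include <stdint.h>
        #include <stdlib.h>

        /* Returns a pointer aligned to 'linesz' inside a fresh allocation;
         * '*raw' receives the unaligned pointer to pass to free() later. */
        static void *alloc_line_aligned(size_t size, uintptr_t linesz, void **raw)
        {
                uintptr_t p = (uintptr_t)malloc(size + linesz - 1);

                if (!p)
                        return NULL;
                *raw = (void *)p;                               /* keep for free() */
                return (void *)((p + linesz - 1) & ~(linesz - 1));  /* round up */
        }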

static int __init cps_pm_init(void)
{
        unsigned cpu;
        int err;

        /* Detect appropriate sync types for the system */
        switch (current_cpu_data.cputype) {
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
        case CPU_M5150:
        case CPU_P5600:
                stype_intervention = 0x2;
                stype_memory = 0x3;
                stype_ordering = 0x10;
                break;

        default:
                pr_warn("Power management is using heavyweight sync 0\n");
        }

        /* A CM is required for all non-coherent states */
        if (!mips_cm_present()) {
                pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
                goto out;
        }

        /*
         * If interrupts were enabled whilst running a wait instruction on a
         * non-coherent core then the VPE may end up processing interrupts
         * whilst non-coherent. That would be bad.
         */
        if (cpu_wait == r4k_wait_irqoff)
                set_bit(CPS_PM_NC_WAIT, state_support);
        else
                pr_warn("pm-cps: non-coherent wait unavailable\n");

        /* Detect whether a CPC is present */
        if (mips_cpc_present()) {
                /* Detect whether clock gating is implemented */
                if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK)
                        set_bit(CPS_PM_CLOCK_GATED, state_support);
                else
                        pr_warn("pm-cps: CPC does not support clock gating\n");

                /* Power gating is available with CPS SMP & any CPC */
                if (mips_cps_smp_in_use())
                        set_bit(CPS_PM_POWER_GATED, state_support);
                else
                        pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
        } else {
                pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
        }

        for_each_present_cpu(cpu) {
                err = cps_gen_core_entries(cpu);
                if (err)
                        return err;
        }
out:
        return 0;
}
arch_initcall(cps_pm_init);

arch/mips/kernel/pm.c (new file, 99 lines)
@@ -0,0 +1,99 @@
/*
 * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * CPU PM notifiers for saving/restoring general CPU state.
 */

#include <linux/cpu_pm.h>
#include <linux/init.h>

#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/pm.h>
#include <asm/watch.h>

/* Used by PM helper macros in asm/pm.h */
struct mips_static_suspend_state mips_static_suspend_state;

/**
 * mips_cpu_save() - Save general CPU state.
 * Ensures that general CPU context is saved, notably FPU and DSP.
 */
static int mips_cpu_save(void)
{
        /* Save FPU state */
        lose_fpu(1);

        /* Save DSP state */
        save_dsp(current);

        return 0;
}

/**
 * mips_cpu_restore() - Restore general CPU state.
 * Restores important CPU context.
 */
static void mips_cpu_restore(void)
{
        unsigned int cpu = smp_processor_id();

        /* Restore ASID */
        if (current->mm)
                write_c0_entryhi(cpu_asid(cpu, current->mm));

        /* Restore DSP state */
        restore_dsp(current);

        /* Restore UserLocal */
        if (cpu_has_userlocal)
                write_c0_userlocal(current_thread_info()->tp_value);

        /* Restore watch registers */
        __restore_watch();
}

/**
 * mips_pm_notifier() - Notifier for preserving general CPU context.
 * @self: Notifier block.
 * @cmd:  CPU PM event.
 * @v:    Private data (unused).
 *
 * This is called when a CPU power management event occurs, and is used to
 * ensure that important CPU context is preserved across a CPU power down.
 */
static int mips_pm_notifier(struct notifier_block *self, unsigned long cmd,
                            void *v)
{
        int ret;

        switch (cmd) {
        case CPU_PM_ENTER:
                ret = mips_cpu_save();
                if (ret)
                        return NOTIFY_STOP;
                break;
        case CPU_PM_ENTER_FAILED:
        case CPU_PM_EXIT:
                mips_cpu_restore();
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block mips_pm_notifier_block = {
        .notifier_call = mips_pm_notifier,
};

static int __init mips_pm_init(void)
{
        return cpu_pm_register_notifier(&mips_pm_notifier_block);
}
arch_initcall(mips_pm_init);
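As a usage note, this notifier is driven by the CPU PM core: a platform idle driver brackets its low-power entry with cpu_pm_enter()/cpu_pm_exit(). A minimal kernel-context sketch with a hypothetical caller (the example function is an assumption; cpu_pm_enter()/cpu_pm_exit() are the real API):

        /* Hypothetical idle-driver hook, illustrative only.
         * CPU_PM_ENTER runs mips_cpu_save(); CPU_PM_EXIT runs mips_cpu_restore(). */
        static int example_enter_lowpower(void)
        {
                int err;

                err = cpu_pm_enter();   /* fires CPU_PM_ENTER notifiers */
                if (err)
                        return err;     /* a notifier vetoed the transition */

                /* ... architecture-specific low power entry would go here ... */

                cpu_pm_exit();          /* fires CPU_PM_EXIT notifiers */
                return 0;
        }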
@@ -20,104 +20,43 @@
 #include <asm/mips-cpc.h>
 #include <asm/mips_mt.h>
 #include <asm/mipsregs.h>
+#include <asm/pm-cps.h>
 #include <asm/smp-cps.h>
 #include <asm/time.h>
 #include <asm/uasm.h>
 
 static DECLARE_BITMAP(core_power, NR_CPUS);
 
-struct boot_config mips_cps_bootcfg;
+struct core_boot_config *mips_cps_core_bootcfg;
 
-static void init_core(void)
+static unsigned core_vpe_count(unsigned core)
 {
-        unsigned int nvpes, t;
-        u32 mvpconf0, vpeconf0, vpecontrol, tcstatus, tcbind, status;
+        unsigned cfg;
 
-        if (!cpu_has_mipsmt)
-                return;
+        if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
+                return 1;
 
-        /* Enter VPE configuration state */
-        dvpe();
-        set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-        /* Retrieve the count of VPEs in this core */
-        mvpconf0 = read_c0_mvpconf0();
-        nvpes = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-        smp_num_siblings = nvpes;
-
-        for (t = 1; t < nvpes; t++) {
-                /* Use a 1:1 mapping of TC index to VPE index */
-                settc(t);
-
-                /* Bind 1 TC to this VPE */
-                tcbind = read_tc_c0_tcbind();
-                tcbind &= ~TCBIND_CURVPE;
-                tcbind |= t << TCBIND_CURVPE_SHIFT;
-                write_tc_c0_tcbind(tcbind);
-
-                /* Set exclusive TC, non-active, master */
-                vpeconf0 = read_vpe_c0_vpeconf0();
-                vpeconf0 &= ~(VPECONF0_XTC | VPECONF0_VPA);
-                vpeconf0 |= t << VPECONF0_XTC_SHIFT;
-                vpeconf0 |= VPECONF0_MVP;
-                write_vpe_c0_vpeconf0(vpeconf0);
-
-                /* Declare TC non-active, non-allocatable & interrupt exempt */
-                tcstatus = read_tc_c0_tcstatus();
-                tcstatus &= ~(TCSTATUS_A | TCSTATUS_DA);
-                tcstatus |= TCSTATUS_IXMT;
-                write_tc_c0_tcstatus(tcstatus);
-
-                /* Halt the TC */
-                write_tc_c0_tchalt(TCHALT_H);
-
-                /* Allow only 1 TC to execute */
-                vpecontrol = read_vpe_c0_vpecontrol();
-                vpecontrol &= ~VPECONTROL_TE;
-                write_vpe_c0_vpecontrol(vpecontrol);
-
-                /* Copy (most of) Status from VPE 0 */
-                status = read_c0_status();
-                status &= ~(ST0_IM | ST0_IE | ST0_KSU);
-                status |= ST0_CU0;
-                write_vpe_c0_status(status);
-
-                /* Copy Config from VPE 0 */
-                write_vpe_c0_config(read_c0_config());
-                write_vpe_c0_config7(read_c0_config7());
-
-                /* Ensure no software interrupts are pending */
-                write_vpe_c0_cause(0);
-
-                /* Sync Count */
-                write_vpe_c0_count(read_c0_count());
-        }
-
-        /* Leave VPE configuration state */
-        clear_c0_mvpcontrol(MVPCONTROL_VPC);
+        write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+        cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
+        return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
 }
 
 static void __init cps_smp_setup(void)
 {
         unsigned int ncores, nvpes, core_vpes;
         int c, v;
-        u32 core_cfg, *entry_code;
 
         /* Detect & record VPE topology */
         ncores = mips_cm_numcores();
         pr_info("VPE topology ");
         for (c = nvpes = 0; c < ncores; c++) {
-                if (cpu_has_mipsmt && config_enabled(CONFIG_MIPS_MT_SMP)) {
-                        write_gcr_cl_other(c << CM_GCR_Cx_OTHER_CORENUM_SHF);
-                        core_cfg = read_gcr_co_config();
-                        core_vpes = ((core_cfg & CM_GCR_Cx_CONFIG_PVPE_MSK) >>
-                                     CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
-                } else {
-                        core_vpes = 1;
-                }
+                core_vpes = core_vpe_count(c);
 
                 pr_cont("%c%u", c ? ',' : '{', core_vpes);
 
+                /* Use the number of VPEs in core 0 for smp_num_siblings */
+                if (!c)
+                        smp_num_siblings = core_vpes;
+
                 for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
                         cpu_data[nvpes + v].core = c;
 #ifdef CONFIG_MIPS_MT_SMP
@@ -137,19 +76,14 @@ static void __init cps_smp_setup(void)
                 __cpu_logical_map[v] = v;
         }
 
+        /* Set a coherent default CCA (CWB) */
+        change_c0_config(CONF_CM_CMASK, 0x5);
+
         /* Core 0 is powered up (we're running on it) */
         bitmap_set(core_power, 0, 1);
 
-        /* Disable MT - we only want to run 1 TC per VPE */
-        if (cpu_has_mipsmt)
-                dmt();
-
         /* Initialise core 0 */
-        init_core();
-
-        /* Patch the start of mips_cps_core_entry to provide the CM base */
-        entry_code = (u32 *)&mips_cps_core_entry;
-        UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
+        mips_cps_core_init();
 
         /* Make core 0 coherent with everything */
         write_gcr_cl_coherence(0xff);
@@ -157,15 +91,99 @@ static void __init cps_smp_setup(void)
 
 static void __init cps_prepare_cpus(unsigned int max_cpus)
 {
+        unsigned ncores, core_vpes, c, cca;
+        bool cca_unsuitable;
+        u32 *entry_code;
+
         mips_mt_set_cpuoptions();
+
+        /* Detect whether the CCA is unsuited to multi-core SMP */
+        cca = read_c0_config() & CONF_CM_CMASK;
+        switch (cca) {
+        case 0x4: /* CWBE */
+        case 0x5: /* CWB */
+                /* The CCA is coherent, multi-core is fine */
+                cca_unsuitable = false;
+                break;
+
+        default:
+                /* CCA is not coherent, multi-core is not usable */
+                cca_unsuitable = true;
+        }
+
+        /* Warn the user if the CCA prevents multi-core */
+        ncores = mips_cm_numcores();
+        if (cca_unsuitable && ncores > 1) {
+                pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
+                        cca);
+
+                for_each_present_cpu(c) {
+                        if (cpu_data[c].core)
+                                set_cpu_present(c, false);
+                }
+        }
+
+        /*
+         * Patch the start of mips_cps_core_entry to provide:
+         *
+         * v0 = CM base address
+         * s0 = kseg0 CCA
+         */
+        entry_code = (u32 *)&mips_cps_core_entry;
+        UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
+        uasm_i_addiu(&entry_code, 16, 0, cca);
+        dma_cache_wback_inv((unsigned long)&mips_cps_core_entry,
+                            (void *)entry_code - (void *)&mips_cps_core_entry);
+
+        /* Allocate core boot configuration structs */
+        mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
+                                        GFP_KERNEL);
+        if (!mips_cps_core_bootcfg) {
+                pr_err("Failed to allocate boot config for %u cores\n", ncores);
+                goto err_out;
+        }
+
+        /* Allocate VPE boot configuration structs */
+        for (c = 0; c < ncores; c++) {
+                core_vpes = core_vpe_count(c);
+                mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
+                                sizeof(*mips_cps_core_bootcfg[c].vpe_config),
+                                GFP_KERNEL);
+                if (!mips_cps_core_bootcfg[c].vpe_config) {
+                        pr_err("Failed to allocate %u VPE boot configs\n",
+                               core_vpes);
+                        goto err_out;
+                }
+        }
+
+        /* Mark this CPU as booted */
+        atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
+                   1 << cpu_vpe_id(&current_cpu_data));
+
+        return;
+err_out:
+        /* Clean up allocations */
+        if (mips_cps_core_bootcfg) {
+                for (c = 0; c < ncores; c++)
+                        kfree(mips_cps_core_bootcfg[c].vpe_config);
+                kfree(mips_cps_core_bootcfg);
+                mips_cps_core_bootcfg = NULL;
+        }
+
+        /* Effectively disable SMP by declaring CPUs not present */
+        for_each_possible_cpu(c) {
+                if (c == 0)
+                        continue;
+                set_cpu_present(c, false);
+        }
 }
 
-static void boot_core(struct boot_config *cfg)
+static void boot_core(unsigned core)
 {
         u32 access;
 
         /* Select the appropriate core */
-        write_gcr_cl_other(cfg->core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+        write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
 
         /* Set its reset vector */
         write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
@@ -175,104 +193,74 @@ static void boot_core(struct boot_config *cfg)
 
         /* Ensure the core can access the GCRs */
         access = read_gcr_access();
-        access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + cfg->core);
+        access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
         write_gcr_access(access);
 
-        /* Copy cfg */
-        mips_cps_bootcfg = *cfg;
-
         if (mips_cpc_present()) {
-                /* Select the appropriate core */
-                write_cpc_cl_other(cfg->core << CPC_Cx_OTHER_CORENUM_SHF);
-
                 /* Reset the core */
+                mips_cpc_lock_other(core);
                 write_cpc_co_cmd(CPC_Cx_CMD_RESET);
+                mips_cpc_unlock_other();
         } else {
                 /* Take the core out of reset */
                 write_gcr_co_reset_release(0);
         }
 
         /* The core is now powered up */
-        bitmap_set(core_power, cfg->core, 1);
+        bitmap_set(core_power, core, 1);
 }
 
-static void boot_vpe(void *info)
+static void remote_vpe_boot(void *dummy)
 {
-        struct boot_config *cfg = info;
-        u32 tcstatus, vpeconf0;
-
-        /* Enter VPE configuration state */
-        dvpe();
-        set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-        settc(cfg->vpe);
-
-        /* Set the TC restart PC */
-        write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
-
-        /* Activate the TC, allow interrupts */
-        tcstatus = read_tc_c0_tcstatus();
-        tcstatus &= ~TCSTATUS_IXMT;
-        tcstatus |= TCSTATUS_A;
-        write_tc_c0_tcstatus(tcstatus);
-
-        /* Clear the TC halt bit */
-        write_tc_c0_tchalt(0);
-
-        /* Activate the VPE */
-        vpeconf0 = read_vpe_c0_vpeconf0();
-        vpeconf0 |= VPECONF0_VPA;
-        write_vpe_c0_vpeconf0(vpeconf0);
-
-        /* Set the stack & global pointer registers */
-        write_tc_gpr_sp(cfg->sp);
-        write_tc_gpr_gp(cfg->gp);
-
-        /* Leave VPE configuration state */
-        clear_c0_mvpcontrol(MVPCONTROL_VPC);
-
-        /* Enable other VPEs to execute */
-        evpe(EVPE_ENABLE);
+        mips_cps_boot_vpes();
 }
 
 static void cps_boot_secondary(int cpu, struct task_struct *idle)
 {
-        struct boot_config cfg;
+        unsigned core = cpu_data[cpu].core;
+        unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+        struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+        struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
         unsigned int remote;
         int err;
 
-        cfg.core = cpu_data[cpu].core;
-        cfg.vpe = cpu_vpe_id(&cpu_data[cpu]);
-        cfg.pc = (unsigned long)&smp_bootstrap;
-        cfg.sp = __KSTK_TOS(idle);
-        cfg.gp = (unsigned long)task_thread_info(idle);
+        vpe_cfg->pc = (unsigned long)&smp_bootstrap;
+        vpe_cfg->sp = __KSTK_TOS(idle);
+        vpe_cfg->gp = (unsigned long)task_thread_info(idle);
+
+        atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);
+
+        preempt_disable();
 
-        if (!test_bit(cfg.core, core_power)) {
+        if (!test_bit(core, core_power)) {
                 /* Boot a VPE on a powered down core */
-                boot_core(&cfg);
-                return;
+                boot_core(core);
+                goto out;
         }
 
-        if (cfg.core != current_cpu_data.core) {
+        if (core != current_cpu_data.core) {
                 /* Boot a VPE on another powered up core */
                 for (remote = 0; remote < NR_CPUS; remote++) {
-                        if (cpu_data[remote].core != cfg.core)
+                        if (cpu_data[remote].core != core)
                                 continue;
                         if (cpu_online(remote))
                                 break;
                 }
                 BUG_ON(remote >= NR_CPUS);
 
-                err = smp_call_function_single(remote, boot_vpe, &cfg, 1);
+                err = smp_call_function_single(remote, remote_vpe_boot,
                                                NULL, 1);
                 if (err)
                         panic("Failed to call remote CPU\n");
-                return;
+                goto out;
         }
 
         BUG_ON(!cpu_has_mipsmt);
 
         /* Boot a VPE on this core */
-        boot_vpe(&cfg);
+        mips_cps_boot_vpes();
+out:
+        preempt_enable();
 }
 
 static void cps_init_secondary(void)
@@ -281,10 +269,6 @@ static void cps_init_secondary(void)
         if (cpu_has_mipsmt)
                 dmt();
 
-        /* TODO: revisit this assumption once hotplug is implemented */
-        if (cpu_vpe_id(&current_cpu_data) == 0)
-                init_core();
-
         change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
                                  STATUSF_IP6 | STATUSF_IP7);
 }
@@ -302,6 +286,148 @@ static void cps_smp_finish(void)
         local_irq_enable();
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int cps_cpu_disable(void)
+{
+        unsigned cpu = smp_processor_id();
+        struct core_boot_config *core_cfg;
+
+        if (!cpu)
+                return -EBUSY;
+
+        if (!cps_pm_support_state(CPS_PM_POWER_GATED))
+                return -EINVAL;
+
+        core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
+        atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
+        smp_mb__after_atomic_dec();
+        set_cpu_online(cpu, false);
+        cpu_clear(cpu, cpu_callin_map);
+
+        return 0;
+}
+
+static DECLARE_COMPLETION(cpu_death_chosen);
+static unsigned cpu_death_sibling;
+static enum {
+        CPU_DEATH_HALT,
+        CPU_DEATH_POWER,
+} cpu_death;
+
+void play_dead(void)
+{
+        unsigned cpu, core;
+
+        local_irq_disable();
+        idle_task_exit();
+        cpu = smp_processor_id();
+        cpu_death = CPU_DEATH_POWER;
+
+        if (cpu_has_mipsmt) {
+                core = cpu_data[cpu].core;
+
+                /* Look for another online VPE within the core */
+                for_each_online_cpu(cpu_death_sibling) {
+                        if (cpu_data[cpu_death_sibling].core != core)
+                                continue;
+
+                        /*
+                         * There is an online VPE within the core. Just halt
+                         * this TC and leave the core alone.
+                         */
+                        cpu_death = CPU_DEATH_HALT;
+                        break;
+                }
+        }
+
+        /* This CPU has chosen its way out */
+        complete(&cpu_death_chosen);
+
+        if (cpu_death == CPU_DEATH_HALT) {
+                /* Halt this TC */
+                write_c0_tchalt(TCHALT_H);
+                instruction_hazard();
+        } else {
+                /* Power down the core */
+                cps_pm_enter_state(CPS_PM_POWER_GATED);
+        }
+
+        /* This should never be reached */
+        panic("Failed to offline CPU %u", cpu);
+}
+
+static void wait_for_sibling_halt(void *ptr_cpu)
+{
+        unsigned cpu = (unsigned)ptr_cpu;
+        unsigned vpe_id = cpu_data[cpu].vpe_id;
+        unsigned halted;
+        unsigned long flags;
+
+        do {
+                local_irq_save(flags);
+                settc(vpe_id);
+                halted = read_tc_c0_tchalt();
+                local_irq_restore(flags);
+        } while (!(halted & TCHALT_H));
+}
+
+static void cps_cpu_die(unsigned int cpu)
+{
+        unsigned core = cpu_data[cpu].core;
+        unsigned stat;
+        int err;
+
+        /* Wait for the cpu to choose its way out */
+        if (!wait_for_completion_timeout(&cpu_death_chosen,
+                                         msecs_to_jiffies(5000))) {
+                pr_err("CPU%u: didn't offline\n", cpu);
+                return;
+        }
+
+        /*
+         * Now wait for the CPU to actually offline. Without doing this that
+         * offlining may race with one or more of:
+         *
+         *   - Onlining the CPU again.
+         *   - Powering down the core if another VPE within it is offlined.
+         *   - A sibling VPE entering a non-coherent state.
+         *
+         * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
+         * with which we could race, so do nothing.
+         */
+        if (cpu_death == CPU_DEATH_POWER) {
+                /*
+                 * Wait for the core to enter a powered down or clock gated
+                 * state, the latter happening when a JTAG probe is connected
+                 * in which case the CPC will refuse to power down the core.
+                 */
+                do {
+                        mips_cpc_lock_other(core);
+                        stat = read_cpc_co_stat_conf();
+                        stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
+                        mips_cpc_unlock_other();
+                } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
+                         stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
+                         stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);
+
+                /* Indicate the core is powered off */
+                bitmap_clear(core_power, core, 1);
+        } else if (cpu_has_mipsmt) {
+                /*
+                 * Have a CPU with access to the offlined CPUs registers wait
+                 * for its TC to halt.
+                 */
+                err = smp_call_function_single(cpu_death_sibling,
+                                               wait_for_sibling_halt,
+                                               (void *)cpu, 1);
+                if (err)
+                        panic("Failed to call remote sibling CPU\n");
+        }
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
 static struct plat_smp_ops cps_smp_ops = {
         .smp_setup = cps_smp_setup,
         .prepare_cpus = cps_prepare_cpus,
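The handshake between play_dead() and cps_cpu_die() above can be modelled in userspace with a condition variable; plain pthreads stand in for the kernel completion API here, and the function names are made up for illustration:

        #include <pthread.h>
        #include <stdio.h>

        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        static int death_chosen;

        /* Models play_dead(): signal once halt vs power-down is decided. */
        static void *play_dead_thread(void *unused)
        {
                pthread_mutex_lock(&lock);
                death_chosen = 1;       /* "this CPU has chosen its way out" */
                pthread_cond_signal(&cond);
                pthread_mutex_unlock(&lock);
                return NULL;
        }

        /* Models cps_cpu_die(): wait before poking the hardware (the kernel
         * version additionally applies a 5 second timeout). */
        static void wait_for_death_choice(void)
        {
                pthread_mutex_lock(&lock);
                while (!death_chosen)
                        pthread_cond_wait(&cond, &lock);
                pthread_mutex_unlock(&lock);
                printf("sibling has chosen halt or power-down\n");
        }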
@@ -310,8 +436,18 @@ static struct plat_smp_ops cps_smp_ops = {
         .smp_finish = cps_smp_finish,
         .send_ipi_single = gic_send_ipi_single,
         .send_ipi_mask = gic_send_ipi_mask,
+#ifdef CONFIG_HOTPLUG_CPU
+        .cpu_disable = cps_cpu_disable,
+        .cpu_die = cps_cpu_die,
+#endif
 };
 
+bool mips_cps_smp_in_use(void)
+{
+        extern struct plat_smp_ops *mp_ops;
+        return mp_ops == &cps_smp_ops;
+}
+
 int register_cps_smp_ops(void)
 {
         if (!mips_cm_present()) {
@@ -15,12 +15,14 @@
 #include <linux/printk.h>
 
 #include <asm/gic.h>
+#include <asm/mips-cpc.h>
 #include <asm/smp-ops.h>
 
 void gic_send_ipi_single(int cpu, unsigned int action)
 {
         unsigned long flags;
         unsigned int intr;
+        unsigned int core = cpu_data[cpu].core;
 
         pr_debug("CPU%d: %s cpu %d action %u status %08x\n",
                  smp_processor_id(), __func__, cpu, action, read_c0_status());
@@ -41,6 +43,15 @@ void gic_send_ipi_single(int cpu, unsigned int action)
         }
 
         gic_send_ipi(intr);
 
+        if (mips_cpc_present() && (core != current_cpu_data.core)) {
+                while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
+                        mips_cpc_lock_other(core);
+                        write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
+                        mips_cpc_unlock_other();
+                }
+        }
+
         local_irq_restore(flags);
 }
@@ -62,6 +62,8 @@ EXPORT_SYMBOL(cpu_sibling_map);
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
+cpumask_t cpu_coherent_mask;
+
 static inline void set_cpu_sibling_map(int cpu)
 {
         int i;
@@ -114,6 +116,7 @@ asmlinkage void start_secondary(void)
         cpu = smp_processor_id();
         cpu_data[cpu].udelay_val = loops_per_jiffy;
 
+        cpu_set(cpu, cpu_coherent_mask);
         notify_cpu_starting(cpu);
 
         set_cpu_online(cpu, true);
@@ -175,6 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 #ifndef CONFIG_HOTPLUG_CPU
         init_cpu_present(cpu_possible_mask);
 #endif
+        cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
 }
 
 /* preload SMP state for boot cpu */
@@ -390,3 +394,46 @@ void dump_send_ipi(void (*dump_ipi_callback)(void *))
 }
 EXPORT_SYMBOL(dump_send_ipi);
 #endif
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+
+static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
+static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);
+
+void tick_broadcast(const struct cpumask *mask)
+{
+        atomic_t *count;
+        struct call_single_data *csd;
+        int cpu;
+
+        for_each_cpu(cpu, mask) {
+                count = &per_cpu(tick_broadcast_count, cpu);
+                csd = &per_cpu(tick_broadcast_csd, cpu);
+
+                if (atomic_inc_return(count) == 1)
+                        smp_call_function_single_async(cpu, csd);
+        }
+}
+
+static void tick_broadcast_callee(void *info)
+{
+        int cpu = smp_processor_id();
+        tick_receive_broadcast();
+        atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
+}
+
+static int __init tick_broadcast_init(void)
+{
+        struct call_single_data *csd;
+        int cpu;
+
+        for (cpu = 0; cpu < NR_CPUS; cpu++) {
+                csd = &per_cpu(tick_broadcast_csd, cpu);
+                csd->func = tick_broadcast_callee;
+        }
+
+        return 0;
+}
+early_initcall(tick_broadcast_init);
+
+#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
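tick_broadcast() above coalesces IPIs via the per-cpu count: only the 0 -> 1 transition sends, so repeated broadcasts to a CPU that has not yet serviced the last one are folded into a single IPI. A C11 sketch of the same gating (illustrative only):

        #include <stdatomic.h>
        #include <stdbool.h>

        static _Atomic int tick_count;

        /* Sender side: matches "if (atomic_inc_return(count) == 1)" above. */
        static bool should_send_ipi(void)
        {
                return atomic_fetch_add(&tick_count, 1) + 1 == 1;
        }

        /* Receiver side: reset to 0 once the tick has been handled, so the
         * next broadcast sends again. */
        static void handled_tick(void)
        {
                atomic_store(&tick_count, 0);
        }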
@@ -15,6 +15,7 @@
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/context_tracking.h>
+#include <linux/cpu_pm.h>
 #include <linux/kexec.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -1837,18 +1838,16 @@ static int __init ulri_disable(char *s)
 }
 __setup("noulri", ulri_disable);
 
-void per_cpu_trap_init(bool is_boot_cpu)
+/* configure STATUS register */
+static void configure_status(void)
 {
-        unsigned int cpu = smp_processor_id();
-        unsigned int status_set = ST0_CU0;
-        unsigned int hwrena = cpu_hwrena_impl_bits;
-
         /*
          * Disable coprocessors and select 32-bit or 64-bit addressing
          * and the 16/32 or 32/32 FPR register model. Reset the BEV
          * flag that some firmware may have left set and the TS bit (for
          * IP27). Set XX for ISA IV code to work.
          */
+        unsigned int status_set = ST0_CU0;
 #ifdef CONFIG_64BIT
         status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
 #endif
@@ -1859,6 +1858,12 @@ void per_cpu_trap_init(bool is_boot_cpu)
 
         change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
                          status_set);
+}
+
+/* configure HWRENA register */
+static void configure_hwrena(void)
+{
+        unsigned int hwrena = cpu_hwrena_impl_bits;
 
         if (cpu_has_mips_r2)
                 hwrena |= 0x0000000f;
@@ -1868,7 +1873,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
 
         if (hwrena)
                 write_c0_hwrena(hwrena);
+}
 
+static void configure_exception_vector(void)
+{
         if (cpu_has_veic || cpu_has_vint) {
                 unsigned long sr = set_c0_status(ST0_BEV);
                 write_c0_ebase(ebase);
@@ -1884,6 +1892,16 @@ void per_cpu_trap_init(bool is_boot_cpu)
         } else
                 set_c0_cause(CAUSEF_IV);
 }
+}
+
+void per_cpu_trap_init(bool is_boot_cpu)
+{
+        unsigned int cpu = smp_processor_id();
+
+        configure_status();
+        configure_hwrena();
+
+        configure_exception_vector();
 
         /*
          * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
@@ -2122,3 +2140,32 @@ void __init trap_init(void)
 
         cu2_notifier(default_cu2_call, 0x80000000);     /* Run last */
 }
+
+static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
+                            void *v)
+{
+        switch (cmd) {
+        case CPU_PM_ENTER_FAILED:
+        case CPU_PM_EXIT:
+                configure_status();
+                configure_hwrena();
+                configure_exception_vector();
+
+                /* Restore register with CPU number for TLB handlers */
+                TLBMISS_HANDLER_RESTORE();
+
+                break;
+        }
+
+        return NOTIFY_OK;
+}
+
+static struct notifier_block trap_pm_notifier_block = {
+        .notifier_call = trap_pm_notifier,
+};
+
+static int __init trap_pm_init(void)
+{
+        return cpu_pm_register_notifier(&trap_pm_notifier_block);
+}
+arch_initcall(trap_pm_init);
@@ -7,6 +7,7 @@
  * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
+#include <linux/cpu_pm.h>
 #include <linux/hardirq.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
@@ -1643,3 +1644,26 @@ void r4k_cache_init(void)
         coherency_setup();
         board_cache_error_setup = r4k_cache_error_setup;
 }
+
+static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
+                                 void *v)
+{
+        switch (cmd) {
+        case CPU_PM_ENTER_FAILED:
+        case CPU_PM_EXIT:
+                coherency_setup();
+                break;
+        }
+
+        return NOTIFY_OK;
+}
+
+static struct notifier_block r4k_cache_pm_notifier_block = {
+        .notifier_call = r4k_cache_pm_notifier,
+};
+
+int __init r4k_cache_init_pm(void)
+{
+        return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
+}
+arch_initcall(r4k_cache_init_pm);
@@ -79,7 +79,7 @@ void setup_zero_pages(void)
         zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
 }
 
-void *kmap_coherent(struct page *page, unsigned long addr)
+static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 {
         enum fixed_addresses idx;
         unsigned long vaddr, flags, entrylo;
@@ -93,7 +93,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
         idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
         idx += in_interrupt() ? FIX_N_COLOURS : 0;
         vaddr = __fix_to_virt(FIX_CMAP_END - idx);
-        pte = mk_pte(page, PAGE_KERNEL);
+        pte = mk_pte(page, prot);
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
         entrylo = pte.pte_high;
 #else
@@ -117,6 +117,16 @@ void *kmap_coherent(struct page *page, unsigned long addr)
         return (void *) vaddr;
 }
 
+void *kmap_coherent(struct page *page, unsigned long addr)
+{
+        return __kmap_pgprot(page, addr, PAGE_KERNEL);
+}
+
+void *kmap_noncoherent(struct page *page, unsigned long addr)
+{
+        return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
+}
+
 void kunmap_coherent(void)
 {
         unsigned int wired;
@@ -8,6 +8,7 @@
  * Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
  */
+#include <linux/cpu_pm.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
@@ -399,7 +400,10 @@ static int __init set_ntlb(char *str)
 
 __setup("ntlb=", set_ntlb);
 
-void tlb_init(void)
+/*
+ * Configure TLB (for init or after a CPU has been powered off).
+ */
+static void r4k_tlb_configure(void)
 {
         /*
          * You should never change this register:
@@ -431,6 +435,11 @@ void tlb_init(void)
         local_flush_tlb_all();
 
         /* Did I tell you that ARC SUCKS? */
+}
+
+void tlb_init(void)
+{
+        r4k_tlb_configure();
 
         if (ntlb) {
                 if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
@@ -444,3 +453,26 @@ void tlb_init(void)
 
         build_tlb_refill_handler();
 }
+
+static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
+                               void *v)
+{
+        switch (cmd) {
+        case CPU_PM_ENTER_FAILED:
+        case CPU_PM_EXIT:
+                r4k_tlb_configure();
+                break;
+        }
+
+        return NOTIFY_OK;
+}
+
+static struct notifier_block r4k_tlb_pm_notifier_block = {
+        .notifier_call = r4k_tlb_pm_notifier,
+};
+
+static int __init r4k_tlb_init_pm(void)
+{
+        return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
+}
+arch_initcall(r4k_tlb_init_pm);
@ -99,10 +99,12 @@ static struct insn insn_table_MM[] = {
|
|||||||
{ insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },
|
{ insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },
|
||||||
{ insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },
|
{ insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },
|
||||||
{ insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
|
{ insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
|
||||||
|
{ insn_sync, M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS },
|
||||||
{ insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },
|
{ insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },
|
||||||
{ insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },
|
{ insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },
|
||||||
{ insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },
|
{ insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },
|
||||||
{ insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 },
|
{ insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 },
|
||||||
|
{ insn_wait, M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM },
|
||||||
{ insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },
|
{ insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },
|
||||||
{ insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
|
{ insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
|
||||||
{ insn_dins, 0, 0 },
|
{ insn_dins, 0, 0 },
|
||||||
|
@@ -82,6 +82,7 @@ static struct insn insn_table[] = {
 	{ insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
 	{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
 	{ insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
+	{ insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD },
 	{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
 	{ insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
 	{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
@@ -106,13 +107,16 @@ static struct insn insn_table[] = {
 	{ insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
 	{ insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
 	{ insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+	{ insn_sync, M(spec_op, 0, 0, 0, 0, sync_op), RE },
 	{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
 	{ insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
 	{ insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
 	{ insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
 	{ insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
+	{ insn_wait, M(cop0_op, cop_op, 0, 0, 0, wait_op), SCIMM },
 	{ insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
 	{ insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
+	{ insn_yield, M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD },
 	{ insn_invalid, 0, 0 }
 };
 
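The new table entries teach uasm, the kernel's run-time assembler, the encodings of sync, wait, jalr and yield for both the classic and microMIPS instruction formats; the flag masks (RS, RD, SCIMM, ...) say which operands the generic emitter must fill in. An illustrative use of the resulting emitters (a sketch: register numbers are arbitrary, and the destination-first argument order for jalr/yield is inferred from the I_u2u1 wrapper added further down):

#include <asm/uasm.h>

static void emit_examples(void)
{
	u32 buf[8], *p = buf;

	uasm_i_sync(&p, 0);		/* sync 0: full ordering barrier */
	uasm_i_wait(&p, 0);		/* wait: idle until an interrupt */
	uasm_i_jalr(&p, 31, 26);	/* jalr $31, $26: call through k0 */
	uasm_i_yield(&p, 2, 2);		/* yield $2, $2 (MT ASE) */
}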
@@ -49,12 +49,12 @@ enum opcode {
 	insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm,
 	insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
 	insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret,
-	insn_ext, insn_ins, insn_j, insn_jal, insn_jr, insn_ld, insn_ldx,
-	insn_ll, insn_lld, insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0,
-	insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd,
-	insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
-	insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor,
-	insn_xori,
+	insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_ld,
+	insn_ldx, insn_ll, insn_lld, insn_lui, insn_lw, insn_lwx, insn_mfc0,
+	insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc,
+	insn_scd, insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
+	insn_sync, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr,
+	insn_wait, insn_xor, insn_xori, insn_yield,
 };
 
 struct insn {
@@ -200,6 +200,13 @@ Ip_u1u2(op) \
 }							\
 UASM_EXPORT_SYMBOL(uasm_i##op);
 
+#define I_u2u1(op)					\
+Ip_u1u2(op)						\
+{							\
+	build_insn(buf, insn##op, b, a);		\
+}							\
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
 #define I_u1s2(op)					\
 Ip_u1s2(op)						\
 {							\
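I_u2u1() is the one genuinely new wrapper flavour: it reuses the two-operand Ip_u1u2() prototype but swaps the arguments before encoding, which suits jalr and yield, where the encoding orders rs before rd but the natural call order puts the destination first. Approximately, I_u2u1(_jalr) expands to the following (a sketch; the real parameter list comes from the Ip_u1u2() prototype macro):

void uasm_i_jalr(u32 **buf, unsigned int a, unsigned int b)
{
	/* a is the caller's first operand (rd), b the second (rs);
	 * build_insn() consumes operands in encoding order, hence the swap */
	build_insn(buf, insn_jalr, b, a);
}
UASM_EXPORT_SYMBOL(uasm_i_jalr);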
@@ -250,6 +257,7 @@ I_u2u1msbdu3(_ext)
 I_u2u1msbu3(_ins)
 I_u1(_j)
 I_u1(_jal)
+I_u2u1(_jalr)
 I_u1(_jr)
 I_u2s3u1(_ld)
 I_u2s3u1(_ll)
@@ -270,12 +278,15 @@ I_u2u1u3(_srl)
 I_u2u1u3(_rotr)
 I_u3u1u2(_subu)
 I_u2s3u1(_sw)
+I_u1(_sync)
 I_0(_tlbp)
 I_0(_tlbr)
 I_0(_tlbwi)
 I_0(_tlbwr)
+I_u1(_wait);
 I_u3u1u2(_xor)
 I_u2u1u3(_xori)
+I_u2u1(_yield)
 I_u2u1msbu3(_dins);
 I_u2u1msb32u3(_dinsm);
 I_u1(_syscall);
@@ -469,6 +480,14 @@ void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
 
+void ISAFUNC(uasm_il_beq)(u32 **p, struct uasm_reloc **r, unsigned int r1,
+			  unsigned int r2, int lid)
+{
+	uasm_r_mips_pc16(r, *p, lid);
+	ISAFUNC(uasm_i_beq)(p, r1, r2, 0);
+}
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beq));
+
 void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
 			   int lid)
 {
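uasm_il_beq() follows the established uasm_il_* pattern for branches whose targets are not yet known: record a PC16 relocation against a numeric label id, emit the branch with a zero offset, and let uasm_resolve_relocs() patch it once the label is bound. A usage sketch in the style of existing uasm callers (label id, registers and buffer sizes are illustrative):

#include <asm/uasm.h>

static struct uasm_label labels[8];
static struct uasm_reloc relocs[8];

enum { label_equal = 1 };
UASM_L_LA(_equal)

static void build_fragment(u32 *buf)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *p = buf;

	uasm_il_beq(&p, &r, 8, 9, label_equal);	/* beq $8, $9, <equal> */
	uasm_i_nop(&p);				/* branch delay slot */
	uasm_i_addiu(&p, 2, 0, 1);		/* not-taken path: v0 = 1 */
	uasm_l_equal(&l, p);			/* bind the label here */
	uasm_resolve_relocs(relocs, labels);	/* patch the branch offset */
}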
@@ -35,6 +35,11 @@ depends on ARM
 	source "drivers/cpuidle/Kconfig.arm"
 endmenu
 
+menu "MIPS CPU Idle Drivers"
+depends on MIPS
+source "drivers/cpuidle/Kconfig.mips"
+endmenu
+
 menu "POWERPC CPU Idle Drivers"
 depends on PPC
 source "drivers/cpuidle/Kconfig.powerpc"
drivers/cpuidle/Kconfig.mips (new file, 17 lines)
@@ -0,0 +1,17 @@
+#
+# MIPS CPU Idle Drivers
+#
+config MIPS_CPS_CPUIDLE
+	bool "CPU Idle driver for MIPS CPS platforms"
+	depends on CPU_IDLE
+	depends on SYS_SUPPORTS_MIPS_CPS
+	select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT
+	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+	select MIPS_CPS_PM
+	default y
+	help
+	  Select this option to enable processor idle state management
+	  through cpuidle for systems built around the MIPS Coherent
+	  Processing System (CPS) architecture. In order to make use of
+	  the deepest idle states you will need to ensure that you are
+	  also using the CONFIG_MIPS_CPS SMP implementation.
@@ -14,6 +14,10 @@ obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
 obj-$(CONFIG_ARM_U8500_CPUIDLE)		+= cpuidle-ux500.o
 obj-$(CONFIG_ARM_AT91_CPUIDLE)		+= cpuidle-at91.o
 
+###############################################################################
+# MIPS drivers
+obj-$(CONFIG_MIPS_CPS_CPUIDLE)		+= cpuidle-cps.o
+
 ###############################################################################
 # POWERPC drivers
 obj-$(CONFIG_PSERIES_CPUIDLE)		+= cpuidle-pseries.o
drivers/cpuidle/cpuidle-cps.c (new file, 186 lines)
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/cpuidle.h>
+#include <linux/init.h>
+
+#include <asm/idle.h>
+#include <asm/pm-cps.h>
+
+/* Enumeration of the various idle states this driver may enter */
+enum cps_idle_state {
+	STATE_WAIT = 0,		/* MIPS wait instruction, coherent */
+	STATE_NC_WAIT,		/* MIPS wait instruction, non-coherent */
+	STATE_CLOCK_GATED,	/* Core clock gated */
+	STATE_POWER_GATED,	/* Core power gated */
+	STATE_COUNT
+};
+
+static int cps_nc_enter(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv, int index)
+{
+	enum cps_pm_state pm_state;
+	int err;
+
+	/*
+	 * At least one core must remain powered up & clocked in order for the
+	 * system to have any hope of functioning.
+	 *
+	 * TODO: don't treat core 0 specially, just prevent the final core
+	 * TODO: remap interrupt affinity temporarily
+	 */
+	if (!cpu_data[dev->cpu].core && (index > STATE_NC_WAIT))
+		index = STATE_NC_WAIT;
+
+	/* Select the appropriate cps_pm_state */
+	switch (index) {
+	case STATE_NC_WAIT:
+		pm_state = CPS_PM_NC_WAIT;
+		break;
+	case STATE_CLOCK_GATED:
+		pm_state = CPS_PM_CLOCK_GATED;
+		break;
+	case STATE_POWER_GATED:
+		pm_state = CPS_PM_POWER_GATED;
+		break;
+	default:
+		BUG();
+		return -EINVAL;
+	}
+
+	/* Notify listeners the CPU is about to power down */
+	if ((pm_state == CPS_PM_POWER_GATED) && cpu_pm_enter())
+		return -EINTR;
+
+	/* Enter that state */
+	err = cps_pm_enter_state(pm_state);
+
+	/* Notify listeners the CPU is back up */
+	if (pm_state == CPS_PM_POWER_GATED)
+		cpu_pm_exit();
+
+	return err ?: index;
+}
+
static struct cpuidle_driver cps_driver = {
|
||||||
|
.name = "cpc_cpuidle",
|
||||||
|
.owner = THIS_MODULE,
|
||||||
|
.states = {
|
||||||
|
[STATE_WAIT] = MIPS_CPUIDLE_WAIT_STATE,
|
||||||
|
[STATE_NC_WAIT] = {
|
||||||
|
.enter = cps_nc_enter,
|
||||||
|
.exit_latency = 200,
|
||||||
|
.target_residency = 450,
|
||||||
|
.flags = CPUIDLE_FLAG_TIME_VALID,
|
||||||
|
.name = "nc-wait",
|
||||||
|
.desc = "non-coherent MIPS wait",
|
||||||
|
},
|
||||||
|
[STATE_CLOCK_GATED] = {
|
||||||
|
.enter = cps_nc_enter,
|
||||||
|
.exit_latency = 300,
|
||||||
|
.target_residency = 700,
|
||||||
|
.flags = CPUIDLE_FLAG_TIME_VALID |
|
||||||
|
CPUIDLE_FLAG_TIMER_STOP,
|
||||||
|
.name = "clock-gated",
|
||||||
|
.desc = "core clock gated",
|
||||||
|
},
|
||||||
|
[STATE_POWER_GATED] = {
|
||||||
|
.enter = cps_nc_enter,
|
||||||
|
.exit_latency = 600,
|
||||||
|
.target_residency = 1000,
|
||||||
|
.flags = CPUIDLE_FLAG_TIME_VALID |
|
||||||
|
CPUIDLE_FLAG_TIMER_STOP,
|
||||||
|
.name = "power-gated",
|
||||||
|
.desc = "core power gated",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
.state_count = STATE_COUNT,
|
||||||
|
.safe_state_index = 0,
|
||||||
|
};
|
||||||
|
|
||||||
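exit_latency and target_residency are in microseconds and are what a governor weighs a predicted idle period against; CPUIDLE_FLAG_TIMER_STOP warns the core that the local clockevent stops in that state, so the tick must migrate to the broadcast device (which is why this series also wires up GENERIC_CLOCKEVENTS_BROADCAST). The governor-side test these numbers feed looks roughly like this (simplified from the menu governor's checks):

#include <linux/cpuidle.h>

static bool state_is_worthwhile(const struct cpuidle_state *s,
				unsigned int predicted_us,
				unsigned int latency_req_us)
{
	if (s->target_residency > predicted_us)
		return false;	/* won't stay idle long enough to pay off */
	if (s->exit_latency > latency_req_us)
		return false;	/* wake-up would violate the QoS bound */
	return true;
}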
+static void __init cps_cpuidle_unregister(void)
+{
+	int cpu;
+	struct cpuidle_device *device;
+
+	for_each_possible_cpu(cpu) {
+		device = &per_cpu(cpuidle_dev, cpu);
+		cpuidle_unregister_device(device);
+	}
+
+	cpuidle_unregister_driver(&cps_driver);
+}
+
+static int __init cps_cpuidle_init(void)
+{
+	int err, cpu, core, i;
+	struct cpuidle_device *device;
+
+	/* Detect supported states */
+	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
+		cps_driver.state_count = STATE_CLOCK_GATED + 1;
+	if (!cps_pm_support_state(CPS_PM_CLOCK_GATED))
+		cps_driver.state_count = STATE_NC_WAIT + 1;
+	if (!cps_pm_support_state(CPS_PM_NC_WAIT))
+		cps_driver.state_count = STATE_WAIT + 1;
+
+	/* Inform the user if some states are unavailable */
+	if (cps_driver.state_count < STATE_COUNT) {
+		pr_info("cpuidle-cps: limited to ");
+		switch (cps_driver.state_count - 1) {
+		case STATE_WAIT:
+			pr_cont("coherent wait\n");
+			break;
+		case STATE_NC_WAIT:
+			pr_cont("non-coherent wait\n");
+			break;
+		case STATE_CLOCK_GATED:
+			pr_cont("clock gating\n");
+			break;
+		}
+	}
+
+	/*
+	 * Set the coupled flag on the appropriate states if this system
+	 * requires it.
+	 */
+	if (coupled_coherence)
+		for (i = STATE_NC_WAIT; i < cps_driver.state_count; i++)
+			cps_driver.states[i].flags |= CPUIDLE_FLAG_COUPLED;
+
+	err = cpuidle_register_driver(&cps_driver);
+	if (err) {
+		pr_err("Failed to register CPS cpuidle driver\n");
+		return err;
+	}
+
+	for_each_possible_cpu(cpu) {
+		core = cpu_data[cpu].core;
+		device = &per_cpu(cpuidle_dev, cpu);
+		device->cpu = cpu;
+#ifdef CONFIG_MIPS_MT
+		cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]);
+#endif
+
+		err = cpuidle_register_device(device);
+		if (err) {
+			pr_err("Failed to register CPU%d cpuidle device\n",
+			       cpu);
+			goto err_out;
+		}
+	}
+
+	return 0;
+err_out:
+	cps_cpuidle_unregister();
+	return err;
+}
+device_initcall(cps_cpuidle_init);
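One ordering detail ties this file back to the TLB hunk earlier in the commit: r4k_tlb_init_pm() is an arch_initcall while this driver is a device_initcall, so the TLB notifier is guaranteed to be on the cpu_pm chain before the driver can first power-gate a core. That follows directly from the initcall levels (sketched from include/linux/init.h; lower levels run first):

#define arch_initcall(fn)	__define_initcall(fn, 3)
#define device_initcall(fn)	__define_initcall(fn, 6)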
@@ -84,6 +84,7 @@ struct cpuidle_device {
 };
 
 DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
+DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
 
 /**
  * cpuidle_get_last_residency - retrieves the last state's residency time
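This header change publishes the cpuidle core's per-CPU device instances so a driver can register one device per CPU without allocating its own, which is exactly how cps_cpuidle_init() uses them above. Schematically (the matching DEFINE_PER_CPU storage lives in drivers/cpuidle/cpuidle.c):

/* core: defines the storage once */
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

/* driver: borrows this CPU's slot rather than allocating a device */
struct cpuidle_device *device = &per_cpu(cpuidle_dev, cpu);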