Commit b5f1f372 authored by Philippe Gerum

powerpc/ipipe: add support for Book3E

parent cce4d9da
@@ -11,6 +11,8 @@
 #ifndef _ASM_POWERPC_EXCEPTION_64E_H
 #define _ASM_POWERPC_EXCEPTION_64E_H
+#include <asm/irq_softstate.h>
 /*
 * SPRGs usage an other considerations...
 *
...
@@ -35,6 +35,8 @@
 * implementations as possible.
 */
+#include <asm/irq_softstate.h>
 #define EX_R9 0
 #define EX_R10 8
 #define EX_R11 16
@@ -332,20 +334,6 @@ do_kvm_##n: \
 GET_CTR(r10, area); \
 std r10,_CTR(r1);
-#ifdef CONFIG_IPIPE
-/* Do NOT alter Rc(eq) in this code; our caller uses it. */
-#define COPY_SOFTISTATE(mreg) \
-ld mreg,PACAROOTPCPU(r13); \
-ld mreg,0(mreg); \
-nor mreg,mreg,mreg; \
-clrldi mreg,mreg,63; \
-std mreg,SOFTE(r1)
-#else /* !CONFIG_IPIPE */
-#define COPY_SOFTISTATE(mreg) \
-lbz mreg,PACASOFTIRQEN(r13); \
-std mreg,SOFTE(r1)
-#endif /* !CONFIG_IPIPE */
 #define EXCEPTION_PROLOG_COMMON_3(n) \
 std r2,GPR2(r1); /* save r2 in stackframe */ \
 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
@@ -353,7 +341,7 @@ do_kvm_##n: \
 mflr r9; /* Get LR, later save to stack */ \
 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
 std r9,_LINK(r1); \
-COPY_SOFTISTATE(r10); \
+EXC_SAVE_SOFTISTATE(r10); \
 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
 std r11,_XER(r1); \
 li r9,(n)+1; \
@@ -532,46 +520,6 @@ label##_relon_hv: \
 * runlatch, etc...
 */
-.macro HARD_ENABLE_INTS tmp=r10
-#ifdef CONFIG_PPC_BOOK3E
-wrteei 1
-#else
-ld \tmp,PACAKMSR(r13)
-ori \tmp,\tmp,MSR_EE
-mtmsrd \tmp,1
-#endif /* CONFIG_PPC_BOOK3E */
-.endm
-.macro HARD_DISABLE_INTS tmp=r10
-#ifdef CONFIG_PPC_BOOK3E
-wrteei 0
-#else
-ld \tmp,PACAKMSR(r13) /* Get kernel MSR without EE */
-mtmsrd \tmp,1 /* Update machine state */
-#endif /* CONFIG_PPC_BOOK3E */
-.endm
-.macro HARD_DISABLE_INTS_RI
-#ifdef CONFIG_PPC_BOOK3E
-wrteei 0
-#else
-/*
- * For performance reasons we clear RI the same time that we
- * clear EE. We only need to clear RI just before we restore r13
- * below, but batching it with EE saves us one expensive mtmsrd call.
- * We have to be careful to restore RI if we branch anywhere from
- * here (eg syscall_exit_work).
- *
- * CAUTION: using r9-r11 the way they are is assumed by the
- * caller.
- */
-ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
-li r9,MSR_RI
-andc r11,r10,r9
-mtmsrd r11,1 /* Update machine state */
-#endif /* CONFIG_PPC_BOOK3E */
-.endm
 /*
 * This addition reconciles our actual IRQ state with the various software
 * flags that track it. This may call C code.
@@ -585,7 +533,6 @@ label##_relon_hv: \
 mfmsr r11; \
 ori r11,r11,MSR_EE; \
 mtmsrd r11,1;
-#define RECONCILE_IRQ_STATE(__rA, __rB) HARD_DISABLE_INTS __rA
 #else /* !CONFIG_IPIPE */
 #define ADD_RECONCILE RECONCILE_IRQ_STATE(r10,r11)
 #endif /* !CONFIG_IPIPE */
...
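For reference, the I-pipe flavour of COPY_SOFTISTATE that moves out of this header derives SOFTE from the root domain's per-CPU control word instead of PACASOFTIRQEN: it loads the word through PACAROOTPCPU(r13), inverts it (nor), and keeps bit 0 only (clrldi ...,63). Roughly, in C (a sketch; the pointer name is illustrative, the bit meaning is taken from the asm above):

    /* Sketch: SOFTE = 1 iff the root domain's stall bit (bit 0) is clear. */
    static inline unsigned long ipipe_softe_from_root(unsigned long *root_status)
    {
            unsigned long status = *root_status; /* ld mreg,0(mreg) */
            return ~status & 1UL;                /* nor; clrldi mreg,mreg,63 */
    }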
@@ -108,7 +108,9 @@ struct ipipe_ipi_struct {
 void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd);
-void __ipipe_register_ipi(unsigned int irq);
+void __ipipe_register_mux_ipi(unsigned int irq);
+void __ipipe_finish_ipi_demux(unsigned int irq);
 #else
 #define __ipipe_hook_critical_ipi(ipd) do { } while(0)
 #endif /* CONFIG_SMP */
...
@@ -35,7 +35,8 @@
 * The first virtual interrupt is reserved for the timer (see
 * __ipipe_early_core_setup).
 */
-#define IPIPE_TIMER_VIRQ IPIPE_VIRQ_BASE
+#define IPIPE_TIMER_VIRQ (IPIPE_VIRQ_BASE + 0)
+#define IPIPE_DOORBELL_VIRQ (IPIPE_VIRQ_BASE + 1)
 #ifdef CONFIG_SMP
 /*
@@ -44,21 +45,19 @@
 * implemented by piggybacking the debugger break IPI 0x3,
 * which is demultiplexed in __ipipe_ipi_demux().
 */
+#define IPIPE_CRITICAL_IPI (IPIPE_VIRQ_BASE + 2)
+#define IPIPE_HRTIMER_IPI (IPIPE_VIRQ_BASE + 3)
+#define IPIPE_RESCHEDULE_IPI (IPIPE_VIRQ_BASE + 4)
+#define IPIPE_BASE_IPI_OFFSET IPIPE_CRITICAL_IPI
 /* these are bit numbers in practice */
 #define IPIPE_MSG_CRITICAL_IPI 0
 #define IPIPE_MSG_HRTIMER_IPI (IPIPE_MSG_CRITICAL_IPI + 1)
 #define IPIPE_MSG_RESCHEDULE_IPI (IPIPE_MSG_CRITICAL_IPI + 2)
 #define IPIPE_MSG_IPI_MASK ((1UL << IPIPE_MSG_CRITICAL_IPI) | \
 (1UL << IPIPE_MSG_HRTIMER_IPI) | \
 (1UL << IPIPE_MSG_RESCHEDULE_IPI))
-#define IPIPE_CRITICAL_IPI (IPIPE_VIRQ_BASE + 1)
-#define IPIPE_HRTIMER_IPI (IPIPE_CRITICAL_IPI + 1)
-#define IPIPE_RESCHEDULE_IPI (IPIPE_CRITICAL_IPI + 2)
-#define IPIPE_BASE_IPI_OFFSET IPIPE_CRITICAL_IPI
 #define ipipe_processor_id() raw_smp_processor_id()
 #else /* !CONFIG_SMP */
...
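The doorbell VIRQ now occupies IPIPE_VIRQ_BASE + 1, so the three SMP IPIs move up to +2..+4 while keeping their relative order; a multiplexed message bit therefore still maps onto its VIRQ by adding IPIPE_BASE_IPI_OFFSET. A sketch of the mapping the demux relies on (the helper name is illustrative, not from this patch):

    /* Sketch: message bit -> virtual IRQ, e.g. IPIPE_MSG_HRTIMER_IPI (1)
     * maps to IPIPE_HRTIMER_IPI (IPIPE_VIRQ_BASE + 3). */
    static inline unsigned int ipi_msg_to_virq(unsigned int msg_bit)
    {
            return IPIPE_BASE_IPI_OFFSET + msg_bit;
    }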
@@ -74,6 +74,9 @@ static inline void hard_local_irq_enable_notrace(void)
 {
 __asm__ __volatile__("wrteei 1": : :"memory");
 }
+#define hard_local_irq_restore_notrace(x) mtmsr(x)
 #else /* !CONFIG_PPC_BOOK3E */
 static inline void hard_local_irq_disable_notrace(void)
 {
@@ -84,6 +87,9 @@ static inline void hard_local_irq_enable_notrace(void)
 {
 __mtmsrd(mfmsr() | MSR_EE, 1);
 }
+#define hard_local_irq_restore_notrace(x) __mtmsrd(x, 1)
 #endif /* !CONFIG_PPC_BOOK3E */
 static inline unsigned long hard_local_irq_save_notrace(void)
@@ -93,8 +99,6 @@ static inline unsigned long hard_local_irq_save_notrace(void)
 return msr;
 }
-#define hard_local_irq_restore_notrace(x) __mtmsrd(x, 1)
 #endif /* CONFIG_PPC64 */
 #ifdef CONFIG_IPIPE
...
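hard_local_irq_restore_notrace() is now defined per MSR-update variant, next to its enable/disable counterparts: Book3E restores the saved MSR with plain mtmsr(), while the Book3S side keeps the __mtmsrd(x, 1) form. Typical usage, as a sketch:

    /* Sketch: bracketing a hard-atomic section with the notrace helpers. */
    unsigned long msr;

    msr = hard_local_irq_save_notrace();  /* returns prior MSR, EE cleared */
    /* ... section that must not be preempted, whatever the domain ... */
    hard_local_irq_restore_notrace(msr);  /* mtmsr / __mtmsrd(msr, 1) */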
+#ifndef _ASM_POWERPC_IRQ_SOFTSTATE_H
+#define _ASM_POWERPC_IRQ_SOFTSTATE_H
+#ifdef __ASSEMBLY__
+.macro HARD_ENABLE_INTS tmp=r10
+#ifdef CONFIG_PPC_BOOK3E
+wrteei 1
+#else
+ld \tmp,PACAKMSR(r13)
+ori \tmp,\tmp,MSR_EE
+mtmsrd \tmp,1
+#endif /* CONFIG_PPC_BOOK3E */
+.endm
+.macro HARD_DISABLE_INTS tmp=r10
+#ifdef CONFIG_PPC_BOOK3E
+wrteei 0
+#else
+ld \tmp,PACAKMSR(r13) /* Get kernel MSR without EE */
+mtmsrd \tmp,1 /* Update machine state */
+#endif /* CONFIG_PPC_BOOK3E */
+.endm
+.macro HARD_DISABLE_INTS_RI
+#ifdef CONFIG_PPC_BOOK3E
+wrteei 0
+#else
+/*
+ * For performance reasons we clear RI the same time that we
+ * clear EE. We only need to clear RI just before we restore r13
+ * below, but batching it with EE saves us one expensive mtmsrd call.
+ * We have to be careful to restore RI if we branch anywhere from
+ * here (eg syscall_exit_work).
+ *
+ * CAUTION: using r9-r11 the way they are is assumed by the
+ * caller.
+ */
+ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
+li r9,MSR_RI
+andc r11,r10,r9
+mtmsrd r11,1 /* Update machine state */
+#endif /* CONFIG_PPC_BOOK3E */
+.endm
+#ifdef CONFIG_IPIPE
+/* Do NOT alter Rc(eq) in this code; our caller uses it. */
+#define __COPY_SOFTISTATE(mreg) \
+ld mreg,PACAROOTPCPU(r13); \
+ld mreg,0(mreg); \
+nor mreg,mreg,mreg; \
+clrldi mreg,mreg,63; \
+
+/* Do NOT alter Rc(eq) in this code; our caller uses it. */
+#define COPY_SOFTISTATE(mreg) \
+__COPY_SOFTISTATE(mreg); \
+std mreg,SOFTE(r1)
+#ifdef CONFIG_PPC_BOOK3E
+#define SPECIAL_SAVE_SOFTISTATE(mreg) \
+__COPY_SOFTISTATE(mreg); \
+SPECIAL_EXC_STORE(mreg, SOFTE)
+#endif
+#define EXC_SAVE_SOFTISTATE(mreg) \
+COPY_SOFTISTATE(mreg)
+#define RECONCILE_IRQ_STATE(__rA, __rB) HARD_DISABLE_INTS __rA
+#else /* !CONFIG_IPIPE */
+#define COPY_SOFTISTATE(mreg) \
+lbz mreg,PACASOFTIRQEN(r13); \
+std mreg,SOFTE(r1)
+#ifdef CONFIG_PPC_BOOK3E
+#define SPECIAL_SAVE_SOFTISTATE(mreg) \
+lbz mreg,PACASOFTIRQEN(r13); \
+SPECIAL_EXC_STORE(mreg, SOFTE)
+#endif
+#define EXC_SAVE_SOFTISTATE(mreg) \
+COPY_SOFTISTATE(mreg)
+/*
+ * This is used by assembly code to soft-disable interrupts first and
+ * reconcile irq state.
+ *
+ * NB: This may call C code, so the caller must be prepared for volatiles to
+ * be clobbered.
+ */
+#ifdef CONFIG_TRACE_IRQFLAGS
+#define RECONCILE_IRQ_STATE(__rA, __rB) \
+lbz __rA,PACASOFTIRQEN(r13); \
+lbz __rB,PACAIRQHAPPENED(r13); \
+cmpwi cr0,__rA,0; \
+li __rA,0; \
+ori __rB,__rB,PACA_IRQ_HARD_DIS; \
+stb __rB,PACAIRQHAPPENED(r13); \
+beq 44f; \
+stb __rA,PACASOFTIRQEN(r13); \
+TRACE_DISABLE_INTS; \
+44:
+#else
+#define RECONCILE_IRQ_STATE(__rA, __rB) \
+lbz __rA,PACAIRQHAPPENED(r13); \
+li __rB,0; \
+ori __rA,__rA,PACA_IRQ_HARD_DIS; \
+stb __rB,PACASOFTIRQEN(r13); \
+stb __rA,PACAIRQHAPPENED(r13)
+#endif /* !CONFIG_TRACE_IRQFLAGS */
+#endif /* !CONFIG_IPIPE */
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_IRQ_SOFTSTATE_H */
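Note that the two RECONCILE_IRQ_STATE flavours gathered in this new header differ in substance: under CONFIG_IPIPE the macro reduces to HARD_DISABLE_INTS, since the pipeline keeps its own per-domain soft state, whereas the stock version soft-disables and records PACA_IRQ_HARD_DIS. The non-TRACE_IRQFLAGS path is roughly this C (a sketch; the paca field names follow the asm above):

    /* Sketch of the stock (!CONFIG_IPIPE) reconcile logic. */
    static inline void reconcile_irq_state(void)
    {
            u8 happened = local_paca->irq_happened; /* lbz __rA,PACAIRQHAPPENED */
            local_paca->soft_enabled = 0;           /* stb __rB,PACASOFTIRQEN */
            local_paca->irq_happened = happened | PACA_IRQ_HARD_DIS;
    }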
@@ -38,40 +38,10 @@
 #define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on)
 #define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off)
-/*
- * This is used by assembly code to soft-disable interrupts first and
- * reconcile irq state.
- *
- * NB: This may call C code, so the caller must be prepared for volatiles to
- * be clobbered.
- */
-#ifndef CONFIG_IPIPE
-#define RECONCILE_IRQ_STATE(__rA, __rB) \
-lbz __rA,PACASOFTIRQEN(r13); \
-lbz __rB,PACAIRQHAPPENED(r13); \
-cmpwi cr0,__rA,0; \
-li __rA,0; \
-ori __rB,__rB,PACA_IRQ_HARD_DIS; \
-stb __rB,PACAIRQHAPPENED(r13); \
-beq 44f; \
-stb __rA,PACASOFTIRQEN(r13); \
-TRACE_DISABLE_INTS; \
-44:
-#endif /* !CONFIG_IPIPE */
 #else
 #define TRACE_ENABLE_INTS
 #define TRACE_DISABLE_INTS
-#ifndef CONFIG_IPIPE
-#define RECONCILE_IRQ_STATE(__rA, __rB) \
-lbz __rA,PACAIRQHAPPENED(r13); \
-li __rB,0; \
-ori __rA,__rA,PACA_IRQ_HARD_DIS; \
-stb __rB,PACASOFTIRQEN(r13); \
-stb __rA,PACAIRQHAPPENED(r13)
-#endif /* !CONFIG_IPIPE */
 #endif
 #endif
...
@@ -16,6 +16,7 @@
 #define _ASM_POWERPC_QE_IC_H
 #include <linux/irq.h>
+#include <linux/ipipe.h>
 struct device_node;
 struct qe_ic;
...
@@ -65,7 +65,9 @@
 ld reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
 special_reg_save:
+#ifndef CONFIG_IPIPE
 lbz r9,PACAIRQHAPPENED(r13)
+#endif
 RECONCILE_IRQ_STATE(r3,r4)
 /*
@@ -132,15 +134,15 @@ BEGIN_FTR_SECTION
 mtspr SPRN_MAS5,r10
 mtspr SPRN_MAS8,r10
 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#ifndef CONFIG_IPIPE
 SPECIAL_EXC_STORE(r9,IRQHAPPENED)
+#endif
 mfspr r10,SPRN_DEAR
 SPECIAL_EXC_STORE(r10,DEAR)
 mfspr r10,SPRN_ESR
 SPECIAL_EXC_STORE(r10,ESR)
-lbz r10,PACASOFTIRQEN(r13)
-SPECIAL_EXC_STORE(r10,SOFTE)
+SPECIAL_SAVE_SOFTISTATE(r10)
 ld r10,_NIP(r1)
 SPECIAL_EXC_STORE(r10,CSRR0)
 ld r10,_MSR(r1)
@@ -206,8 +208,15 @@ BEGIN_FTR_SECTION
 mtspr SPRN_MAS8,r10
 END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
-lbz r6,PACASOFTIRQEN(r13)
+#ifdef CONFIG_IPIPE
+ld r6,PACAROOTPCPU(r13)
+cmpwi cr0,r6,0
+bne 1f
+TRACE_ENABLE_INTS
+1:
+#else
 ld r5,SOFTE(r1)
+lbz r6,PACASOFTIRQEN(r13)
 /* Interrupts had better not already be enabled... */
 twnei r6,0
@@ -226,6 +235,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 */
 SPECIAL_EXC_LOAD(r10,IRQHAPPENED)
 stb r10,PACAIRQHAPPENED(r13)
+#endif
 SPECIAL_EXC_LOAD(r10,DEAR)
 mtspr SPRN_DEAR,r10
@@ -350,10 +360,16 @@ ret_from_mc_except:
 #define PROLOG_ADDITION_NONE_DBG(n)
 #define PROLOG_ADDITION_NONE_MC(n)
+#ifdef CONFIG_IPIPE
+#define PROLOG_ADDITION_MASKABLE_GEN(n)
+#define MASKABLE_EXCEPTION_EXIT b __ipipe_ret_from_except_lite
+#else
 #define PROLOG_ADDITION_MASKABLE_GEN(n) \
 lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
 cmpwi cr0,r10,0; /* yes -> go out of line */ \
 beq masked_interrupt_book3e_##n
+#define MASKABLE_EXCEPTION_EXIT b ret_from_except_lite
+#endif
 #define PROLOG_ADDITION_2REGS_GEN(n) \
 std r14,PACA_EXGEN+EX_R14(r13); \
@@ -397,8 +413,8 @@ exc_##n##_common: \
 mfspr r8,SPRN_XER; /* save XER in stackframe */ \
 ld r9,excf+EX_R1(r13); /* load orig r1 back from PACA */ \
 lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \
-lbz r11,PACASOFTIRQEN(r13); /* get current IRQ softe */ \
 ld r12,exception_marker@toc(r2); \
+EXC_SAVE_SOFTISTATE(r11); \
 li r0,0; \
 std r3,GPR10(r1); /* save r10 to stackframe */ \
 std r4,GPR11(r1); /* save r11 to stackframe */ \
@@ -410,7 +426,6 @@ exc_##n##_common: \
 std r9,0(r1); /* store stack frame back link */ \
 std r10,_CCR(r1); /* store orig CR in stackframe */ \
 std r9,GPR1(r1); /* store stack frame back link */ \
-std r11,SOFTE(r1); /* and save it to stackframe */ \
 std r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \
 std r3,_TRAP(r1); /* set trap number */ \
 std r0,RESULT(r1); /* clear regs->result */
@@ -499,7 +514,7 @@ exc_##n##_bad_stack: \
 CHECK_NAPPING(); \
 addi r3,r1,STACK_FRAME_OVERHEAD; \
 bl hdlr; \
-b ret_from_except_lite;
+MASKABLE_EXCEPTION_EXIT;
 /* This value is used to mark exception frames on the stack. */
 .section ".toc","aw"
@@ -545,6 +560,16 @@ interrupt_base_book3e: /* fake trap */
 .globl interrupt_end_book3e
 interrupt_end_book3e:
+#ifdef CONFIG_IPIPE
+#define BOOKE_EXTIRQ_HANDLER __ipipe_grab_irq
+#define BOOKE_TIMER_HANDLER __ipipe_grab_timer
+#define BOOKE_DBELL_HANDLER __ipipe_grab_doorbell
+#else
+#define BOOKE_EXTIRQ_HANDLER do_IRQ
+#define BOOKE_TIMER_HANDLER timer_interrupt
+#define BOOKE_DBELL_HANDLER doorbell_exception
+#endif
 /* Critical Input Interrupt */
 START_EXCEPTION(critical_input);
 CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
@@ -591,8 +616,8 @@ interrupt_end_book3e:
 /* External Input Interrupt */
 MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
-external_input, do_IRQ, ACK_NONE)
+external_input, BOOKE_EXTIRQ_HANDLER, ACK_NONE)
 /* Alignment */
 START_EXCEPTION(alignment);
 NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
@@ -676,7 +701,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 /* Decrementer Interrupt */
 MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
-decrementer, timer_interrupt, ACK_DEC)
+decrementer, BOOKE_TIMER_HANDLER, ACK_DEC)
 /* Fixed Interval Timer Interrupt */
 MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
@@ -855,7 +880,7 @@ kernel_dbg_exc:
 /* Doorbell interrupt */
 MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
-doorbell, doorbell_exception, ACK_NONE)
+doorbell, BOOKE_DBELL_HANDLER, ACK_NONE)
 /* Doorbell critical Interrupt */
 START_EXCEPTION(doorbell_crit);
@@ -928,6 +953,7 @@ kernel_dbg_exc:
 bl .unknown_exception
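The BOOKE_*_HANDLER indirection is what lets the unmodified MASKABLE_EXCEPTION() expansions serve both configurations: with CONFIG_IPIPE each maskable vector branches to an __ipipe_grab_* entry (and returns through __ipipe_ret_from_except_lite), without it the stock do_IRQ/timer_interrupt/doorbell_exception handlers run. Conceptually, a grab entry feeds the event into the pipeline rather than calling the Linux handler directly (a sketch only, not this patch's code; the helper names are assumptions):

    /* Sketch: what an I-pipe "grab" entry point amounts to. */
    asmlinkage void sketch_grab_extirq(struct pt_regs *regs)
    {
            unsigned int irq = query_pic_for_pending_irq(); /* hypothetical */
            __ipipe_handle_irq(irq, regs); /* log/dispatch along the domains */
    }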