Commit 747003a8 authored by Philippe Gerum

ipipe: port to 3.3.x/blackfin

parent 4dc1c173
@@ -72,6 +72,8 @@ source "kernel/Kconfig.preempt"
source "kernel/Kconfig.freezer"
source "kernel/ipipe/Kconfig"
menu "Blackfin Processor Options"
comment "Processor and Board Settings"
@@ -28,7 +28,7 @@
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/irq.h>
#include <linux/ipipe_percpu.h>
#include <linux/ipipe_domain.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/bitops.h>
@@ -36,10 +36,7 @@
#include <asm/traps.h>
#include <asm/bitsperlong.h>
#define IPIPE_ARCH_STRING "1.16-01"
#define IPIPE_MAJOR_NUMBER 1
#define IPIPE_MINOR_NUMBER 16
#define IPIPE_PATCH_NUMBER 1
#define IPIPE_CORE_RELEASE 1
#ifdef CONFIG_SMP
#error "I-pipe/blackfin: SMP not implemented"
@@ -47,28 +44,9 @@
#define ipipe_processor_id() 0
#endif /* CONFIG_SMP */
#define prepare_arch_switch(next) \
do { \
ipipe_schedule_notify(current, next); \
hard_local_irq_disable(); \
} while (0)
#define task_hijacked(p) \
({ \
int __x__ = __ipipe_root_domain_p; \
if (__x__) \
hard_local_irq_enable(); \
!__x__; \
})
struct ipipe_domain;
struct ipipe_sysinfo {
int sys_nr_cpus; /* Number of CPUs on board */
int sys_hrtimer_irq; /* hrtimer device IRQ */
u64 sys_hrtimer_freq; /* hrtimer device frequency */
u64 sys_hrclock_freq; /* hrclock device frequency */
u64 sys_cpu_freq; /* CPU frequency (Hz) */
struct ipipe_arch_sysinfo {
};
#define ipipe_read_tsc(t) \
@@ -93,48 +71,24 @@ struct ipipe_sysinfo {
/* Private interface -- Internal use only */
#define __ipipe_check_platform() do { } while (0)
#define __ipipe_init_platform() do { } while (0)
extern atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
extern unsigned long __ipipe_irq_lvmask;
extern struct ipipe_domain ipipe_root;
/* enable/disable_irqdesc _must_ be used in pairs. */
void __ipipe_enable_irqdesc(struct ipipe_domain *ipd,
unsigned irq);
unsigned int irq);
void __ipipe_disable_irqdesc(struct ipipe_domain *ipd,
unsigned irq);
#define __ipipe_enable_irq(irq) \
do { \
struct irq_desc *desc = irq_to_desc(irq); \
struct irq_chip *chip = get_irq_desc_chip(desc); \
chip->irq_unmask(&desc->irq_data); \
} while (0)
#define __ipipe_disable_irq(irq) \
do { \
struct irq_desc *desc = irq_to_desc(irq); \
struct irq_chip *chip = get_irq_desc_chip(desc); \
chip->irq_mask(&desc->irq_data); \
} while (0)
static inline int __ipipe_check_tickdev(const char *devname)
{
return 1;
}
unsigned int irq);
void __ipipe_enable_pipeline(void);
#define __ipipe_hook_critical_ipi(ipd) do { } while (0)
void ___ipipe_sync_pipeline(void);
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs);
int __ipipe_get_irq_priority(unsigned int irq);
@@ -143,37 +97,12 @@ void __ipipe_serial_debug(const char *fmt, ...);
asmlinkage void __ipipe_call_irqtail(unsigned long addr);
DECLARE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
extern unsigned long __ipipe_core_clock;
extern unsigned long __ipipe_freq_scale;
extern unsigned long __ipipe_irq_tail_hook;
static inline unsigned long __ipipe_ffnz(unsigned long ul)
{
return ffs(ul) - 1;
}
#define __ipipe_do_root_xirq(ipd, irq) \
((ipd)->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)))
#define __ipipe_run_irqtail(irq) /* Must be a macro */ \
do { \
unsigned long __pending; \
CSYNC(); \
__pending = bfin_read_IPEND(); \
if (__pending & 0x8000) { \
__pending &= ~0x8010; \
if (__pending && (__pending & (__pending - 1)) == 0) \
__ipipe_call_irqtail(__ipipe_irq_tail_hook); \
} \
} while (0)
#define __ipipe_syscall_watched_p(p, sc) \
(ipipe_notifier_enabled_p(p) || (unsigned long)sc >= NR_syscalls)
#ifdef CONFIG_BF561
#define bfin_write_TIMER_DISABLE(val) bfin_write_TMRS8_DISABLE(val)
#define bfin_write_TIMER_ENABLE(val) bfin_write_TMRS8_ENABLE(val)
@@ -188,11 +117,11 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
#define __ipipe_root_tick_p(regs) ((regs->ipend & 0x10) != 0)
#else /* !CONFIG_IPIPE */
static inline void ipipe_mute_pic(void) { }
#define task_hijacked(p) 0
#define ipipe_trap_notify(t, r) 0
#define __ipipe_root_tick_p(regs) 1
static inline void ipipe_unmute_pic(void) { }
static inline void ipipe_notify_root_preemption(void) { }
#endif /* !CONFIG_IPIPE */
@@ -204,6 +133,4 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
#define IRQ_PRIOTMR CONFIG_IRQ_TIMER0
#endif
#define ipipe_update_tick_evtdev(evtdev) do { } while (0)
#endif /* !__ASM_BLACKFIN_IPIPE_H */
@@ -33,42 +33,51 @@
#define IPIPE_SYNCDEFER_FLAG 15
#define IPIPE_SYNCDEFER_MASK (1L << IPIPE_SYNCDEFER_FLAG)
/* Blackfin traps -- i.e. exception vector numbers */
#define IPIPE_NR_FAULTS 52 /* We leave a gap after VEC_ILL_RES. */
/* Pseudo-vectors used for kernel events */
#define IPIPE_FIRST_EVENT IPIPE_NR_FAULTS
#define IPIPE_EVENT_SYSCALL (IPIPE_FIRST_EVENT)
#define IPIPE_EVENT_SCHEDULE (IPIPE_FIRST_EVENT + 1)
#define IPIPE_EVENT_SIGWAKE (IPIPE_FIRST_EVENT + 2)
#define IPIPE_EVENT_SETSCHED (IPIPE_FIRST_EVENT + 3)
#define IPIPE_EVENT_INIT (IPIPE_FIRST_EVENT + 4)
#define IPIPE_EVENT_EXIT (IPIPE_FIRST_EVENT + 5)
#define IPIPE_EVENT_CLEANUP (IPIPE_FIRST_EVENT + 6)
#define IPIPE_EVENT_RETURN (IPIPE_FIRST_EVENT + 7)
#define IPIPE_LAST_EVENT IPIPE_EVENT_RETURN
#define IPIPE_NR_EVENTS (IPIPE_LAST_EVENT + 1)
#define IPIPE_TIMER_IRQ IRQ_CORETMR
#define __IPIPE_FEATURE_SYSINFO_V2 1
/*
* Blackfin traps -- i.e. exception vector numbers, we leave a gap
* after VEC_ILL_RES.
*/
#define IPIPE_TRAP_MAYDAY 52 /* Internal recovery trap */
#define IPIPE_NR_FAULTS 53
#ifndef __ASSEMBLY__
extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */
extern unsigned long __ipipe_root_status;
void __ipipe_stall_root(void);
void ipipe_stall_root(void);
unsigned long __ipipe_test_and_stall_root(void);
unsigned long ipipe_test_and_stall_root(void);
unsigned long __ipipe_test_root(void);
unsigned long ipipe_test_root(void);
void __ipipe_lock_root(void);
void __ipipe_unlock_root(void);
#endif /* !__ASSEMBLY__ */
int __ipipe_do_sync_check(void);
#define __ipipe_sync_check __ipipe_do_sync_check()
static inline unsigned long __ipipe_ffnz(unsigned long ul)
{
return ffs(ul) - 1;
}
#define __ipipe_run_irqtail(irq) /* Must be a macro */ \
do { \
unsigned long __pending; \
CSYNC(); \
__pending = bfin_read_IPEND(); \
if (__pending & 0x8000) { \
__pending &= ~0x8010; \
if (__pending && (__pending & (__pending - 1)) == 0) \
__ipipe_call_irqtail(__ipipe_irq_tail_hook); \
} \
} while (0)
#define __ipipe_syscall_watched_p(p, sc) \
(ipipe_notifier_enabled_p(p) || (unsigned long)sc >= NR_syscalls)
#define __IPIPE_FEATURE_SYSINFO_V2 1
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_IPIPE */
@@ -95,41 +95,30 @@ static inline notrace void __hard_local_irq_restore(unsigned long flags)
* we redeclare the required bits we cannot pick from
* <asm/ipipe_base.h> to prevent circular dependencies.
*/
void __ipipe_stall_root(void);
void __ipipe_unstall_root(void);
unsigned long __ipipe_test_root(void);
unsigned long __ipipe_test_and_stall_root(void);
void __ipipe_restore_root(unsigned long flags);
#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
struct ipipe_domain;
extern struct ipipe_domain ipipe_root;
void ipipe_check_context(struct ipipe_domain *ipd);
#define __check_irqop_context(ipd) ipipe_check_context(&ipipe_root)
#else /* !CONFIG_IPIPE_DEBUG_CONTEXT */
#define __check_irqop_context(ipd) do { } while (0)
#endif /* !CONFIG_IPIPE_DEBUG_CONTEXT */
void ipipe_stall_root(void);
void ipipe_unstall_root(void);
unsigned long ipipe_test_root(void);
unsigned long ipipe_test_and_stall_root(void);
void ipipe_restore_root(unsigned long flags);
/*
* Interrupt pipe interface to linux/irqflags.h.
*/
static inline notrace void arch_local_irq_disable(void)
{
__check_irqop_context();
__ipipe_stall_root();
ipipe_stall_root();
barrier();
}
static inline notrace void arch_local_irq_enable(void)
{
barrier();
__check_irqop_context();
__ipipe_unstall_root();
ipipe_unstall_root();
}
static inline notrace unsigned long arch_local_save_flags(void)
{
return __ipipe_test_root() ? bfin_no_irqs : bfin_irq_flags;
return ipipe_test_root() ? bfin_no_irqs : bfin_irq_flags;
}
static inline notrace int arch_irqs_disabled_flags(unsigned long flags)
@@ -141,8 +130,7 @@ static inline notrace unsigned long arch_local_irq_save(void)
{
unsigned long flags;
__check_irqop_context();
flags = __ipipe_test_and_stall_root() ? bfin_no_irqs : bfin_irq_flags;
flags = ipipe_test_and_stall_root() ? bfin_no_irqs : bfin_irq_flags;
barrier();
return flags;
@@ -150,8 +138,7 @@ static inline notrace unsigned long arch_local_irq_save(void)
static inline notrace void arch_local_irq_restore(unsigned long flags)
{
__check_irqop_context();
__ipipe_restore_root(flags == bfin_no_irqs);
ipipe_restore_root(flags == bfin_no_irqs);
}
static inline notrace unsigned long arch_mangle_irq_bits(int virt, unsigned long real)
@@ -215,8 +202,28 @@ static inline notrace void hard_local_irq_restore(unsigned long flags)
# define hard_local_irq_restore(flags) __hard_local_irq_restore(flags)
#endif /* !CONFIG_IPIPE_TRACE_IRQSOFF */
#define hard_local_irq_save_cond() hard_local_irq_save()
#define hard_local_irq_restore_cond(flags) hard_local_irq_restore(flags)
#define hard_cond_local_irq_save() hard_local_irq_save()
#define hard_cond_local_irq_restore(flags) hard_local_irq_restore(flags)
static inline notrace unsigned long hard_local_irq_save_notrace(void)
{
return __hard_local_irq_save();
}
static inline notrace void hard_local_irq_restore_notrace(unsigned long flags)
{
return __hard_local_irq_restore(flags);
}
static inline notrace void hard_local_irq_disable_notrace(void)
{
return __hard_local_irq_disable();
}
static inline notrace void hard_local_irq_enable_notrace(void)
{
return __hard_local_irq_enable();
}
#else /* !CONFIG_IPIPE */
@@ -224,7 +231,7 @@ static inline notrace void hard_local_irq_restore(unsigned long flags)
* Direct interface to linux/irqflags.h.
*/
#define arch_local_save_flags() hard_local_save_flags()
#define arch_local_irq_save(flags) __hard_local_irq_save()
#define arch_local_irq_save() __hard_local_irq_save()
#define arch_local_irq_restore(flags) __hard_local_irq_restore(flags)
#define arch_local_irq_enable() __hard_local_irq_enable()
#define arch_local_irq_disable() __hard_local_irq_disable()
@@ -238,48 +245,17 @@ static inline notrace void hard_local_irq_restore(unsigned long flags)
#define hard_local_irq_restore(flags) __hard_local_irq_restore(flags)
#define hard_local_irq_enable() __hard_local_irq_enable()
#define hard_local_irq_disable() __hard_local_irq_disable()
#define hard_local_irq_save_cond() hard_local_save_flags()
#define hard_local_irq_restore_cond(flags) do { (void)(flags); } while (0)
#define hard_cond_local_irq_save() hard_local_save_flags()
#define hard_cond_local_irq_restore(flags) do { (void)(flags); } while (0)
#endif /* !CONFIG_IPIPE */
#ifdef CONFIG_SMP
#define hard_local_irq_save_smp() hard_local_irq_save()
#define hard_local_irq_restore_smp(flags) hard_local_irq_restore(flags)
#if defined(CONFIG_SMP) && defined(CONFIG_IPIPE)
#define hard_smp_local_irq_save() hard_local_irq_save()
#define hard_smp_local_irq_restore(flags) hard_local_irq_restore(flags)
#else
#define hard_local_irq_save_smp() hard_local_save_flags()
#define hard_local_irq_restore_smp(flags) do { (void)(flags); } while (0)
#define hard_smp_local_irq_save() hard_local_save_flags()
#define hard_smp_local_irq_restore(flags) do { (void)(flags); } while (0)
#endif
/*
* Remap the arch-neutral IRQ state manipulation macros to the
* blackfin-specific hard_local_irq_* API.
*/
#define local_irq_save_hw(flags) \
do { \
(flags) = hard_local_irq_save(); \
} while (0)
#define local_irq_restore_hw(flags) \
do { \
hard_local_irq_restore(flags); \
} while (0)
#define local_irq_disable_hw() \
do { \
hard_local_irq_disable(); \
} while (0)
#define local_irq_enable_hw() \
do { \
hard_local_irq_enable(); \
} while (0)
#define local_irq_save_hw_notrace(flags) \
do { \
(flags) = __hard_local_irq_save(); \
} while (0)
#define local_irq_restore_hw_notrace(flags) \
do { \
__hard_local_irq_restore(flags); \
} while (0)
#define irqs_disabled_hw() hard_irqs_disabled()
#endif
@@ -97,8 +97,11 @@ static inline void __switch_mm(struct mm_struct *prev_mm, struct mm_struct *next
}
#ifdef CONFIG_IPIPE
#define lock_mm_switch(flags) flags = hard_local_irq_save_cond()
#define unlock_mm_switch(flags) hard_local_irq_restore_cond(flags)
#define lock_mm_switch(flags) \
do { \
flags = hard_cond_local_irq_save(); \
} while (0)
#define unlock_mm_switch(flags) hard_cond_local_irq_restore(flags)
#else
#define lock_mm_switch(flags) do { (void)(flags); } while (0)
#define unlock_mm_switch(flags) do { (void)(flags); } while (0)
@@ -205,9 +208,9 @@ static inline void destroy_context(struct mm_struct *mm)
}
#define ipipe_mm_switch_protect(flags) \
flags = hard_local_irq_save_cond()
flags = hard_cond_local_irq_save()
#define ipipe_mm_switch_unprotect(flags) \
hard_local_irq_restore_cond(flags)
hard_cond_local_irq_restore(flags)
#endif
@@ -28,6 +28,14 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_IPIPE
#include <ipipe/thread_info.h>
#else
struct ipipe_threadinfo {
};
static inline void __ipipe_init_threadinfo(struct ipipe_threadinfo *p) { }
#endif
typedef unsigned long mm_segment_t;
/*
@@ -43,6 +51,7 @@ struct thread_info {
int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit; /* address limit */
struct restart_block restart_block;
struct ipipe_threadinfo ipipe_data;
#ifndef CONFIG_SMP
struct l1_scratch_task_info l1_task_info;
#endif
@@ -43,4 +43,12 @@ extern void bfin_coretmr_init(void);
extern void bfin_coretmr_clockevent_init(void);
#endif
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
#ifdef CONFIG_IPIPE
extern void bfin_ipipe_coretmr_register(void);
#else /* !CONFIG_IPIPE */
#define bfin_ipipe_coretmr_register() do { } while (0)
#endif /* !CONFIG_IPIPE */
#endif
#endif
@@ -153,5 +153,10 @@ int main(void)
DEFINE(SIZEOF_CORELOCK, sizeof(struct corelock_slot));
#endif
#ifdef CONFIG_IPIPE
DEFINE(IPIPE_CURRENT_DOMAIN, offsetof(struct ipipe_percpu_data, curr));
DEFINE(IPIPE_DOMAIN_DESC, offsetof(struct ipipe_percpu_domain_data, domain));
#endif
return 0;
}
@@ -395,6 +395,12 @@ static int portmux_group_check(unsigned short per)
* MODIFICATION HISTORY :
**************************************************************/
#ifdef CONFIG_IPIPE
#define IPIPE_GPIO_ACCESS 1
#else
#define IPIPE_GPIO_ACCESS 0
#endif
/* Set a specific bit */
#define SET_GPIO(name) \
@@ -422,7 +428,7 @@ SET_GPIO(both) /* set_gpio_both() */
void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
{ \
unsigned long flags; \
if (ANOMALY_05000311 || ANOMALY_05000323) \
if (ANOMALY_05000311 || ANOMALY_05000323 || IPIPE_GPIO_ACCESS) \
flags = hard_local_irq_save(); \
if (arg) \
gpio_array[gpio_bank(gpio)]->name ## _set = gpio_bit(gpio); \
@@ -430,6 +436,8 @@ void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
gpio_array[gpio_bank(gpio)]->name ## _clear = gpio_bit(gpio); \
if (ANOMALY_05000311 || ANOMALY_05000323) { \
AWA_DUMMY_READ(name); \
} \
if (ANOMALY_05000311 || ANOMALY_05000323 || IPIPE_GPIO_ACCESS) { \
hard_local_irq_restore(flags); \
} \
} \
@@ -442,11 +450,13 @@ SET_GPIO_SC(data)
void set_gpio_toggle(unsigned gpio)
{
unsigned long flags;
if (ANOMALY_05000311 || ANOMALY_05000323)
if (ANOMALY_05000311 || ANOMALY_05000323 || IPIPE_GPIO_ACCESS)
flags = hard_local_irq_save();
gpio_array[gpio_bank(gpio)]->toggle = gpio_bit(gpio);
if (ANOMALY_05000311 || ANOMALY_05000323) {
AWA_DUMMY_READ(toggle);
}
if (ANOMALY_05000311 || ANOMALY_05000323 || IPIPE_GPIO_ACCESS) {
hard_local_irq_restore(flags);
}
}
@@ -459,11 +469,13 @@ EXPORT_SYMBOL(set_gpio_toggle);
void set_gpiop_ ## name(unsigned gpio, unsigned short arg) \
{ \
unsigned long flags; \
if (ANOMALY_05000311 || ANOMALY_05000323) \
if (ANOMALY_05000311 || ANOMALY_05000323 || IPIPE_GPIO_ACCESS) \
flags = hard_local_irq_save(); \
gpio_array[gpio_bank(gpio)]->name = arg; \
if (ANOMALY_05000311 || ANOMALY_05000323) { \
AWA_DUMMY_READ(name); \
} \
if (ANOMALY_05000311 || ANOMALY_05000323 || IPIPE_GPIO_ACCESS) { \
hard_local_irq_restore(flags); \
} \
} \
@@ -33,27 +33,30 @@
#include <linux/io.h>
#include <asm/system.h>
#include <linux/atomic.h>
#include <linux/ipipe_tickdev.h>
#include <asm/irq_handler.h>
DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
#include <asm/blackfin.h>
#include <asm/time.h>
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
static void __ipipe_do_IRQ(unsigned int irq, void *cookie);
static void __ipipe_no_irqtail(void);
unsigned long __ipipe_irq_tail_hook = (unsigned long)&__ipipe_no_irqtail;
EXPORT_SYMBOL(__ipipe_irq_tail_hook);
unsigned long __ipipe_irq_tail_hook = (unsigned long)__ipipe_no_irqtail;
EXPORT_SYMBOL_GPL(__ipipe_irq_tail_hook);
unsigned long __ipipe_core_clock;
EXPORT_SYMBOL(__ipipe_core_clock);
EXPORT_SYMBOL_GPL(__ipipe_core_clock);
unsigned long __ipipe_freq_scale;
EXPORT_SYMBOL(__ipipe_freq_scale);
EXPORT_SYMBOL_GPL(__ipipe_freq_scale);
atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
unsigned long __ipipe_irq_lvmask = bfin_no_irqs;
EXPORT_SYMBOL(__ipipe_irq_lvmask);
EXPORT_SYMBOL_GPL(__ipipe_irq_lvmask);
static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
{
@@ -72,95 +75,24 @@ void __ipipe_enable_pipeline(void)
__ipipe_freq_scale = 1000000000UL / __ipipe_core_clock;
for (irq = 0; irq < NR_IRQS; ++irq)
ipipe_virtualize_irq(ipipe_root_domain,
irq,
(ipipe_irq_handler_t)&asm_do_IRQ,
NULL,
&__ipipe_ack_irq,
IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
ipipe_request_irq(ipipe_root_domain, irq,
__ipipe_do_IRQ, NULL,
__ipipe_ack_irq);
}
/*
* __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
* interrupt protection log is maintained here for each domain. Hw
* interrupts are masked on entry.
*/
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
void __ipipe_handle_irq(unsigned int irq, struct pt_regs *regs) /* hw IRQs off */
{
struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();