ipipe.c 9.21 KB
Newer Older
Philippe Gerum's avatar
Philippe Gerum committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
/* -*- linux-c -*-
 * linux/arch/powerpc/kernel/ipipe.c
 *
 * Copyright (C) 2005 Heikki Lindholm (PPC64 port).
 * Copyright (C) 2004 Wolfgang Grandegger (Adeos/ppc port over 2.4).
 * Copyright (C) 2002-2012 Philippe Gerum.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 * USA; either version 2 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Architecture-dependent I-PIPE core support for PowerPC 32/64bit.
 */

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/ipipe_tickdev.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/mmu_context.h>
#include <asm/unistd.h>
#include <asm/machdep.h>
#include <asm/atomic.h>
#include <asm/hardirq.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/debug.h>

static void __ipipe_do_IRQ(unsigned int irq, void *cookie);

static void __ipipe_do_timer(unsigned int irq, void *cookie);

#define DECREMENTER_MAX	0x7fffffff

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct ipipe_ipi_struct, ipipe_ipi_message);

unsigned int __ipipe_ipi_irq = NR_IRQS + 1; /* dummy value */

#ifdef CONFIG_DEBUGGER
cpumask_t __ipipe_dbrk_pending;	/* pending debugger break IPIs */
#endif

/*
 * Wire the critical IPI slot of the given domain to the generic
 * critical-section synchronization handler used by
 * ipipe_critical_enter()/ipipe_critical_exit().
 */
void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd)
{
	const unsigned int ipi = IPIPE_CRITICAL_IPI;

	/* No ack callback or cookie is needed for this virtual IRQ. */
	ipd->irqs[ipi].handler = __ipipe_do_critical_sync;
	ipd->irqs[ipi].cookie = NULL;
	ipd->irqs[ipi].ackfn = NULL;
	ipd->irqs[ipi].control = IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK;
}

/*
 * Record the hw interrupt number the platform muxes I-pipe IPIs
 * over; __ipipe_grab_irq() compares incoming IRQs against it to
 * route them to the IPI demultiplexer.
 */
void __ipipe_register_ipi(unsigned int irq)
{
	__ipipe_ipi_irq = irq;
}

/*
 * Demultiplex the single hw IPI into the per-CPU I-pipe message
 * bits set by ipipe_send_ipi(), delivering each pending virtual
 * IPI through the pipeline.  Runs with hw interrupts off.
 */
static void __ipipe_ipi_demux(int irq, struct pt_regs *regs)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ipi, cpu = ipipe_processor_id();

	desc->ipipe_ack(irq, desc);

	kstat_incr_irq_this_cpu(irq);

	/*
	 * Drain every pending message bit; re-check the whole word in
	 * case new bits were latched while we were delivering.
	 */
	while (per_cpu(ipipe_ipi_message, cpu).value & IPIPE_MSG_IPI_MASK) {
		for (ipi = IPIPE_MSG_CRITICAL_IPI; ipi <= IPIPE_MSG_RESCHEDULE_IPI; ++ipi) {
			if (test_and_clear_bit(ipi, &per_cpu(ipipe_ipi_message, cpu).value)) {
				mb();
				__ipipe_handle_irq(ipi + IPIPE_BASE_IPI_OFFSET, NULL);
			}
		}
	}

#ifdef CONFIG_DEBUGGER
	/*
	 * The debugger IPI handler should be NMI-safe, so let's call
	 * it immediately in case the IPI is pending.
	 */
	if (cpumask_test_cpu(cpu, &__ipipe_dbrk_pending)) {
		cpumask_clear_cpu(cpu, &__ipipe_dbrk_pending);
		debugger_ipi(regs);
	}
#endif /* CONFIG_DEBUGGER */

	ipipe_end_irq(irq);
}

/*
 * Change the CPU affinity of @irq through the underlying irqchip.
 * Requests naming no online CPU, or targeting a chip without
 * affinity support, are rejected with a one-shot warning.
 */
void ipipe_set_irq_affinity(unsigned int irq, cpumask_t cpumask)
{
	struct irq_chip *chip = irq_get_chip(irq);

	if (WARN_ON_ONCE(chip->irq_set_affinity == NULL))
		return;

	if (WARN_ON_ONCE(cpumask_any_and(&cpumask, cpu_online_mask) >= nr_cpu_ids))
		return;

	chip->irq_set_affinity(irq_get_irq_data(irq), &cpumask, true);
}
EXPORT_SYMBOL_GPL(ipipe_set_irq_affinity);

/*
 * Send the given I-pipe virtual IPI to the CPUs in @cpumask: latch
 * the request into each target's per-CPU message word, then kick
 * the remote demultiplexer with a single hw IPI
 * (PPC_MSG_IPIPE_DEMUX).  The local CPU never signals itself.
 */
void ipipe_send_ipi(unsigned int ipi, cpumask_t cpumask)
{
	unsigned long hwflags;
	int self, cpu;

	hwflags = hard_local_irq_save();

	/* Turn the virq number into a message bit index. */
	ipi -= IPIPE_BASE_IPI_OFFSET;
	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, &cpumask))
			set_bit(ipi, &per_cpu(ipipe_ipi_message, cpu).value);
	}
	/* Make the message bits visible before the hw IPI is sent. */
	mb();

	if (unlikely(cpumask_empty(&cpumask)))
		goto out;

	self = ipipe_processor_id();
	for_each_cpu(cpu, &cpumask) {
		if (cpu != self)
			smp_ops->message_pass(cpu, PPC_MSG_IPIPE_DEMUX);
	}
out:
	hard_local_irq_restore(hwflags);
}
EXPORT_SYMBOL_GPL(ipipe_send_ipi);

/*
 * Raise the stall bit of the root domain, i.e. virtually disable
 * interrupts for Linux while leaving hw interrupts available to
 * the head domain.  Must be called from root context.
 */
void ipipe_stall_root(void)
{
	unsigned long hwflags;

	ipipe_root_only();
	hwflags = hard_local_irq_save();
	set_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);
	hard_local_irq_restore(hwflags);
}
EXPORT_SYMBOL_GPL(ipipe_stall_root);

/*
 * Atomically raise the root stall bit and return its previous
 * state (non-zero if the root domain was already stalled).
 */
unsigned long ipipe_test_and_stall_root(void)
{
	unsigned long hwflags;
	int oldstate;

	ipipe_root_only();
	hwflags = hard_local_irq_save();
	oldstate = test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);
	hard_local_irq_restore(hwflags);

	return oldstate;
}
EXPORT_SYMBOL(ipipe_test_and_stall_root);

/*
 * Return the current state of the root stall bit (non-zero when
 * the root domain is stalled).
 */
unsigned long ipipe_test_root(void)
{
	unsigned long hwflags;
	int stalled;

	hwflags = hard_local_irq_save();
	stalled = test_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);
	hard_local_irq_restore(hwflags);

	return stalled;
}
EXPORT_SYMBOL_GPL(ipipe_test_root);

#endif	/* CONFIG_SMP */

/* Allocate one virq and check it lands on the expected fixed number. */
static void __ipipe_grab_fixed_virq(unsigned int expected)
{
	BUG_ON(ipipe_alloc_virq() != expected);
}

/*
 * Reserve the fixed virtual IRQ numbers the I-pipe core relies on.
 * This runs early so the allocations start at IPIPE_VIRQ_BASE and
 * come out in the expected order.
 */
void __ipipe_early_core_setup(void)
{
	__ipipe_grab_fixed_virq(IPIPE_TIMER_VIRQ);
#ifdef CONFIG_SMP
	__ipipe_grab_fixed_virq(IPIPE_CRITICAL_IPI);
	__ipipe_grab_fixed_virq(IPIPE_HRTIMER_IPI);
	__ipipe_grab_fixed_virq(IPIPE_RESCHEDULE_IPI);
#endif
}

/*
 * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
 * interrupts are off, and secondary CPUs are still lost in space.
 */
void __ipipe_enable_pipeline(void)
{
	unsigned long flags;
	unsigned int irq;

	flags = ipipe_critical_enter(NULL);

	/* First, intercept all interrupts from the root
	 * domain. Regular Linux interrupt handlers will receive
	 * raw_cpu_ptr(&ipipe_percpu.tick_regs) for external IRQs,
	 * whatever cookie is passed here.
	 */
	for (irq = 0; irq < NR_IRQS; irq++)
		ipipe_request_irq(ipipe_root_domain,
				  irq,
				  __ipipe_do_IRQ, NULL,
				  NULL);
	/*
	 * We use a virtual IRQ to handle the timer irq (decrementer
	 * trap) which was allocated early in
	 * __ipipe_early_core_setup().
	 */
	ipipe_request_irq(ipipe_root_domain,
			  IPIPE_TIMER_VIRQ,
			  __ipipe_do_timer, NULL,
			  NULL);

	ipipe_critical_exit(flags);
}

/*
 * Fill @info with the static system parameters a co-kernel needs:
 * online CPU count plus CPU/hrtimer/hrclock frequencies and the
 * per-CPU hrtimer interrupt (as seen from CPU0).  Always succeeds.
 */
int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
{
	info->sys_hrtimer_irq = per_cpu(ipipe_percpu.hrtimer_irq, 0);
	info->sys_hrtimer_freq = __ipipe_hrtimer_freq;
	info->sys_hrclock_freq = __ipipe_hrclock_freq;
	info->sys_cpu_freq = __ipipe_cpu_freq;
	info->sys_nr_cpus = num_online_cpus();

	return 0;
}
EXPORT_SYMBOL_GPL(ipipe_get_sysinfo);

/*
 * Common IRQ epilogue.  Returns non-zero when the caller may run
 * the regular Linux interrupt exit path, i.e. we interrupted the
 * root domain and it is not stalled.
 */
static int __ipipe_exit_irq(struct pt_regs *regs)
{
	int root = __ipipe_root_p;

	if (root) {
#ifdef CONFIG_PPC_970_NAP
		struct thread_info *ti = current_thread_info();
		/* Emulate the napping check when 100% sure we do run
		 * over the root context. */
		if (test_and_clear_bit(TLF_NAPPING, &ti->local_flags))
			regs->nip = regs->link;
#endif
#ifdef CONFIG_PPC64
		ppc64_runlatch_on();
#endif
	}

	/*
	 * Testing for user_regs() eliminates foreign stack contexts,
	 * including from legacy domains (CONFIG_IPIPE_LEGACY) which
	 * did not set the foreign stack bit (foreign stacks are
	 * always kernel-based).
	 */
	if (user_mode(regs) && ipipe_test_thread_flag(TIP_MAYDAY))
		__ipipe_call_mayday(regs);

	if (root && !test_bit(IPIPE_STALL_FLAG, &__ipipe_root_status))
		return 1;

	return 0;
}

/*
 * Low-level entry for external interrupts: fetch the hw IRQ from
 * the platform and push it down the pipeline.  Returns non-zero
 * when the root domain may run the regular Linux IRQ exit path
 * (see __ipipe_exit_irq()).
 */
int __ipipe_grab_irq(struct pt_regs *regs)
{
	int irq;

	irq = ppc_md.get_irq();
	if (unlikely(irq == NO_IRQ)) {
		__this_cpu_add(irq_stat.spurious_irqs, 1);
		return __ipipe_exit_irq(regs);
	}

	/*
	 * The early return above guarantees a valid IRQ here; the
	 * former "if (likely(irq != NO_IRQ))" re-check was dead code.
	 */
	ipipe_trace_irq_entry(irq);
#ifdef CONFIG_SMP
	/* Check for cascaded I-pipe IPIs. */
	if (irq == __ipipe_ipi_irq)
		__ipipe_ipi_demux(irq, regs);
	else
#endif /* CONFIG_SMP */
		__ipipe_handle_irq(irq, regs);

	ipipe_trace_irq_exit(irq);

	return __ipipe_exit_irq(regs);
}

/*
 * Root-stage handler for external IRQs: run the regular Linux
 * interrupt machinery over the per-CPU tick register frame.
 */
static void __ipipe_do_IRQ(unsigned int irq, void *cookie)
{
	struct pt_regs *frame, *saved;

	/* Any sensible register frame will do for non-timer IRQs. */
	frame = raw_cpu_ptr(&ipipe_percpu.tick_regs);
	saved = set_irq_regs(frame);
	___do_irq(irq, frame);
	set_irq_regs(saved);
}

/*
 * Root-stage handler for IPIPE_TIMER_VIRQ: forward the tick to the
 * regular Linux decrementer handler with the saved tick frame.
 */
static void __ipipe_do_timer(unsigned int irq, void *cookie)
{
	check_stack_overflow();
	timer_interrupt(raw_cpu_ptr(&ipipe_percpu.tick_regs));
}

/*
 * Low-level entry for the decrementer trap: rearm the decrementer,
 * snapshot the interrupted context into the per-CPU tick frame,
 * then relay the event over IPIPE_TIMER_VIRQ.  Returns non-zero
 * when the root domain may run the regular Linux IRQ exit path.
 */
int __ipipe_grab_timer(struct pt_regs *regs)
{
	struct pt_regs *tick_regs;
	struct ipipe_domain *ipd;

	ipd = __ipipe_current_domain;

	/* Push the decrementer far into the future. */
	set_dec(DECREMENTER_MAX);

	ipipe_trace_irq_entry(IPIPE_TIMER_VIRQ);

	tick_regs = raw_cpu_ptr(&ipipe_percpu.tick_regs);
	tick_regs->msr = regs->msr;
	tick_regs->nip = regs->nip;
	if (ipd != &ipipe_root)
		/* Tick should not be charged to Linux. */
		tick_regs->msr &= ~MSR_EE;

	__ipipe_handle_irq(IPIPE_TIMER_VIRQ, NULL);

	ipipe_trace_irq_exit(IPIPE_TIMER_VIRQ);

	return __ipipe_exit_irq(regs);
}

EXPORT_SYMBOL_GPL(show_stack);
EXPORT_SYMBOL_GPL(_switch);
#ifndef CONFIG_SMP
EXPORT_SYMBOL_GPL(last_task_used_math);
#endif
#ifdef CONFIG_IPIPE_LEGACY
#ifdef CONFIG_PPC64
EXPORT_PER_CPU_SYMBOL(ppc64_tlb_batch);
EXPORT_SYMBOL_GPL(switch_slb);
EXPORT_SYMBOL_GPL(__flush_tlb_pending);
#else  /* !CONFIG_PPC64 */
void atomic_set_mask(unsigned long mask, unsigned long *ptr);
void atomic_clear_mask(unsigned long mask, unsigned long *ptr);
EXPORT_SYMBOL_GPL(atomic_set_mask);
EXPORT_SYMBOL_GPL(atomic_clear_mask);
#endif	/* !CONFIG_PPC64 */
#endif /* CONFIG_IPIPE_LEGACY */