/* -*- linux-c -*-
 * include/linux/ipipe.h
 *
 * Copyright (C) 2002-2014 Philippe Gerum.
 *               2007 Jan Kiszka.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 * USA; either version 2 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#ifndef __LINUX_IPIPE_H
#define __LINUX_IPIPE_H

#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/thread_info.h>
#include <linux/ipipe_debug.h>
#include <asm/ptrace.h>
#ifdef CONFIG_HAVE_IPIPE_SUPPORT
#include <asm/ipipe.h>
#endif

struct cpuidle_device;
struct cpuidle_state;
struct kvm_vcpu;
struct ipipe_vm_notifier;
struct irq_desc;
struct task_struct;
struct mm_struct;

#ifdef CONFIG_IPIPE

#include <linux/ipipe_domain.h>

#define IPIPE_CORE_APIREV  CONFIG_IPIPE_CORE_APIREV

#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/ipipe_base.h>

struct pt_regs;
struct ipipe_domain;

struct ipipe_vm_notifier {
	void (*handler)(struct ipipe_vm_notifier *nfy);
};

static inline int ipipe_virtual_irq_p(unsigned int irq)
{
	return irq >= IPIPE_VIRQ_BASE && irq < IPIPE_NR_IRQS;
}

void __ipipe_init_early(void);

void __ipipe_init(void);

#ifdef CONFIG_PROC_FS
void __ipipe_init_proc(void);
#ifdef CONFIG_IPIPE_TRACE
void __ipipe_init_tracer(void);
#else /* !CONFIG_IPIPE_TRACE */
static inline void __ipipe_init_tracer(void) { }
#endif /* CONFIG_IPIPE_TRACE */
#else	/* !CONFIG_PROC_FS */
static inline void __ipipe_init_proc(void) { }
#endif	/* CONFIG_PROC_FS */

void __ipipe_restore_root_nosync(unsigned long x);

#define IPIPE_IRQF_NOACK    0x1
#define IPIPE_IRQF_NOSYNC   0x2

void __ipipe_dispatch_irq(unsigned int irq, int flags);

void __ipipe_do_sync_stage(void);

void __ipipe_do_sync_pipeline(struct ipipe_domain *top);

void __ipipe_lock_irq(unsigned int irq);

void __ipipe_unlock_irq(unsigned int irq);

void __ipipe_do_critical_sync(unsigned int irq, void *cookie);

void __ipipe_ack_edge_irq(struct irq_desc *desc);

void __ipipe_nop_irq(struct irq_desc *desc);

static inline void __ipipe_idle(void)
{
	ipipe_unstall_root();
}

#ifndef __ipipe_sync_check
#define __ipipe_sync_check	1
#endif

static inline void __ipipe_sync_stage(void)
{
	if (likely(__ipipe_sync_check))
		__ipipe_do_sync_stage();
}

#ifndef __ipipe_run_irqtail
#define __ipipe_run_irqtail(irq) do { } while(0)
#endif

int __ipipe_log_printk(const char *fmt, va_list args);
void __ipipe_flush_printk(unsigned int irq, void *cookie);

#define __ipipe_get_cpu(flags)	({ (flags) = hard_preempt_disable(); ipipe_processor_id(); })
#define __ipipe_put_cpu(flags)	hard_preempt_enable(flags)
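
/*
 * Usage sketch (illustrative, not part of the API proper): these
 * helpers pair like get_cpu()/put_cpu(), but disable interrupts at
 * hardware level, so the CPU binding holds for any pipeline domain:
 *
 *	unsigned long flags;
 *	int cpu = __ipipe_get_cpu(flags);
 *	...per-CPU work, hard IRQs off...
 *	__ipipe_put_cpu(flags);
 */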

int __ipipe_notify_kevent(int event, void *data);

#define __ipipe_report_sigwake(p)					\
	do {								\
		if (ipipe_notifier_enabled_p(p))			\
			__ipipe_notify_kevent(IPIPE_KEVT_SIGWAKE, p);	\
	} while (0)

struct ipipe_cpu_migration_data {
	struct task_struct *task;
	int dest_cpu;
};

#define __ipipe_report_setaffinity(__p, __dest_cpu)			\
	do {								\
		struct ipipe_cpu_migration_data d = {			\
			.task = (__p),					\
			.dest_cpu = (__dest_cpu),			\
		};							\
		if (ipipe_notifier_enabled_p(__p))			\
			__ipipe_notify_kevent(IPIPE_KEVT_SETAFFINITY, &d); \
	} while (0)

#define __ipipe_report_exit(p)						\
	do {								\
		if (ipipe_notifier_enabled_p(p))			\
			__ipipe_notify_kevent(IPIPE_KEVT_EXIT, p);	\
	} while (0)

#define __ipipe_report_setsched(p)					\
	do {								\
		if (ipipe_notifier_enabled_p(p))			\
			__ipipe_notify_kevent(IPIPE_KEVT_SETSCHED, p); \
	} while (0)

#define __ipipe_report_schedule(prev, next)				\
do {									\
	if (ipipe_notifier_enabled_p(next) ||				\
	    ipipe_notifier_enabled_p(prev)) {				\
		__this_cpu_write(ipipe_percpu.rqlock_owner, prev);	\
		__ipipe_notify_kevent(IPIPE_KEVT_SCHEDULE, next);	\
	}								\
} while (0)

#define __ipipe_report_cleanup(mm)					\
	__ipipe_notify_kevent(IPIPE_KEVT_CLEANUP, mm)

#define __ipipe_report_clockfreq_update(freq)				\
	__ipipe_notify_kevent(IPIPE_KEVT_CLOCKFREQ, &(freq))

struct ipipe_ptrace_resume_data {
	struct task_struct *task;
	long request;
};

#define __ipipe_report_ptrace_resume(__p, __request)			\
	do {								\
		struct ipipe_ptrace_resume_data d = {			\
			.task = (__p),					\
			.request = (__request),				\
		};							\
		if (ipipe_notifier_enabled_p(__p))			\
			__ipipe_notify_kevent(IPIPE_KEVT_PTRESUME, &d); \
	} while (0)

int __ipipe_notify_syscall(struct pt_regs *regs);

int __ipipe_notify_trap(int exception, struct pt_regs *regs);

#define __ipipe_report_trap(exception, regs)				\
	__ipipe_notify_trap(exception, regs)

void __ipipe_call_mayday(struct pt_regs *regs);

int __ipipe_notify_user_intreturn(void);

#define __ipipe_serial_debug(__fmt, __args...)	raw_printk(__fmt, ##__args)

#ifndef ipipe_root_nr_syscalls
#define ipipe_root_nr_syscalls(ti)	NR_syscalls
#endif

struct ipipe_trap_data {
	int exception;
	struct pt_regs *regs;
};

/* ipipe_set_hooks(..., enables) */
#define IPIPE_SYSCALL	__IPIPE_SYSCALL_E
#define IPIPE_TRAP	__IPIPE_TRAP_E
#define IPIPE_KEVENT	__IPIPE_KEVENT_E

struct ipipe_sysinfo {
	int sys_nr_cpus;	/* Number of CPUs on board */
	int sys_hrtimer_irq;	/* hrtimer device IRQ */
	u64 sys_hrtimer_freq;	/* hrtimer device frequency */
	u64 sys_hrclock_freq;	/* hrclock device frequency */
	u64 sys_cpu_freq;	/* CPU frequency (Hz) */
	struct ipipe_arch_sysinfo arch;
};

struct ipipe_work_header {
	size_t size;
	void (*handler)(struct ipipe_work_header *work);
};

extern unsigned int __ipipe_printk_virq;

void __ipipe_set_irq_pending(struct ipipe_domain *ipd, unsigned int irq);

void __ipipe_complete_domain_migration(void);

int __ipipe_switch_tail(void);

int __ipipe_migrate_head(void);

void __ipipe_reenter_root(void);

void __ipipe_share_current(int flags);

void __ipipe_arch_share_current(int flags);

int __ipipe_disable_ondemand_mappings(struct task_struct *p);

int __ipipe_pin_vma(struct mm_struct *mm, struct vm_area_struct *vma);

/*
 * Obsolete - no arch implements PIC muting anymore. Null helpers are
 * kept for building legacy co-kernel releases.
 */
static inline void ipipe_mute_pic(void) { }
static inline void ipipe_unmute_pic(void) { }

#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH

#define prepare_arch_switch(next)			\
	do {						\
		hard_local_irq_enable();		\
		__ipipe_report_schedule(current, next);	\
	} while(0)

#ifndef ipipe_get_active_mm
static inline struct mm_struct *ipipe_get_active_mm(void)
{
	return __this_cpu_read(ipipe_percpu.active_mm);
}
#define ipipe_get_active_mm ipipe_get_active_mm
#endif

#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */

#define prepare_arch_switch(next)			\
	do {						\
		__ipipe_report_schedule(current, next);	\
		hard_local_irq_disable();		\
	} while(0)

#ifndef ipipe_get_active_mm
#define ipipe_get_active_mm()	(current->active_mm)
#endif

#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */

static inline bool __ipipe_hrclock_ok(void)
{
	return __ipipe_hrclock_freq != 0;
}

static inline void __ipipe_nmi_enter(void)
{
	__this_cpu_write(ipipe_percpu.nmi_state, __ipipe_root_status);
	__set_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);
	ipipe_save_context_nmi();
}

static inline void __ipipe_nmi_exit(void)
{
	ipipe_restore_context_nmi();
	if (!test_bit(IPIPE_STALL_FLAG, raw_cpu_ptr(&ipipe_percpu.nmi_state)))
		__clear_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);
}

/* KVM-side calls, hw IRQs off. */
static inline void __ipipe_enter_vm(struct ipipe_vm_notifier *vmf)
{
	struct ipipe_percpu_data *p;

	p = raw_cpu_ptr(&ipipe_percpu);
	p->vm_notifier = vmf;
	barrier();
}

static inline void __ipipe_exit_vm(void)
{
	struct ipipe_percpu_data *p;

	p = raw_cpu_ptr(&ipipe_percpu);
	p->vm_notifier = NULL;
	barrier();
}

/* Client-side call, hw IRQs off. */
void __ipipe_notify_vm_preemption(void);

static inline void __ipipe_sync_pipeline(struct ipipe_domain *top)
{
	if (__ipipe_current_domain != top) {
		__ipipe_do_sync_pipeline(top);
		return;
	}
	if (!test_bit(IPIPE_STALL_FLAG, &ipipe_this_cpu_context(top)->status))
		__ipipe_sync_stage();
}

void ipipe_register_head(struct ipipe_domain *ipd,
			 const char *name);

void ipipe_unregister_head(struct ipipe_domain *ipd);

int ipipe_request_irq(struct ipipe_domain *ipd,
		      unsigned int irq,
		      ipipe_irq_handler_t handler,
		      void *cookie,
		      ipipe_irq_ackfn_t ackfn);

void ipipe_free_irq(struct ipipe_domain *ipd,
		    unsigned int irq);

void ipipe_raise_irq(unsigned int irq);
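
/*
 * Registration sketch (illustrative; my_domain, my_handler and
 * my_cookie are hypothetical names, irq is a hardware IRQ number): a
 * co-kernel typically promotes its domain to pipeline head, then
 * installs its high-priority IRQ handlers there:
 *
 *	static struct ipipe_domain my_domain;
 *
 *	ipipe_register_head(&my_domain, "my-head");
 *	ipipe_request_irq(&my_domain, irq, my_handler, my_cookie, NULL);
 *	...
 *	ipipe_free_irq(&my_domain, irq);
 *	ipipe_unregister_head(&my_domain);
 */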

void ipipe_set_hooks(struct ipipe_domain *ipd,
		     int enables);

int ipipe_handle_syscall(struct thread_info *ti,
			 unsigned long nr, struct pt_regs *regs);

unsigned int ipipe_alloc_virq(void);

void ipipe_free_virq(unsigned int virq);

static inline void ipipe_post_irq_head(unsigned int irq)
{
	__ipipe_set_irq_pending(ipipe_head_domain, irq);
}

static inline void ipipe_post_irq_root(unsigned int irq)
{
	__ipipe_set_irq_pending(&ipipe_root, irq);
}
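
/*
 * VIRQ sketch (illustrative; my_virq_handler is hypothetical):
 * virtual IRQs are the usual way for the head domain to defer work
 * to the root domain. The handler runs once the root stage
 * eventually synchronizes its event log:
 *
 *	unsigned int virq = ipipe_alloc_virq();
 *	ipipe_request_irq(ipipe_root_domain, virq,
 *			  my_virq_handler, NULL, NULL);
 *	...
 *	ipipe_post_irq_root(virq);	(from head context)
 *	...
 *	ipipe_free_virq(virq);
 */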

static inline void ipipe_stall_head(void)
{
	hard_local_irq_disable();
	__set_bit(IPIPE_STALL_FLAG, &__ipipe_head_status);
}

static inline unsigned long ipipe_test_and_stall_head(void)
{
	hard_local_irq_disable();
	return __test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_head_status);
}

static inline unsigned long ipipe_test_head(void)
{
	unsigned long flags, ret;

	flags = hard_smp_local_irq_save();
	ret = test_bit(IPIPE_STALL_FLAG, &__ipipe_head_status);
	hard_smp_local_irq_restore(flags);

	return ret;
}

void ipipe_unstall_head(void);

void __ipipe_restore_head(unsigned long x);

static inline void ipipe_restore_head(unsigned long x)
{
	ipipe_check_irqoff();
	if ((x ^ test_bit(IPIPE_STALL_FLAG, &__ipipe_head_status)) & 1)
		__ipipe_restore_head(x);
}
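
/*
 * Stall/restore sketch (illustrative): the head stall bit acts as a
 * virtual interrupt-disable flag for the head stage, so the
 * test-and-stall/restore pair mirrors what local_irq_save() and
 * local_irq_restore() do for the root stage:
 *
 *	unsigned long s = ipipe_test_and_stall_head();
 *	...head stage will not be synchronized here...
 *	ipipe_restore_head(s);
 */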

void __ipipe_post_work_root(struct ipipe_work_header *work);

#define ipipe_post_work_root(p, header)			\
	do {						\
		void header_not_at_start(void);		\
		if (offsetof(typeof(*(p)), header)) {	\
			header_not_at_start();		\
		}					\
		__ipipe_post_work_root(&(p)->header);	\
	} while (0)
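
/*
 * Work posting sketch (illustrative; struct my_work and
 * my_work_handler are hypothetical): ipipe_post_work_root() fails to
 * link unless the ipipe_work_header sits at offset zero of the
 * enclosing structure, which lets the handler recover the full
 * object from the header pointer:
 *
 *	struct my_work {
 *		struct ipipe_work_header head;	(must come first)
 *		int payload;
 *	};
 *
 *	static void my_work_handler(struct ipipe_work_header *work)
 *	{
 *		struct my_work *w =
 *			container_of(work, struct my_work, head);
 *		...
 *	}
 *
 *	struct my_work w = {
 *		.head = {
 *			.size = sizeof(w),
 *			.handler = my_work_handler,
 *		},
 *		.payload = 42,
 *	};
 *	ipipe_post_work_root(&w, head);
 */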

int ipipe_get_sysinfo(struct ipipe_sysinfo *sysinfo);
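
/*
 * Query sketch (illustrative): fills in the timer and clock
 * parameters a co-kernel wants at init time:
 *
 *	struct ipipe_sysinfo info;
 *	if (ipipe_get_sysinfo(&info) == 0)
 *		printk("hrclock at %llu Hz\n",
 *		       (unsigned long long)info.sys_hrclock_freq);
 */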

unsigned long ipipe_critical_enter(void (*syncfn)(void));

void ipipe_critical_exit(unsigned long flags);

void ipipe_prepare_panic(void);

#ifdef CONFIG_SMP
#ifndef ipipe_smp_p
#define ipipe_smp_p (1)
#endif
void ipipe_set_irq_affinity(unsigned int irq, cpumask_t cpumask);
void ipipe_send_ipi(unsigned int ipi, cpumask_t cpumask);
#else  /* !CONFIG_SMP */
#define ipipe_smp_p (0)
static inline
void ipipe_set_irq_affinity(unsigned int irq, cpumask_t cpumask) { }
static inline void ipipe_send_ipi(unsigned int ipi, cpumask_t cpumask) { }
static inline void ipipe_disable_smp(void) { }
#endif	/* CONFIG_SMP */

static inline void ipipe_restore_root_nosync(unsigned long x)
{
	unsigned long flags;

	flags = hard_smp_local_irq_save();
	__ipipe_restore_root_nosync(x);
	hard_smp_local_irq_restore(flags);
}

/* Must be called hw IRQs off. */
static inline void ipipe_lock_irq(unsigned int irq)
{
	struct ipipe_domain *ipd = __ipipe_current_domain;
	if (ipd == ipipe_root_domain)
		__ipipe_lock_irq(irq);
}

/* Must be called hw IRQs off. */
static inline void ipipe_unlock_irq(unsigned int irq)
{
	struct ipipe_domain *ipd = __ipipe_current_domain;
	if (ipd == ipipe_root_domain)
		__ipipe_unlock_irq(irq);
}

static inline struct ipipe_threadinfo *ipipe_current_threadinfo(void)
{
	return &current_thread_info()->ipipe_data;
}

#define ipipe_task_threadinfo(p) (&task_thread_info(p)->ipipe_data)

int ipipe_enable_irq(unsigned int irq);

static inline void ipipe_disable_irq(unsigned int irq)
{
	struct irq_desc *desc;
	struct irq_chip *chip;

	desc = irq_to_desc(irq);
	if (desc == NULL)
		return;

	chip = irq_desc_get_chip(desc);

	if (WARN_ON_ONCE(chip->irq_disable == NULL && chip->irq_mask == NULL))
		return;

	if (chip->irq_disable)
		chip->irq_disable(&desc->irq_data);
	else
		chip->irq_mask(&desc->irq_data);
}

static inline void ipipe_end_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc)
		desc->ipipe_end(desc);
}

static inline int ipipe_chained_irq_p(struct irq_desc *desc)
{
	void __ipipe_chained_irq(struct irq_desc *desc);

	return desc->handle_irq == __ipipe_chained_irq;
}

static inline void ipipe_handle_demuxed_irq(unsigned int cascade_irq)
{
	ipipe_trace_irq_entry(cascade_irq);
	__ipipe_dispatch_irq(cascade_irq, IPIPE_IRQF_NOSYNC);
	ipipe_trace_irq_exit(cascade_irq);
}

static inline void __ipipe_init_threadflags(struct thread_info *ti)
{
	ti->ipipe_flags = 0;
}

static inline
void ipipe_set_ti_thread_flag(struct thread_info *ti, int flag)
{
	set_bit(flag, &ti->ipipe_flags);
}

static inline
void ipipe_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	clear_bit(flag, &ti->ipipe_flags);
}

static inline
int ipipe_test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_clear_bit(flag, &ti->ipipe_flags);
}

static inline
int ipipe_test_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_bit(flag, &ti->ipipe_flags);
}

#define ipipe_set_thread_flag(flag) \
	ipipe_set_ti_thread_flag(current_thread_info(), flag)

#define ipipe_clear_thread_flag(flag) \
	ipipe_clear_ti_thread_flag(current_thread_info(), flag)

#define ipipe_test_and_clear_thread_flag(flag) \
	ipipe_test_and_clear_ti_thread_flag(current_thread_info(), flag)

#define ipipe_test_thread_flag(flag) \
	ipipe_test_ti_thread_flag(current_thread_info(), flag)

#define ipipe_enable_notifier(p)					\
	ipipe_set_ti_thread_flag(task_thread_info(p), TIP_NOTIFY)

#define ipipe_disable_notifier(p)					\
	do {								\
		struct thread_info *ti = task_thread_info(p);		\
		ipipe_clear_ti_thread_flag(ti, TIP_NOTIFY);		\
		ipipe_clear_ti_thread_flag(ti, TIP_MAYDAY);		\
	} while (0)

#define ipipe_notifier_enabled_p(p)					\
	ipipe_test_ti_thread_flag(task_thread_info(p), TIP_NOTIFY)

#define ipipe_raise_mayday(p)						\
	do {								\
		struct thread_info *ti = task_thread_info(p);		\
		ipipe_check_irqoff();					\
		if (ipipe_test_ti_thread_flag(ti, TIP_NOTIFY))		\
			ipipe_set_ti_thread_flag(ti, TIP_MAYDAY);	\
	} while (0)

#define ipipe_enable_user_intret_notifier()				\
	ipipe_set_thread_flag(TIP_USERINTRET)

#define ipipe_disable_user_intret_notifier()				\
	ipipe_clear_thread_flag(TIP_USERINTRET)

#define ipipe_user_intret_notifier_enabled(ti)				\
	ipipe_test_ti_thread_flag(ti, TIP_USERINTRET)

#ifdef CONFIG_IPIPE_TRACE
void __ipipe_tracer_hrclock_initialized(void);
#else /* !CONFIG_IPIPE_TRACE */
#define __ipipe_tracer_hrclock_initialized()	do { } while(0)
#endif /* !CONFIG_IPIPE_TRACE */

#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
#define ipipe_mm_switch_protect(__flags)	do { (void)(__flags); } while (0)
#define ipipe_mm_switch_unprotect(__flags)	do { (void)(__flags); } while (0)
#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
#define ipipe_mm_switch_protect(__flags)		\
	do {						\
		(__flags) = hard_local_irq_save();	\
	} while (0)
#define ipipe_mm_switch_unprotect(__flags)		\
	do {						\
		hard_local_irq_restore(__flags);	\
	} while (0)
#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */

bool ipipe_enter_cpuidle(struct cpuidle_device *dev,
			 struct cpuidle_state *state);

#else	/* !CONFIG_IPIPE */

static inline void __ipipe_init_early(void) { }

static inline void __ipipe_init(void) { }

static inline void __ipipe_init_proc(void) { }

static inline void __ipipe_idle(void) { }

static inline void __ipipe_report_sigwake(struct task_struct *p) { }

static inline void __ipipe_report_setaffinity(struct task_struct *p,
					      int dest_cpu) { }

static inline void __ipipe_report_setsched(struct task_struct *p) { }

static inline void __ipipe_report_exit(struct task_struct *p) { }

static inline void __ipipe_report_cleanup(struct mm_struct *mm) { }

static inline void __ipipe_report_ptrace_resume(struct task_struct *p,
						long request) { }

#define __ipipe_report_trap(exception, regs)  0

#define hard_preempt_disable()		({ preempt_disable(); 0; })
#define hard_preempt_enable(flags)	({ preempt_enable(); (void)(flags); })

#define __ipipe_get_cpu(flags)		({ (void)(flags); get_cpu(); })
#define __ipipe_put_cpu(flags)		\
	do {				\
		(void)(flags);		\
		put_cpu();		\
	} while (0)

#define __ipipe_root_tick_p(regs)	1

#define ipipe_handle_domain_irq(__domain, __hwirq, __regs)	\
	handle_domain_irq(__domain, __hwirq, __regs)

#define ipipe_handle_demuxed_irq(irq)		generic_handle_irq(irq)

#define __ipipe_enter_vm(vmf)	do { } while (0)

static inline void __ipipe_exit_vm(void) { }

static inline void __ipipe_notify_vm_preemption(void) { }

#define __ipipe_notify_user_intreturn()	0

#define __ipipe_serial_debug(__fmt, __args...)	do { } while (0)

#define __ipipe_root_p		1
#define ipipe_root_p		1

#define ipipe_mm_switch_protect(__flags)	do { (void)(__flags); } while (0)
#define ipipe_mm_switch_unprotect(__flags)	do { (void)(__flags); } while (0)

static inline void __ipipe_init_threadflags(struct thread_info *ti) { }

static inline void __ipipe_complete_domain_migration(void) { }

static inline int __ipipe_switch_tail(void)
{
	return 0;
}

static inline void __ipipe_nmi_enter(void) { }

static inline void __ipipe_nmi_exit(void) { }

#define ipipe_processor_id()	smp_processor_id()

static inline void ipipe_lock_irq(unsigned int irq) { }

static inline void ipipe_unlock_irq(unsigned int irq) { }

static inline
int ipipe_handle_syscall(struct thread_info *ti,
			 unsigned long nr, struct pt_regs *regs)
{
	return 0;
}

static inline
bool ipipe_enter_cpuidle(struct cpuidle_device *dev,
			 struct cpuidle_state *state)
{
	return true;
}

#define ipipe_user_intret_notifier_enabled(ti)	0

#endif	/* !CONFIG_IPIPE */

#ifdef CONFIG_IPIPE_WANT_PTE_PINNING
void __ipipe_pin_mapping_globally(unsigned long start,
				  unsigned long end);
#else
static inline void __ipipe_pin_mapping_globally(unsigned long start,
						unsigned long end)
{ }
#endif

#endif	/* !__LINUX_IPIPE_H */