#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_

#include <asm-generic/5level-fixup.h>

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/bug.h>
#endif

/*
 * Common bits between hash and Radix page table
 */
#define _PAGE_BIT_SWAP_TYPE	0

#define _PAGE_RO		0
#define _PAGE_SHARED		0

#define _PAGE_EXEC		0x00001 /* execute permission */
#define _PAGE_WRITE		0x00002 /* write access allowed */
#define _PAGE_READ		0x00004	/* read access allowed */
#define _PAGE_RW		(_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX		(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED	0x00008 /* kernel access only */
#define _PAGE_SAO		0x00010 /* Strong access order */
#define _PAGE_NON_IDEMPOTENT	0x00020 /* non idempotent memory */
#define _PAGE_TOLERANT		0x00030 /* tolerant memory, cache inhibited */
#define _PAGE_DIRTY		0x00080 /* C: page changed */
#define _PAGE_ACCESSED		0x00100 /* R: page referenced */
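
/*
 * Illustrative example (not part of the original header): a normal user
 * read-write page that has been referenced and modified would carry,
 * among others,
 *
 *	_PAGE_PRESENT | _PAGE_PTE | _PAGE_READ | _PAGE_WRITE |
 *	_PAGE_ACCESSED | _PAGE_DIRTY
 *
 * while a kernel-only mapping would additionally set _PAGE_PRIVILEGED.
 */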
/*
 * Software bits
 */
#define _RPAGE_SW0		0x2000000000000000UL
#define _RPAGE_SW1		0x00800
#define _RPAGE_SW2		0x00400
#define _RPAGE_SW3		0x00200
#define _RPAGE_RSV1		0x1000000000000000UL
#define _RPAGE_RSV2		0x0800000000000000UL
#define _RPAGE_RSV3		0x0400000000000000UL
#define _RPAGE_RSV4		0x0200000000000000UL

#define _PAGE_PTE		0x4000000000000000UL	/* distinguishes PTEs from pointers */
#define _PAGE_PRESENT		0x8000000000000000UL	/* pte contains a translation */

/*
 * Top and bottom bits of RPN which can be used by hash
 * translation mode, because we expect them to be zero
 * otherwise.
 */
#define _RPAGE_RPN0		0x01000
#define _RPAGE_RPN1		0x02000
#define _RPAGE_RPN44		0x0100000000000000UL
#define _RPAGE_RPN43		0x0080000000000000UL
#define _RPAGE_RPN42		0x0040000000000000UL
#define _RPAGE_RPN41		0x0020000000000000UL

/* Max physical address bit as per radix table */
#define _RPAGE_PA_MAX		57

/*
 * Max physical address bit we will use for now.
 *
 * This is mostly a hardware limitation and for now Power9 has
 * a 51 bit limit.
 *
 * This is different from the number of physical bits required to address
 * the last byte of memory. That is defined by MAX_PHYSMEM_BITS.
 * MAX_PHYSMEM_BITS is a Linux limitation imposed by the maximum
 * number of sections we can support (SECTIONS_SHIFT).
 *
 * This is different from the Radix page table limitation above and
 * should always be less than that. The limit is chosen such that
 * we can overload the bits between _RPAGE_PA_MAX and _PAGE_PA_MAX
 * for hash linux page table specific bits.
 *
 * In order to be compatible with future hardware generations we keep
 * some offsets and limit this for now to 53.
 */
#define _PAGE_PA_MAX		53

#define _PAGE_SOFT_DIRTY	_RPAGE_SW3 /* software: software dirty tracking */
#define _PAGE_SPECIAL		_RPAGE_SW2 /* software: special page */
#define _PAGE_DEVMAP		_RPAGE_SW1 /* software: ZONE_DEVICE page */
#define __HAVE_ARCH_PTE_DEVMAP

/*
 * Drivers request cache-inhibited pte mappings using _PAGE_NO_CACHE.
 * Instead of fixing all of them, add an alternate define which
 * maps to the CI pte mapping.
 */
#define _PAGE_NO_CACHE		_PAGE_TOLERANT
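
/*
 * Usage sketch (illustrative only): a legacy driver building a
 * cache-inhibited mapping by hand might do
 *
 *	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_NO_CACHE);
 *
 * which, via the define above, resolves to _PAGE_TOLERANT. New code
 * should prefer the pgprot_noncached()/pgprot_noncached_wc() helpers
 * defined later in this file.
 */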
/*
 * We support _RPAGE_PA_MAX bit real address in pte. On the linux side
 * we are limited by _PAGE_PA_MAX. Clear everything above _PAGE_PA_MAX
 * and everything below PAGE_SHIFT.
 */
#define PTE_RPN_MASK	(((1UL << _PAGE_PA_MAX) - 1) & (PAGE_MASK))
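
/*
 * Worked example (illustrative, assuming 64K pages, i.e. PAGE_SHIFT = 16):
 * PTE_RPN_MASK then covers bits 16..52 of the pte,
 *
 *	((1UL << 53) - 1) & ~((1UL << 16) - 1) == 0x001fffffffff0000UL
 *
 * leaving the low bits for permission/status flags and the high bits
 * for _PAGE_PTE/_PAGE_PRESENT and the software/reserved bits above.
 */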
/*
 * set of bits not changed in pmd_modify. Even though we have hash specific bits
 * in here, on radix we expect them to be zero.
 */
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
			 _PAGE_SOFT_DIRTY)
/*
 * user access blocked by key
 */
#define _PAGE_KERNEL_RW		(_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO		(_PAGE_PRIVILEGED | _PAGE_READ)
#define _PAGE_KERNEL_ROX	(_PAGE_PRIVILEGED | _PAGE_READ | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_PRIVILEGED | _PAGE_DIRTY |	\
				 _PAGE_RW | _PAGE_EXEC)
/*
 * No page size encoding in the linux PTE
 */
#define _PAGE_PSIZE		0
/*
 * _PAGE_CHG_MASK masks of bits that are to be preserved across
 * pgprot changes
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE |	\
			 _PAGE_SOFT_DIRTY)
/*
 * Mask of bits returned by pte_pgprot()
 */
#define PAGE_PROT_BITS  (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT | \
			 H_PAGE_4K_PFN | _PAGE_PRIVILEGED | _PAGE_ACCESSED | \
			 _PAGE_READ | _PAGE_WRITE |  _PAGE_DIRTY | _PAGE_EXEC | \
			 _PAGE_SOFT_DIRTY)
/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
#define _PAGE_BASE	(_PAGE_BASE_NC)

/*
 * Permission masks used to generate the __P and __S table,
 *
 * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
 *
 * Write permissions imply read permissions for now (we could make write-only
 * pages on BookE but we don't bother for now). Execute permission control is
 * possible on platforms that define _PAGE_EXEC.
 *
 * Note: due to the way vm flags are laid out, the bits are XWR
 */
#define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_X
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_X
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED_X
#define __S111	PAGE_SHARED_X
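
/*
 * Illustrative reading of the tables above (bits are in XWR order): a
 * MAP_SHARED region with PROT_READ | PROT_WRITE indexes __S011 and gets
 * PAGE_SHARED, while the same protection on a private mapping indexes
 * __P011 and gets PAGE_COPY, i.e. read-only until the copy-on-write
 * fault upgrades it.
 */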

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_TOLERANT)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NON_IDEMPOTENT)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make modules code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
#define PAGE_AGP		(PAGE_KERNEL_NC)

#ifndef __ASSEMBLY__
/*
 * page table defines
 */
extern unsigned long __pte_index_size;
extern unsigned long __pmd_index_size;
extern unsigned long __pud_index_size;
extern unsigned long __pgd_index_size;
extern unsigned long __pmd_cache_index;
#define PTE_INDEX_SIZE  __pte_index_size
#define PMD_INDEX_SIZE  __pmd_index_size
#define PUD_INDEX_SIZE  __pud_index_size
#define PGD_INDEX_SIZE  __pgd_index_size
#define PMD_CACHE_INDEX __pmd_cache_index
/*
 * Because of the use of pte fragments and THP, the size of the page tables
 * is not always derived from the index sizes above.
 */
extern unsigned long __pte_table_size;
extern unsigned long __pmd_table_size;
extern unsigned long __pud_table_size;
extern unsigned long __pgd_table_size;
#define PTE_TABLE_SIZE	__pte_table_size
#define PMD_TABLE_SIZE	__pmd_table_size
#define PUD_TABLE_SIZE	__pud_table_size
#define PGD_TABLE_SIZE	__pgd_table_size

extern unsigned long __pmd_val_bits;
extern unsigned long __pud_val_bits;
extern unsigned long __pgd_val_bits;
#define PMD_VAL_BITS	__pmd_val_bits
#define PUD_VAL_BITS	__pud_val_bits
#define PGD_VAL_BITS	__pgd_val_bits

extern unsigned long __pte_frag_nr;
#define PTE_FRAG_NR __pte_frag_nr
extern unsigned long __pte_frag_size_shift;
#define PTE_FRAG_SIZE_SHIFT __pte_frag_size_shift
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PUD	(1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PUD_SHIFT determines what a third-level page table entry can map */
#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
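
/*
 * Worked example (illustrative, for a 64K-page configuration with
 * PAGE_SHIFT = 16 and index sizes 8/5/5/12): PMD_SHIFT = 24, so one pmd
 * entry maps 16MB; PUD_SHIFT = 29 maps 512MB per pud entry; and
 * PGDIR_SHIFT = 34 maps 16GB per pgd entry.
 */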

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS		0xc0000000000000ffUL
/* Bits to mask out from a PUD to get to the PMD page */
#define PUD_MASKED_BITS		0xc0000000000000ffUL
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0xc0000000000000ffUL

extern unsigned long __vmalloc_start;
extern unsigned long __vmalloc_end;
#define VMALLOC_START	__vmalloc_start
#define VMALLOC_END	__vmalloc_end

extern unsigned long __kernel_virt_start;
extern unsigned long __kernel_virt_size;
#define KERN_VIRT_START __kernel_virt_start
#define KERN_VIRT_SIZE  __kernel_virt_size
extern struct page *vmemmap;
extern unsigned long ioremap_bot;
extern unsigned long pci_io_base;
#endif /* __ASSEMBLY__ */

#include <asm/book3s/64/hash.h>
#include <asm/book3s/64/radix.h>

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/pgtable-64k.h>
#else
#include <asm/book3s/64/pgtable-4k.h>
#endif

#include <asm/barrier.h>
/*
 * The second half of the kernel virtual space is used for IO mappings,
 * it's itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define  ISA_IO_BASE	(KERN_IO_START)
#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define  PHB_IO_BASE	(ISA_IO_END)
#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)

/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors, it's
 * used in all cases except Book3S with 64K pages where we have a
 * concept of sub-pages
 */
#ifndef __real_pte

#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)       \
	do {							         \
		index = 0;					         \
		shift = mmu_psize_defs[psize].shift;		         \

#define pte_iterate_hashed_end() } while(0)

/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K

#endif /* __real_pte */
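
/*
 * Usage sketch (illustrative): the iterator expands to a loop
 * header/footer pair, so callers bracket per-subpage work with it:
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		... flush or update the hash slot for this subpage ...
 *	} pte_iterate_hashed_end();
 *
 * With the default implementation above, index stays 0 and the body
 * runs exactly once.
 */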

static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       unsigned long set, int huge)
{
	if (radix_enabled())
		return radix__pte_update(mm, addr, ptep, clr, set, huge);
	return hash__pte_update(mm, addr, ptep, clr, set, huge);
}
/*
 * For hash even if we have _PAGE_ACCESSED = 0, we do a pte_update.
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally.
 *
 * For radix: H_PAGE_HASHPTE should be zero. Hence we can use the same
 * function for both hash and radix.
 */
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)	\
({								\
	int __r;						\
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;							\
})

static inline int __pte_write(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
}

#ifdef CONFIG_NUMA_BALANCING
#define pte_savedwrite pte_savedwrite
static inline bool pte_savedwrite(pte_t pte)
{
	/*
	 * Saved write ptes are prot none ptes that don't have the
	 * privileged bit set. We mark prot none as one which has
	 * present and privileged bit set and RWX cleared. To mark
	 * protnone which used to have _PAGE_WRITE set we clear
	 * the privileged bit.
	 */
	return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
}
#else
#define pte_savedwrite pte_savedwrite
static inline bool pte_savedwrite(pte_t pte)
{
	return false;
}
#endif
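
/*
 * Summary of the encoding used above (illustrative): with R/W/X all
 * cleared the pte is protnone, and _PAGE_PRIVILEGED distinguishes the
 * two flavours:
 *
 *	_PAGE_PRESENT | _PAGE_PRIVILEGED   -> plain protnone
 *	_PAGE_PRESENT, privileged clear    -> protnone with saved write
 */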

static inline int pte_write(pte_t pte)
{
	return __pte_write(pte) || pte_savedwrite(pte);
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	if (__pte_write(*ptep))
		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
	else if (unlikely(pte_savedwrite(*ptep)))
		pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	/*
	 * We should not find protnone for hugetlb, but this completes the
	 * interface.
	 */
	if (__pte_write(*ptep))
		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
	else if (unlikely(pte_savedwrite(*ptep)))
		pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
	return __pte(old);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full && radix_enabled()) {
		/*
		 * Let's skip the DD1 style pte update here. We know that
		 * this is a full mm pte clear and hence can be sure there is
		 * no parallel set_pte.
		 */
		return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
	}
	return ptep_get_and_clear(mm, addr, ptep);
}


static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t * ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}

static inline int pte_dirty(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
}

static inline int pte_young(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_ACCESSED));
}

static inline int pte_special(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SPECIAL));
}

static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SOFT_DIRTY));
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE | _PAGE_RWX)) ==
		cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
}

#define pte_mk_savedwrite pte_mk_savedwrite
static inline pte_t pte_mk_savedwrite(pte_t pte)
{
	/*
	 * Used by Autonuma subsystem to preserve the write bit
	 * while marking the pte PROT_NONE. Only allow this
	 * on a PROT_NONE pte.
	 */
	VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) !=
		  cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED));
	return __pte(pte_val(pte) & ~_PAGE_PRIVILEGED);
}

#define pte_clear_savedwrite pte_clear_savedwrite
static inline pte_t pte_clear_savedwrite(pte_t pte)
{
	/*
	 * Used by KSM subsystem to make a protnone pte readonly.
	 */
	VM_BUG_ON(!pte_protnone(pte));
	return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
}
#else
#define pte_clear_savedwrite pte_clear_savedwrite
static inline pte_t pte_clear_savedwrite(pte_t pte)
{
	VM_WARN_ON(1);
	return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pte_present(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT));
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte((((pte_basic_t)(pfn) << PAGE_SHIFT) & PTE_RPN_MASK) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PAGE_SHIFT;
}
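
/*
 * Round-trip sketch (illustrative): because PTE_RPN_MASK is aligned to
 * PAGE_SHIFT, pfn_pte()/pte_pfn() are exact inverses for any pfn that
 * fits in _PAGE_PA_MAX bits:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	BUG_ON(pte_pfn(pte) != pfn);
 */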

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	if (unlikely(pte_savedwrite(pte)))
		return pte_clear_savedwrite(pte);
	return __pte(pte_val(pte) & ~_PAGE_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	/*
	 * write implies read, hence set both
	 */
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL | _PAGE_DEVMAP);
}

static inline int pte_devmap(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DEVMAP));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/* FIXME!! check whether this needs to be a conditional */
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

static inline bool pte_user(pte_t pte)
{
	return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
}

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	/*							\
	 * Don't have overlapping bits with _PAGE_HPTEFLAGS	\
	 * We filter HPTEFLAGS on set_pte.			\
	 */							\
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY);	\
	} while (0)
/*
 * On pte we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT.
 */
#define SWP_TYPE_BITS 5
#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)		(((x).val & PTE_RPN_MASK) >> PAGE_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
				((type) << _PAGE_BIT_SWAP_TYPE) \
				| (((offset) << PAGE_SHIFT) & PTE_RPN_MASK)})
/*
 * swp_entry_t must be independent of pte bits. We build a swp_entry_t from
 * swap type and offset we get from swap and convert that to pte to find a
 * matching pte in linux page table.
 * Clear bits not found in swap entries here.
 */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)
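
/*
 * Worked example (illustrative, assuming 64K pages): __swp_entry(3, 5)
 * puts the type in bits 0-4 and shifts the offset up by PAGE_SHIFT,
 * giving val = 3 | (5 << 16) = 0x50003. __swp_type() and __swp_offset()
 * recover 3 and 5 respectively, and __swp_entry_to_pte() merely adds
 * _PAGE_PTE on top.
 */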

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY   (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
#else
#define _PAGE_SWP_SOFT_DIRTY	0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}

static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

static inline bool check_pte_access(unsigned long access, unsigned long ptev)
{
	/*
	 * This checks for the _PAGE_RWX and _PAGE_PRESENT bits.
	 */
	if (access & ~ptev)
		return false;
	/*
	 * This checks for access to privileged space.
	 */
	if ((access & _PAGE_PRIVILEGED) != (ptev & _PAGE_PRIVILEGED))
		return false;

	return true;
}
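
/*
 * Usage sketch (illustrative): a fault path that must know whether a
 * user write is permitted passes the required bits in "access", e.g.
 *
 *	if (!check_pte_access(_PAGE_WRITE | _PAGE_READ, pte_val(pte)))
 *		return false;
 *
 * This fails both when the pte lacks a requested R/W/X bit and when
 * _PAGE_PRIVILEGED does not match the privilege level of the access.
 */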
/*
 * Generic functions with hash/radix callbacks
 */

static inline void __ptep_set_access_flags(struct mm_struct *mm,
					   pte_t *ptep, pte_t entry,
					   unsigned long address)
{
	if (radix_enabled())
		return radix__ptep_set_access_flags(mm, ptep, entry, address);
	return hash__ptep_set_access_flags(ptep, entry);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	if (radix_enabled())
		return radix__pte_same(pte_a, pte_b);
	return hash__pte_same(pte_a, pte_b);
}

static inline int pte_none(pte_t pte)
{
	if (radix_enabled())
		return radix__pte_none(pte);
	return hash__pte_none(pte);
}

static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	if (radix_enabled())
		return radix__set_pte_at(mm, addr, ptep, pte, percpu);
	return hash__set_pte_at(mm, addr, ptep, pte, percpu);
}

#define _PAGE_CACHE_CTL	(_PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NON_IDEMPOTENT);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_TOLERANT);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL));
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
/*
 * Check whether a pte mapping has the cache inhibited property.
 */
static inline bool pte_ci(pte_t pte)
{
	unsigned long pte_v = pte_val(pte);

	if (((pte_v & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) ||
	    ((pte_v & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT))
		return true;
	return false;
}
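
/*
 * Usage sketch (illustrative): a driver mapping device registers would
 * typically combine these helpers with remap_pfn_range():
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 *
 * Any pte created that way satisfies pte_ci() above, since both
 * _PAGE_TOLERANT and _PAGE_NON_IDEMPOTENT count as cache inhibited.
 */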

static inline void pmd_set(pmd_t *pmdp, unsigned long val)
{
	*pmdp = __pmd(val);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_raw(pmd);
}

static inline int pmd_present(pmd_t pmd)
{
	return !pmd_none(pmd);
}

static inline int pmd_bad(pmd_t pmd)
{
	if (radix_enabled())
		return radix__pmd_bad(pmd);
	return hash__pmd_bad(pmd);
}

static inline void pud_set(pud_t *pudp, unsigned long val)
{
	*pudp = __pud(val);
}

static inline void pud_clear(pud_t *pudp)
{
	*pudp = __pud(0);
}

static inline int pud_none(pud_t pud)
{
	return !pud_raw(pud);
}

static inline int pud_present(pud_t pud)
{
	return !pud_none(pud);
}

extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);
static inline pte_t pud_pte(pud_t pud)
{
	return __pte_raw(pud_raw(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud_raw(pte_raw(pte));
}
#define pud_write(pud)		pte_write(pud_pte(pud))

static inline int pud_bad(pud_t pud)
{
	if (radix_enabled())
		return radix__pud_bad(pud);
	return hash__pud_bad(pud);
}


#define pgd_write(pgd)		pte_write(pgd_pte(pgd))
static inline void pgd_set(pgd_t *pgdp, unsigned long val)
{
	*pgdp = __pgd(val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	*pgdp = __pgd(0);
}

static inline int pgd_none(pgd_t pgd)
{
	return !pgd_raw(pgd);
}

static inline int pgd_present(pgd_t pgd)
{
	return !pgd_none(pgd);
}

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte_raw(pgd_raw(pgd));
}

static inline pgd_t pte_pgd(pte_t pte)
{
	return __pgd_raw(pte_raw(pte));
}

static inline int pgd_bad(pgd_t pgd)
{
	if (radix_enabled())
		return radix__pgd_bad(pgd);
	return hash__pgd_bad(pgd);
}

extern struct page *pgd_page(pgd_t pgd);

/* Pointers in the page table tree are physical addresses */
#define __pgtable_ptr_val(ptr)	__pa(ptr)

#define pmd_page_vaddr(pmd)	__va(pmd_val(pmd) & ~PMD_MASKED_BITS)
#define pud_page_vaddr(pud)	__va(pud_val(pud) & ~PUD_MASKED_BITS)
#define pgd_page_vaddr(pgd)	__va(pgd_val(pgd) & ~PGD_MASKED_BITS)

#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1))
#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1))
#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1))

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */

#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

#define pud_offset(pgdp, addr)	\
	(((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr))
#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
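
/*
 * Walk sketch (illustrative): resolving a kernel virtual address to its
 * pte with the helpers above looks like
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * with pgd_none()/pud_none()/pmd_none() checks between the levels for
 * anything that might not be fully populated.
 */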

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

static inline int map_kernel_page(unsigned long ea, unsigned long pa,
				  unsigned long flags)
{
	if (radix_enabled()) {
#if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM)
		unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
		WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
#endif
		return radix__map_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE);
	}
	return hash__map_kernel_page(ea, pa, flags);
}

static inline int __meminit vmemmap_create_mapping(unsigned long start,
						   unsigned long page_size,
						   unsigned long phys)
{
	if (radix_enabled())
		return radix__vmemmap_create_mapping(start, page_size, phys);
	return hash__vmemmap_create_mapping(start, page_size, phys);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static inline void vmemmap_remove_mapping(unsigned long start,
					  unsigned long page_size)
{
	if (radix_enabled())
		return radix__vmemmap_remove_mapping(start, page_size);
	return hash__vmemmap_remove_mapping(start, page_size);
}
#endif
struct page *realmode_pfn_to_page(unsigned long pfn);

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte_raw(pmd_raw(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd_raw(pte_raw(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}
#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mk_savedwrite(pmd)	pte_pmd(pte_mk_savedwrite(pmd_pte(pmd)))
#define pmd_clear_savedwrite(pmd)	pte_pmd(pte_clear_savedwrite(pmd_pte(pmd)))

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd)    pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd)  pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))
#define __pmd_write(pmd)	__pte_write(pmd_pte(pmd))
#define pmd_savedwrite(pmd)	pte_savedwrite(pmd_pte(pmd))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);
extern int hash__has_transparent_hugepage(void);
static inline int has_transparent_hugepage(void)
{
	if (radix_enabled())
		return radix__has_transparent_hugepage();
	return hash__has_transparent_hugepage();
}
#define has_transparent_hugepage has_transparent_hugepage

static inline unsigned long
pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp,
		    unsigned long clr, unsigned long set)
{
	if (radix_enabled())
		return radix__pmd_hugepage_update(mm, addr, pmdp, clr, set);
	return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set);
}

static inline int pmd_large(pmd_t pmd)
{
	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT);
}
/*
 * For radix we should always find H_PAGE_HASHPTE zero. Hence
 * the below will work for radix too
 */
static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pmd_t *pmdp)
{
	unsigned long old;

	if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
		return 0;
	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
	return ((old & _PAGE_ACCESSED) != 0);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pmd_t *pmdp)
{
	if (__pmd_write((*pmdp)))
		pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
	else if (unlikely(pmd_savedwrite(*pmdp)))
		pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
}

static inline int pmd_trans_huge(pmd_t pmd)
{
	if (radix_enabled())
		return radix__pmd_trans_huge(pmd);
	return hash__pmd_trans_huge(pmd);
}

#define __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	if (radix_enabled())
		return radix__pmd_same(pmd_a, pmd_b);
	return hash__pmd_same(pmd_a, pmd_b);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	if (radix_enabled())
		return radix__pmd_mkhuge(pmd);
	return hash__pmd_mkhuge(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	if (radix_enabled())
		return radix__pmdp_huge_get_and_clear(mm, addr, pmdp);
	return hash__pmdp_huge_get_and_clear(mm, addr, pmdp);
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	if (radix_enabled())
		return radix__pmdp_collapse_flush(vma, address, pmdp);
	return hash__pmdp_collapse_flush(vma, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define __HAVE_ARCH_PGTABLE_DEPOSIT
static inline void pgtable_trans_huge_deposit(struct mm_struct *mm,
					      pmd_t *pmdp, pgtable_t pgtable)
{
	if (radix_enabled())
		return radix__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
	return hash__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
}

#define __HAVE_ARCH_PGTABLE_WITHDRAW
static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm,
						    pmd_t *pmdp)
{
	if (radix_enabled())
		return radix__pgtable_trans_huge_withdraw(mm, pmdp);
	return hash__pgtable_trans_huge_withdraw(mm, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
					   unsigned long address, pmd_t *pmdp)
{
	if (radix_enabled())
		return radix__pmdp_huge_split_prepare(vma, address, pmdp);
	return hash__pmdp_huge_split_prepare(vma, address, pmdp);
}

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
					 struct spinlock *old_pmd_ptl,
					 struct vm_area_struct *vma)
{
	if (radix_enabled())
		return false;
	/*
	 * Archs like ppc64 use pgtable to store per pmd
	 * specific information. So when we switch the pmd,
	 * we should also withdraw and deposit the pgtable.
	 */
	return true;
}

#define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
static inline bool arch_needs_pgtable_deposit(void)
{
	if (radix_enabled())
		return false;
	return true;
}


static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
}

static inline int pmd_devmap(pmd_t pmd)
{
	return pte_devmap(pmd_pte(pmd));
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int pud_pfn(pud_t pud)
{
	/*
	 * Currently all calls to pud_pfn() are gated around a pud_devmap()
	 * check so this should never be used. If it grows another user we
	 * want to know about it.
	 */
	BUILD_BUG();
	return 0;
}

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */