/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];

static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (vma && vma->vm_flags & VM_MAYSHARE)
				resv_huge_pages--;
			break;
		}
	}
	mpol_free(mpol);	/* unref if mpol !NULL */
	return page;
}

static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1<< PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

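/*
 * free_huge_page() below is the compound-page destructor for huge pages
 * (registered via set_compound_page_dtor() at allocation time), so it runs
 * when the last reference to a huge page is dropped.  Surplus pages go
 * straight back to the buddy allocator; all others return to the pool
 * free lists via enqueue_huge_page().
 */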
static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);

	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}
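/*
 * Callers hold hugetlb_lock.  set_max_huge_pages() below uses
 * adjust_pool_surplus(-1) to convert surplus pages into persistent ones
 * when the pool grows, and adjust_pool_surplus(1) to mark pages surplus
 * when the pool shrinks.
 */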

static int alloc_fresh_huge_page(void)
{
	static int prev_nid;
	struct page *page;
	int nid;

	/*
	 * Copy static prev_nid to local nid, work on that, then copy it
	 * back to prev_nid afterwards: otherwise there's a window in which
	 * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
	 * But we don't need to use a spin_lock here: it really doesn't
	 * matter if occasionally a racer chooses the same nid as we do.
	 */
	nid = next_node(prev_nid, node_online_map);
	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	prev_nid = nid;

	page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

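/*
 * alloc_buddy_huge_page() below allocates a huge page directly from the
 * buddy allocator, bypassing the pre-allocated pool, and accounts it as
 * "surplus".  When such a page is later freed, free_huge_page() returns it
 * to the buddy allocator instead of keeping it in the pool.
 */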
static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		surplus_huge_pages++;
		surplus_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
	}

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0)
		return 0;

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.
	 */
	needed += allocated;
	ret = 0;
free:
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		list_del(&page->lru);
		if ((--needed) >= 0)
			enqueue_huge_page(page);
		else
			update_and_free_page(page);
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
		}
	}
}
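/*
 * Both gather_surplus_pages() and return_unused_surplus_pages() are driven
 * from hugetlb_acct_memory() near the bottom of this file, with hugetlb_lock
 * already held, as the pool-wide reservation count (resv_huge_pages) is
 * adjusted.
 */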

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page = NULL;
	int use_reserved_page = vma->vm_flags & VM_MAYSHARE;

	spin_lock(&hugetlb_lock);
	if (!use_reserved_page && (free_huge_pages <= resv_huge_pages))
		goto fail;

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	spin_unlock(&hugetlb_lock);

	/*
	 * Private mappings do not use reserved huge pages so the allocation
	 * may have failed due to an undersized hugetlb pool.  Try to grab a
	 * surplus huge page from the buddy allocator.
	 */
	if (!use_reserved_page)
		page = alloc_buddy_huge_page(vma, addr);

	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);
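/*
 * The pool can be sized either at boot ("hugepages=N" on the kernel command
 * line, parsed by hugetlb_setup() above) or at runtime by writing to the
 * nr_hugepages sysctl (/proc/sys/vm/nr_hugepages), which ends up in
 * hugetlb_sysctl_handler() below.
 */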

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		int ret;
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;
	}
	if (count >= persistent_huge_pages)
		goto out;

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 */
	min_count = max(count, resv_huge_pages);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}
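/*
 * hugepages_treat_as_movable is exposed as a sysctl
 * (/proc/sys/vm/hugepages_treat_as_movable); when it is set, subsequent huge
 * page allocations use GFP_HIGHUSER_MOVABLE so they may be satisfied from
 * ZONE_MOVABLE.
 */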

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}
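/*
 * The block emitted above appears in /proc/meminfo.  With a configured pool
 * it looks roughly like this (values are illustrative only):
 *
 *	HugePages_Total:   128
 *	HugePages_Free:    128
 *	HugePages_Rsvd:      0
 *	HugePages_Surp:      0
 *	Hugepagesize:     2048 kB
 */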

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}


int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
	 * to clean up. Since no pte has actually been setup, it is safe to
	 * do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

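/*
 * hugetlb_no_page() below instantiates a huge page for a not-present fault.
 * Shared (VM_SHARED) mappings insert the new page into the page cache at
 * file offset 'idx'; private mappings only lock the page and, for a write
 * fault, do the copy-on-write immediately via hugetlb_cow() (the
 * optimization near the end of the function).
 */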
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

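/*
 * The reservation map: each hugetlbfs inode keeps a list of file_region
 * entries on inode->i_mapping->private_list, recording which huge-page
 * offsets of the file carry outstanding reservations.  region_chg() reports
 * (and region_add() commits) how many additional pages reserving the range
 * [f, t) would take; region_truncate() drops everything past a given offset.
 *
 * For example, with existing regions [0,2) and [5,8), region_chg(head, 1, 6)
 * reports 3 more pages (offsets 2, 3 and 4), and region_add(head, 1, 6)
 * merges the result into the single region [0,8).
 */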
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle: allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to   = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area; if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. An application can still potentially be OOM-killed
	 * by the kernel for lack of free huge pages in the cpuset that the
	 * task is in. Enforcing strict accounting with cpusets is almost
	 * impossible (or too ugly) because cpusets are too fluid: tasks and
	 * memory nodes can be moved between cpusets dynamically.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to checking the current free page availability as a
	 * best effort, hopefully minimizing the impact of the semantic
	 * change that cpusets introduce.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(free_huge_pages_node))
			goto out;
	}

	ret = 0;
	resv_huge_pages += delta;
	if (delta < 0)
		return_unused_surplus_pages((unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;

	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}
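/*
 * hugetlb_reserve_pages() and hugetlb_unreserve_pages() are the entry
 * points hugetlbfs uses to manage reservations: a shared mapping typically
 * reserves its range up front (so later faults do not fail with SIGBUS for
 * lack of huge pages), and truncation gives back the unused part of the
 * reservation via region_truncate().
 */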