/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
static unsigned long nr_overcommit_huge_pages;
unsigned long max_huge_pages;
unsigned long sysctl_overcommit_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];

static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
static int hugetlb_next_nid;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

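/*
 * Zero a huge page one base page at a time, calling cond_resched() between
 * base pages so the loop may be preempted.
 */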
static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

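/* Copy a huge page base page by base page, rescheduling between copies. */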
static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

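/*
 * Put a free huge page on its node's free list and update the free counts.
 * Caller must hold hugetlb_lock.
 */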
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

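/*
 * Take a free huge page from the first node that has one.
 * Caller must hold hugetlb_lock.
 */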
static struct page *dequeue_huge_page(void)
{
	int nid;
	struct page *page = NULL;

	for (nid = 0; nid < MAX_NUMNODES; ++nid) {
		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			break;
		}
	}
	return page;
}

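/*
 * Dequeue a free huge page suitable for the given VMA and fault address,
 * honouring the task's mempolicy and cpuset via huge_zonelist().
 */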
static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone *zone, **z;

	for_each_zone_zonelist(zone, z, zonelist, MAX_NR_ZONES - 1) {
		nid = zone_to_nid(zone);
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (vma && vma->vm_flags & VM_MAYSHARE)
				resv_huge_pages--;
			break;
		}
	}
	mpol_free(mpol);	/* unref if mpol !NULL */
	return page;
}

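/*
 * Hand a huge page back to the buddy allocator: drop it from the hugetlb
 * counters, clear the inherited page flags and free the compound page.
 */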
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

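/*
 * Compound page destructor, run when the last reference to a huge page is
 * dropped.  Surplus pages go straight back to the buddy allocator, all
 * others return to the per-node free list.
 */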
static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

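/*
 * Allocate a fresh huge page on the given node from the buddy allocator
 * and release it into the hugetlb free pool.
 */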
static struct page *alloc_fresh_huge_page_node(int nid)
{
	struct page *page;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
		HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[nid]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
	}

	return page;
}

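/*
 * Allocate one fresh huge page, round-robining across the online nodes so
 * the pool stays balanced.  Returns 1 on success, 0 if no node could
 * satisfy the allocation.
 */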
static int alloc_fresh_huge_page(void)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
		if (page)
			ret = 1;
		/*
		 * Use a helper variable to find the next node and then
		 * copy it back to hugetlb_next_nid afterwards:
		 * otherwise there's a window in which a racer might
		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
		 * But we don't need to use a spin_lock here: it really
		 * doesn't matter if occasionally a racer chooses the
		 * same nid as we do.  Move nid forward in the mask even
		 * if we just successfully allocated a hugepage so that
		 * the next caller gets hugepages on the next node.
		 */
		next_nid = next_node(hugetlb_next_nid, node_online_map);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node(node_online_map);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	return ret;
}

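/*
 * Allocate a surplus huge page directly from the buddy allocator, subject
 * to the nr_overcommit_huge_pages limit.
 */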
static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;
	unsigned int nid;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages >= nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		nr_huge_pages++;
		surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);

	spin_lock(&hugetlb_lock);
	if (page) {
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		nr_huge_pages_node[nid]++;
		surplus_huge_pages_node[nid]++;
	} else {
		nr_huge_pages--;
		surplus_huge_pages--;
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0) {
		resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	resv_huge_pages += delta;
	ret = 0;
free:
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		list_del(&page->lru);
		if ((--needed) >= 0)
			enqueue_huge_page(page);
		else {
			/*
			 * The page has a reference count of zero already, so
			 * call free_huge_page directly instead of using
			 * put_page.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			spin_unlock(&hugetlb_lock);
			free_huge_page(page);
			spin_lock(&hugetlb_lock);
		}
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes. Iterate across all nodes until we
	 * can no longer free unreserved surplus pages. This occurs when
	 * the nodes with surplus pages have no free pages.
	 */
	unsigned long remaining_iterations = num_online_nodes();

	/* Uncommit the reservation */
	resv_huge_pages -= unused_resv_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (remaining_iterations-- && nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
			remaining_iterations = num_online_nodes();
		}
	}
}


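/*
 * Allocate a huge page for a shared (VM_MAYSHARE) mapping.  Such pages are
 * normally reserved up front, so only the free list is consulted here.
 */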
static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(vma, addr);
	spin_unlock(&hugetlb_lock);
	return page ? page : ERR_PTR(-VM_FAULT_OOM);
}

static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page = NULL;

	if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
		return ERR_PTR(-VM_FAULT_SIGBUS);

	spin_lock(&hugetlb_lock);
	if (free_huge_pages > resv_huge_pages)
		page = dequeue_huge_page_vma(vma, addr);
	spin_unlock(&hugetlb_lock);
	if (!page) {
		page = alloc_buddy_huge_page(vma, addr);
		if (!page) {
			hugetlb_put_quota(vma->vm_file->f_mapping, 1);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}
	return page;
}

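/*
 * Allocate a huge page at fault time, dispatching on whether the mapping
 * is shared or private, and tag it with its backing address_space.
 */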
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;

	if (vma->vm_flags & VM_MAYSHARE)
		page = alloc_huge_page_shared(vma, addr);
	else
		page = alloc_huge_page_private(vma, addr);

	if (!IS_ERR(page)) {
		set_page_refcounted(page);
		set_page_private(page, (unsigned long) mapping);
	}
	return page;
}

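/* Boot-time setup: initialise the free lists and allocate max_huge_pages pages. */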
static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	hugetlb_next_nid = first_node(node_online_map);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

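/* Sum a per-node counter over the nodes allowed by the current cpuset. */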
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
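/*
 * When shrinking the pool, free huge pages that live in lowmem first;
 * highmem pages are skipped.
 */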
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
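/*
 * Grow or shrink the persistent huge page pool to 'count' pages, as
 * described in the comments below.
 */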
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		int ret;
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page();
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	spin_lock(&hugetlb_lock);
	nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
	spin_unlock(&hugetlb_lock);
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n"
		"Node %d HugePages_Surp:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid],
		nid, surplus_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

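/* Build a huge page PTE for @page with protections derived from the VMA. */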
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}


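/*
 * Copy the huge page table entries from the parent to the child mm at fork
 * time, write-protecting both copies for private COW mappings.
 */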
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

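/*
 * Tear down the huge page mappings in [start, end) and release the pages.
 * Called with the file's i_mmap_lock held; see unmap_hugepage_range().
 */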
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
	 * to clean up. Since no pte has actually been setup, it is safe to
	 * do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

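/*
 * Handle a copy-on-write fault on a huge page: if the faulting task is the
 * sole user, just make the PTE writable; otherwise allocate a fresh huge
 * page, copy into it and install it.
 */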
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);
		return -PTR_ERR(new_page);
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

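/*
 * Fault in a huge page that is not yet mapped: find or add it in the page
 * cache for shared mappings, or allocate an anonymous huge page for
 * private ones, then install the new PTE.
 */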
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address);
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_SHARED) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += BLOCKS_PER_HUGEPAGE;
			spin_unlock(&inode->i_lock);
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
	put_page(page);
	goto out;
}

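/*
 * Top-level huge page fault handler.  A single mutex serializes page
 * instantiation to avoid spurious allocation failures when two CPUs race
 * on the same huge page.
 */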
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);