/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
static unsigned long nr_overcommit_huge_pages;
unsigned long max_huge_pages;
unsigned long sysctl_overcommit_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];

static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
static int hugetlb_next_nid;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

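/*
 * Clear or copy a huge page one base page at a time so that we can
 * reschedule between the per-page operations.
 */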
static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

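/*
 * Put a huge page on its node's free list; the caller must hold
 * hugetlb_lock, which protects the free lists and their counters.
 */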
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

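/*
 * Take the first free huge page from any node, scanning nodes in
 * ascending order.  Returns NULL when every per-node free list is
 * empty; assumes hugetlb_lock is held.
 */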
static struct page *dequeue_huge_page(void)
{
	int nid;
	struct page *page = NULL;

	for (nid = 0; nid < MAX_NUMNODES; ++nid) {
		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			break;
		}
	}
	return page;
}

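/*
 * Like dequeue_huge_page(), but walk the zonelist chosen by the VMA's
 * mempolicy and honour cpuset constraints; a page dequeued for a shared
 * mapping also consumes one reservation.
 */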
static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	struct zone *zone;
	struct zoneref *z;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		nid = zone_to_nid(zone);
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (vma && vma->vm_flags & VM_MAYSHARE)
				resv_huge_pages--;
			break;
		}
	}
	mpol_cond_put(mpol);
	return page;
}

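/*
 * Give a huge page back to the buddy allocator: drop it from the global
 * counts, clear the page flags the buddy allocator objects to, restore
 * a normal refcount and destructor, and free the compound page.
 */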
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1<< PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

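/*
 * Compound page destructor, run when the last reference to a huge page
 * is dropped: surplus pages go straight back to the buddy allocator,
 * anything else returns to the free lists.
 */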
static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

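/*
 * Allocate one fresh huge page from the buddy allocator on the given
 * node and hand it over to the hugetlb pool via its compound
 * destructor.
 */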
static struct page *alloc_fresh_huge_page_node(int nid)
{
	struct page *page;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		HUGETLB_PAGE_ORDER);
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, HUGETLB_PAGE_ORDER);
			return NULL;
		}
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[nid]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
	}

	return page;
}

static int alloc_fresh_huge_page(void)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
		if (page)
			ret = 1;
		/*
		 * Use a helper variable to find the next node and then
		 * copy it back to hugetlb_next_nid afterwards:
		 * otherwise there's a window in which a racer might
		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
		 * But we don't need to use a spin_lock here: it really
		 * doesn't matter if occasionally a racer chooses the
		 * same nid as we do.  Move nid forward in the mask even
		 * if we just successfully allocated a hugepage so that
		 * the next caller gets hugepages on the next node.
		 */
		next_nid = next_node(hugetlb_next_nid, node_online_map);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node(node_online_map);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

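/*
 * Allocate a "surplus" huge page straight from the buddy allocator,
 * bounded by nr_overcommit_huge_pages.
 */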
static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;
	unsigned int nid;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages >= nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		nr_huge_pages++;
		surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
					__GFP_REPEAT|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);

	spin_lock(&hugetlb_lock);
	if (page) {
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		nr_huge_pages_node[nid]++;
		surplus_huge_pages_node[nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		nr_huge_pages--;
		surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0) {
		resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	resv_huge_pages += delta;
	ret = 0;
free:
	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		enqueue_huge_page(page);
	}

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		spin_unlock(&hugetlb_lock);
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			/*
			 * The page has a reference count of zero already, so
			 * call free_huge_page directly instead of using
			 * put_page.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			free_huge_page(page);
		}
		spin_lock(&hugetlb_lock);
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes. Iterate across all nodes until we
	 * can no longer free unreserved surplus pages. This occurs when
	 * the nodes with surplus pages have no free pages.
	 */
	unsigned long remaining_iterations = num_online_nodes();

	/* Uncommit the reservation */
	resv_huge_pages -= unused_resv_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (remaining_iterations-- && nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
			remaining_iterations = num_online_nodes();
		}
	}
}

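/*
 * Shared (VM_MAYSHARE) mappings have their huge pages reserved up
 * front, so the dequeue is expected to succeed; failure here is a
 * genuine OOM.
 */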
static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(vma, addr);
	spin_unlock(&hugetlb_lock);
	return page ? page : ERR_PTR(-VM_FAULT_OOM);
}

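/*
 * Private mappings carry no up-front reservation: charge the hugetlbfs
 * quota now and fall back to a surplus page from the buddy allocator
 * when the pool has nothing spare.
 */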
static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page = NULL;

	if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
		return ERR_PTR(-VM_FAULT_SIGBUS);

	spin_lock(&hugetlb_lock);
	if (free_huge_pages > resv_huge_pages)
		page = dequeue_huge_page_vma(vma, addr);
	spin_unlock(&hugetlb_lock);
	if (!page) {
		page = alloc_buddy_huge_page(vma, addr);
		if (!page) {
			hugetlb_put_quota(vma->vm_file->f_mapping, 1);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}
	return page;
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;

	if (vma->vm_flags & VM_MAYSHARE)
		page = alloc_huge_page_shared(vma, addr);
	else
		page = alloc_huge_page_private(vma, addr);

	if (!IS_ERR(page)) {
		set_page_refcounted(page);
		set_page_private(page, (unsigned long) mapping);
	}
	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	hugetlb_next_nid = first_node(node_online_map);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

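/*
 * Sum a per-node counter array over just the nodes allowed by the
 * current task's cpuset.
 */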
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
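/*
 * Resize the persistent huge page pool to 'count' pages, preferring to
 * convert pages to or from surplus state over allocating or freeing.
 */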
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls is changed, or the surplus pages go out of use.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page();
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

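/*
 * sysctl handler for /proc/sys/vm/nr_hugepages: e.g.
 * "echo 64 > /proc/sys/vm/nr_hugepages" resizes the persistent pool.
 */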
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

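/*
 * sysctl handler for /proc/sys/vm/nr_overcommit_hugepages: bounds how
 * many surplus huge pages alloc_buddy_huge_page() may create.
 */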
int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	spin_lock(&hugetlb_lock);
	nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
	spin_unlock(&hugetlb_lock);
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n"
		"Node %d HugePages_Surp:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid],
		nid, surplus_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

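/*
 * Charge (delta > 0) or uncharge (delta < 0) huge pages against the
 * global reservation, drawing on surplus pages when the pool is short.
 */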
static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpusets are configured, strict hugetlb page reservation
	 * breaks down, because the accounting is done on a global variable:
	 * the reservation is never checked against page availability for
	 * the current cpuset, so a task can still be OOM-killed for lack of
	 * free huge pages in the cpuset it runs in.  Enforcing strict
	 * accounting per cpuset is nearly impossible (or too ugly) because
	 * cpusets are too fluid -- tasks and memory nodes can be moved
	 * between cpusets at any time.
	 *
	 * Changing the semantics of shared hugetlb mappings under cpusets
	 * is undesirable.  To preserve some of them, we fall back to
	 * checking against the current free page availability, as a best
	 * effort that minimizes the impact cpusets have on these semantics.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(free_huge_pages_node)) {
			return_unused_surplus_pages(delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages((unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

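/*
 * Build the huge PTE for a newly mapped page; 'writable' chooses
 * between a writable and a COW-protected mapping.
 */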
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

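/*
 * Make an existing huge PTE writable, for the COW fast path where the
 * faulting task holds the only reference to the page.
 */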
static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}


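/*
 * Copy a VMA's huge PTEs into the child mm at fork time.  For private
 * mappings both parent and child are write-protected so that a later
 * write faults into hugetlb_cow().
 */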
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
		if (!huge_pte_none(huge_ptep_get(src_pte))) {
			if (cow)
				huge_ptep_set_wrprotect(src, addr, src_pte);
			entry = huge_ptep_get(src_pte);
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

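/*
 * Unmap the huge PTEs in [start, end), gathering the pages on a local
 * list and dropping their references only after the TLB flush.
 */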
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (huge_pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap method fails,
	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
	 * to clean up. Since no pte has actually been setup, it is safe to
	 * do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

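/*
 * Break copy-on-write for a huge page: if we hold the only reference,
 * just make the PTE writable; otherwise copy into a fresh huge page.
 */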
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);
		return -PTR_ERR(new_page);
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
		/* Break COW */
		huge_ptep_clear_flush(vma, address, ptep);
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

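/*
 * Fault handler for a huge PTE that was never instantiated: find the
 * page in the page cache or allocate a fresh one, inserting it into
 * the cache for shared mappings.
 */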
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address);
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_SHARED) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += BLOCKS_PER_HUGEPAGE;
			spin_unlock(&inode->i_lock);
		} else