/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
static unsigned long nr_overcommit_huge_pages;
unsigned long max_huge_pages;
unsigned long sysctl_overcommit_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
static int hugetlb_next_nid;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(void)
{
	int nid;
	struct page *page = NULL;

	for (nid = 0; nid < MAX_NUMNODES; ++nid) {
		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			break;
		}
	}
	return page;
}

static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	struct zone *zone;
	struct zoneref *z;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		nid = zone_to_nid(zone);
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (vma && vma->vm_flags & VM_MAYSHARE)
				resv_huge_pages--;
			break;
		}
	}
	mpol_put(mpol);	/* unref if mpol !NULL */
	return page;
}

static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1<< PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}
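
/*
 * Worked example of the round-robin walk in adjust_pool_surplus() above
 * (illustrative, assuming two online nodes 0 and 1 that both pass the
 * capacity checks): prev_nid starts at 0, so the first delta == +1 call
 * advances to node 1 and bumps surplus_huge_pages_node[1]; the next call
 * wraps past MAX_NUMNODES back to node 0.  Successive adjustments thus
 * alternate between nodes, keeping the per-node counters within one page
 * of each other.
 */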

static struct page *alloc_fresh_huge_page_node(int nid)
{
	struct page *page;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
		HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[nid]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
	}

	return page;
}

static int alloc_fresh_huge_page(void)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
		if (page)
			ret = 1;
		/*
		 * Use a helper variable to find the next node and then
		 * copy it back to hugetlb_next_nid afterwards:
		 * otherwise there's a window in which a racer might
		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
		 * But we don't need to use a spin_lock here: it really
		 * doesn't matter if occasionally a racer chooses the
		 * same nid as we do.  Move nid forward in the mask even
		 * if we just successfully allocated a hugepage so that
		 * the next caller gets hugepages on the next node.
		 */
		next_nid = next_node(hugetlb_next_nid, node_online_map);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node(node_online_map);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}
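
/*
 * Illustrative allocation order for alloc_fresh_huge_page() above
 * (assuming three online nodes 0-2 and hugetlb_next_nid == 0): successive
 * calls try node 0, then 1, then 2, then wrap back to 0.  hugetlb_next_nid
 * advances even on success, so a burst of allocations is interleaved
 * across nodes rather than filling one node first, and a call fails only
 * after a full pass over the online nodes yields no page.
 */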

static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;
	unsigned int nid;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages >= nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		nr_huge_pages++;
		surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);

	spin_lock(&hugetlb_lock);
	if (page) {
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		nr_huge_pages_node[nid]++;
		surplus_huge_pages_node[nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		nr_huge_pages--;
		surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0) {
		resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	resv_huge_pages += delta;
	ret = 0;
free:
	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		enqueue_huge_page(page);
	}

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		spin_unlock(&hugetlb_lock);
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			/*
			 * The page has a reference count of zero already, so
			 * call free_huge_page directly instead of using
			 * put_page.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			free_huge_page(page);
		}
		spin_lock(&hugetlb_lock);
	}

	return ret;
}
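
/*
 * Worked example of the arithmetic in gather_surplus_pages() above
 * (numbers illustrative): with free_huge_pages == 12, resv_huge_pages == 10
 * and delta == 5, the pool must be able to back 15 reserved pages but only
 * has 12 free, so needed == 3 and three surplus pages are requested from
 * the buddy allocator.  If a racing thread freed pages meanwhile, the
 * recalculation after retaking hugetlb_lock shrinks 'needed' and the
 * extras are handed back to the buddy allocator in the final loop.
 */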

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes. Iterate across all nodes until we
	 * can no longer free unreserved surplus pages. This occurs when
	 * the nodes with surplus pages have no free pages.
	 */
	unsigned long remaining_iterations = num_online_nodes();

	/* Uncommit the reservation */
	resv_huge_pages -= unused_resv_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (remaining_iterations-- && nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
			remaining_iterations = num_online_nodes();
		}
	}
}


static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(vma, addr);
	spin_unlock(&hugetlb_lock);
	return page ? page : ERR_PTR(-VM_FAULT_OOM);
}

static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page = NULL;

	if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
		return ERR_PTR(-VM_FAULT_SIGBUS);

	spin_lock(&hugetlb_lock);
	if (free_huge_pages > resv_huge_pages)
		page = dequeue_huge_page_vma(vma, addr);
	spin_unlock(&hugetlb_lock);
	if (!page) {
		page = alloc_buddy_huge_page(vma, addr);
		if (!page) {
			hugetlb_put_quota(vma->vm_file->f_mapping, 1);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}
	return page;
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;

	if (vma->vm_flags & VM_MAYSHARE)
		page = alloc_huge_page_shared(vma, addr);
	else
		page = alloc_huge_page_private(vma, addr);

	if (!IS_ERR(page)) {
		set_page_refcounted(page);
		set_page_private(page, (unsigned long) mapping);
	}
	return page;
}
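
/*
 * Note on the shared/private split above, as implied by the code: the
 * shared (VM_MAYSHARE) path only dequeues, consuming a reservation in
 * dequeue_huge_page_vma(), while the private path charges the hugetlbfs
 * quota here and may fall back to a surplus page from
 * alloc_buddy_huge_page() when every free page is already reserved.
 */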

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	hugetlb_next_nid = first_node(node_online_map);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);
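
/*
 * Example boot-time usage (illustrative): passing "hugepages=64" on the
 * kernel command line stores 64 in max_huge_pages before hugetlb_init()
 * runs, so up to 64 persistent huge pages are allocated at boot, before
 * memory has had a chance to fragment.
 */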

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		int ret;
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page();
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}
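
/*
 * Example runtime usage (assuming the usual sysctl wiring of
 * max_huge_pages to vm.nr_hugepages): "echo 20 > /proc/sys/vm/nr_hugepages"
 * lands here, proc_doulongvec_minmax() stores 20 in max_huge_pages, and
 * set_max_huge_pages() grows or shrinks the persistent pool toward that
 * count, writing back the size actually reached.
 */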

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	spin_lock(&hugetlb_lock);
	nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
	spin_unlock(&hugetlb_lock);
	return 0;
}
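
/*
 * Example runtime usage (assuming the usual vm.nr_overcommit_hugepages
 * sysctl wiring): "echo 8 > /proc/sys/vm/nr_overcommit_hugepages" permits
 * up to 8 surplus huge pages to be allocated on demand from the buddy
 * allocator by alloc_buddy_huge_page(), on top of the persistent pool.
 */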

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}
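
/*
 * With the format above, the block emitted into /proc/meminfo looks like
 * (values illustrative):
 *
 *	HugePages_Total:    20
 *	HugePages_Free:     18
 *	HugePages_Rsvd:      4
 *	HugePages_Surp:      0
 *	Hugepagesize:     2048 kB
 */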

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n"
		"Node %d HugePages_Surp:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid],
		nid, surplus_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}
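
/*
 * For example, with 2 MB huge pages and 4 kB base pages
 * (HPAGE_SIZE/PAGE_SIZE == 512), a pool of 20 huge pages is reported
 * as 10240 base pages here.
 */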

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}


int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
	 * to clean up. Since no pte has actually been setup, it is safe to
	 * do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);
		return -PTR_ERR(new_page);
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address);
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_SHARED) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += BLOCKS_PER_HUGEPAGE;
			spin_unlock(&inode->i_lock);
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
	put_page(page);
	goto out;
}

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate