/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
static unsigned long nr_overcommit_huge_pages;
unsigned long max_huge_pages;
unsigned long sysctl_overcommit_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
static int hugetlb_next_nid;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

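/*
 * Clear or copy a huge page one base page at a time, with a
 * cond_resched() between base pages to keep scheduling latency down.
 */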
static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

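/*
 * Put a free huge page back on its node's free list.
 * Caller must hold hugetlb_lock.
 */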
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (vma && vma->vm_flags & VM_MAYSHARE)
				resv_huge_pages--;
			break;
		}
	}
	mpol_free(mpol);	/* unref if mpol !NULL */
	return page;
}

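/*
 * Hand a huge page back to the buddy allocator: drop it from the pool
 * counters, clear the leftover page flags and compound destructor, and
 * free it as an order-HUGETLB_PAGE_ORDER block.  Caller must hold
 * hugetlb_lock.
 */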
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1<< PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

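/*
 * Compound page destructor for huge pages: a surplus page goes straight
 * back to the buddy allocator, anything else returns to the free list,
 * and any quota charged against the backing mapping is released.
 */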
static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

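/*
 * Allocate a fresh huge page for the static pool on the given node and
 * install free_huge_page() as its compound destructor, so the final
 * put_page() below lands it on the hugepage free list.
 */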
static struct page *alloc_fresh_huge_page_node(int nid)
{
	struct page *page;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
		HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[nid]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
	}

	return page;
}

static int alloc_fresh_huge_page(void)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
		if (page)
			ret = 1;
		/*
		 * Use a helper variable to find the next node and then
		 * copy it back to hugetlb_next_nid afterwards:
		 * otherwise there's a window in which a racer might
		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
		 * But we don't need to use a spin_lock here: it really
		 * doesn't matter if occasionally a racer chooses the
		 * same nid as we do.  Move nid forward in the mask even
		 * if we just successfully allocated a hugepage so that
		 * the next caller gets hugepages on the next node.
		 */
		next_nid = next_node(hugetlb_next_nid, node_online_map);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node(node_online_map);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	return ret;
}

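/*
 * Allocate a "surplus" huge page straight from the buddy allocator,
 * over and above the static pool, as long as the global surplus stays
 * within nr_overcommit_huge_pages.
 */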
static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;
	unsigned int nid;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages >= nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		nr_huge_pages++;
		surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);

	spin_lock(&hugetlb_lock);
	if (page) {
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		nr_huge_pages_node[nid]++;
		surplus_huge_pages_node[nid]++;
	} else {
		nr_huge_pages--;
		surplus_huge_pages--;
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0)
		return 0;

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.
	 */
	needed += allocated;
	ret = 0;
free:
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		list_del(&page->lru);
		if ((--needed) >= 0)
			enqueue_huge_page(page);
		else {
			/*
			 * Decrement the refcount and free the page using its
			 * destructor.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			spin_unlock(&hugetlb_lock);
			put_page(page);
			spin_lock(&hugetlb_lock);
		}
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
		}
	}
}

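/*
 * Shared (VM_MAYSHARE) mappings have their huge pages reserved ahead of
 * time, so a fault only needs to take a page off the free list; unlike
 * the private case below, no quota is charged here.
 */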
static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	return page ? page : ERR_PTR(-VM_FAULT_OOM);
}

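/*
 * Private mappings are not reserved up front: charge one page of quota,
 * then try the free list (leaving reserved pages alone) and fall back
 * to a surplus page from the buddy allocator.
 */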
static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page = NULL;

	if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
		return ERR_PTR(-VM_FAULT_SIGBUS);

	spin_lock(&hugetlb_lock);
	if (free_huge_pages > resv_huge_pages)
		page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	if (!page) {
		page = alloc_buddy_huge_page(vma, addr);
		if (!page) {
			hugetlb_put_quota(vma->vm_file->f_mapping, 1);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}
	return page;
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;

	if (vma->vm_flags & VM_MAYSHARE)
		page = alloc_huge_page_shared(vma, addr);
	else
		page = alloc_huge_page_private(vma, addr);

	if (!IS_ERR(page)) {
		set_page_refcounted(page);
		set_page_private(page, (unsigned long) mapping);
	}
	return page;
}

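/*
 * Boot-time setup: initialise the per-node free lists and preallocate
 * the pool requested with the hugepages= command line option.
 */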
static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	hugetlb_next_nid = first_node(node_online_map);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

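/*
 * Parse the "hugepages=N" boot option; e.g. booting with hugepages=64
 * asks hugetlb_init() above to preallocate 64 huge pages.
 */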
static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

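/*
 * Sum a per-node counter array over just the nodes in the current
 * task's cpuset.
 */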
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
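/*
 * When shrinking the pool on a HIGHMEM machine, prefer to give back
 * free huge pages that live in lowmem, since lowmem is the scarcer
 * resource.
 */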
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

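/*
 * "Persistent" huge pages are those in the static pool, as opposed to
 * surplus pages allocated on demand from the buddy allocator.
 */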
#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		int ret;
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

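/*
 * Handler for the vm.nr_hugepages sysctl: resizes the static pool at
 * run time, e.g. "echo 64 > /proc/sys/vm/nr_hugepages".
 */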
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

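/*
 * Handler for the vm.hugepages_treat_as_movable sysctl: when set, huge
 * pages are allocated with GFP_HIGHUSER_MOVABLE so they can come from
 * ZONE_MOVABLE.
 */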
int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

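/*
 * Handler for the vm.nr_overcommit_hugepages sysctl: updates the limit
 * on how many surplus huge pages alloc_buddy_huge_page() may create.
 */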
int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	spin_lock(&hugetlb_lock);
	nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
	spin_unlock(&hugetlb_lock);
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

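/*
 * Build the huge PTE for a newly mapped page: writable and dirty for
 * writable mappings, write-protected otherwise, always young and huge.
 */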
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

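/*
 * Upgrade an existing huge PTE to writable and dirty in place, used
 * when a COW fault finds the page is not actually shared.
 */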
static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}


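/*
 * Duplicate a VMA's huge PTEs at fork time.  For private mappings the
 * parent's PTEs are write-protected first so that a later write in
 * either process goes through hugetlb_cow().
 */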
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

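/*
 * Tear down the huge PTEs in [start, end).  Pages are gathered on a
 * local list and only released after the TLB flush, so no CPU can
 * still be using a page when it is freed.  Caller must hold the
 * mapping's i_mmap_lock (see unmap_hugepage_range() below).
 */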
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
	 * to clean up. Since no pte has actually been setup, it is safe to
	 * do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

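/*
 * Break copy-on-write for a huge page.  If we hold the only reference
 * the page is simply made writable; otherwise a new huge page is
 * allocated, the contents copied, and the PTE switched over.  Called
 * and returns with mm->page_table_lock held.
 */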
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);
		return -PTR_ERR(new_page);
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

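/*
 * Fault in a huge page that has no PTE yet: find it in (or add it to)
 * the file's page cache, allocating and zeroing a fresh huge page when
 * none exists for this index.  Racing truncation is handled with the
 * page lock and an i_size recheck.
 */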
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address);
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_SHARED) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += BLOCKS_PER_HUGEPAGE;
			spin_unlock(&inode->i_lock);
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
	put_page(page);
	goto out;
}

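/*
 * Top-level hugetlb fault handler: instantiate a missing huge PTE via
 * hugetlb_no_page(), or break COW on a write to a read-only PTE.
 */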
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

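/*
 * get_user_pages() back end for hugetlb mappings: walk the huge PTEs
 * covering the request, faulting pages in as needed, and fill the
 * pages[] and vmas[] arrays one base page at a time.
 */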
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			int write)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, write);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);