/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
unsigned long nr_overcommit_huge_pages;
static int hugetlb_next_nid;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

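/*
 * Put a free huge page back on its node's free list and bump the global
 * and per-node free counts.  Caller must hold hugetlb_lock.
 */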
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

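/*
 * Take a free huge page off the free lists, preferring nodes in zonelist
 * order for the given vma/address and honouring the current cpuset.
 * Caller must hold hugetlb_lock.
 */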
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (vma && vma->vm_flags & VM_MAYSHARE)
				resv_huge_pages--;
			break;
		}
	}
	mpol_free(mpol);	/* unref if mpol !NULL */
	return page;
}

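/*
 * Hand a huge page back to the buddy allocator: drop the pool counts,
 * clear the subpage flags and compound destructor, and free it as an
 * order-HUGETLB_PAGE_ORDER page.  Caller must hold hugetlb_lock.
 */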
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1<< PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

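/*
 * Compound page destructor, run when the last reference to a huge page
 * is dropped.  Surplus pages go straight back to the buddy allocator;
 * normal pages return to the free lists.
 */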
static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
	set_page_private(page, 0);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

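/*
 * Allocate a fresh huge page for the pool from the given node and release
 * it into the huge page allocator via its compound destructor.
 */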
static struct page *alloc_fresh_huge_page_node(int nid)
{
	struct page *page;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
		HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[nid]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
	}

	return page;
}

static int alloc_fresh_huge_page(void)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
		if (page)
			ret = 1;
		/*
		 * Use a helper variable to find the next node and then
		 * copy it back to hugetlb_next_nid afterwards:
		 * otherwise there's a window in which a racer might
		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
		 * But we don't need to use a spin_lock here: it really
		 * doesn't matter if occasionally a racer chooses the
		 * same nid as we do.  Move nid forward in the mask even
		 * if we just successfully allocated a hugepage so that
		 * the next caller gets hugepages on the next node.
		 */
		next_nid = next_node(hugetlb_next_nid, node_online_map);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node(node_online_map);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	return ret;
}

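/*
 * Allocate a surplus huge page directly from the buddy allocator, for use
 * when the static pool is exhausted.  The global counters are bumped
 * optimistically before the allocation so racing callers can never push
 * the surplus past the overcommit limit.
 */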
static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;
	unsigned int nid;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages >= nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		nr_huge_pages++;
		surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);

	spin_lock(&hugetlb_lock);
	if (page) {
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		nr_huge_pages_node[nid]++;
		surplus_huge_pages_node[nid]++;
	} else {
		nr_huge_pages--;
		surplus_huge_pages--;
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0)
		return 0;

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.
	 */
	needed += allocated;
	ret = 0;
free:
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		list_del(&page->lru);
		if ((--needed) >= 0)
			enqueue_huge_page(page);
		else {
			/*
			 * Decrement the refcount and free the page using its
			 * destructor.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			spin_unlock(&hugetlb_lock);
			put_page(page);
			spin_lock(&hugetlb_lock);
		}
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
		}
	}
}

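/*
 * For shared (VM_MAYSHARE) mappings the pages were reserved up front, so
 * simply dequeue one from the pre-allocated pool.
 */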
static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	return page ? page : ERR_PTR(-VM_FAULT_OOM);
}

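/*
 * Private mappings are charged against the hugetlbfs quota here; use a
 * free page if any remain beyond the reserves, otherwise fall back to a
 * surplus page from the buddy allocator.
 */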
static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page = NULL;

	if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
		return ERR_PTR(-VM_FAULT_SIGBUS);

	spin_lock(&hugetlb_lock);
	if (free_huge_pages > resv_huge_pages)
		page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	if (!page) {
		page = alloc_buddy_huge_page(vma, addr);
		if (!page) {
			hugetlb_put_quota(vma->vm_file->f_mapping, 1);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}
	return page;
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;

	if (vma->vm_flags & VM_MAYSHARE)
		page = alloc_huge_page_shared(vma, addr);
	else
		page = alloc_huge_page_private(vma, addr);

	if (!IS_ERR(page)) {
		set_page_refcounted(page);
		set_page_private(page, (unsigned long) mapping);
	}
	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	hugetlb_next_nid = first_node(node_online_map);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

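/*
 * Sum a per-node counter array over the nodes allowed by the current
 * task's cpuset.
 */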
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
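/*
 * When shrinking the pool, free huge pages that are not in highmem first,
 * until the pool has come down to 'count' pages, so that scarce lowmem is
 * returned to the system in preference to highmem.
 */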
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
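/*
 * Resize the persistent huge page pool to 'count' pages, growing it with
 * fresh allocations and shrinking it by freeing unused pages or marking
 * in-use ones as surplus.  Returns the resulting pool size.
 */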
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		int ret;
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}


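/*
 * Copy the hugetlb pte entries of a vma from the parent mm to the child
 * at fork time, write-protecting both copies for private COW mappings.
 */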
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

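/*
 * Tear down the hugetlb ptes in the given range and release the pages.
 * Caller must hold the file's i_mmap_lock so the page gathering list
 * below cannot be corrupted by concurrent unmaps of the same pages.
 */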
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
	 * to clean up. Since no pte has actually been setup, it is safe to
	 * do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

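/*
 * Handle a copy-on-write fault on a huge page.  Called with
 * mm->page_table_lock held; the lock is dropped and retaken around the
 * page copy.
 */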
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);
		return -PTR_ERR(new_page);
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

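/*
 * Fault in a huge page that is not yet present: find it in the page
 * cache or allocate and zero a new one, then install the pte.  For a
 * private write fault, break COW immediately to avoid a second fault.
 */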
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += BLOCKS_PER_HUGEPAGE;
			spin_unlock(&inode->i_lock);
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
	put_page(page);
	goto out;
}

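/*
 * Top-level hugetlb fault handler: allocate the pte if necessary, then
 * hand off to hugetlb_no_page() for missing pages or hugetlb_cow() for
 * write faults on read-only ptes.
 */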
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

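/*
 * Back end for get_user_pages() on hugetlb VMAs: walk the huge ptes,
 * faulting pages in as needed, and fill in the pages[]/vmas[] arrays a
 * PAGE_SIZE subpage at a time.
 */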
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			int write)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, write);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

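/*
 * The file_region list, hung off the address_space's private_list, tracks
 * which ranges of the file already carry a huge page reservation.
 * region_add(), region_chg() and region_truncate() below maintain it.
 */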
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to   = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. The application can still be OOM'ed by the kernel
	 * for lack of free hugetlb pages in the cpuset that the task is in.
	 * Attempting to enforce strict accounting with cpuset is almost
	 * impossible (or too ugly) because cpusets are too fluid: tasks and
	 * memory nodes can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mappings with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to checking against the current free page availability
	 * as a best attempt, to minimize the impact of the semantics change.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(free_huge_pages_node))
			goto out;
	}

	ret = 0;
	resv_huge_pages += delta;
	if (delta < 0)
		return_unused_surplus_pages((unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;

	if (hugetlb_get_quota(inode->i_mapping, chg))
		return -ENOSPC;
	ret = hugetlb_acct_memory(chg);
	if (ret < 0) {
		hugetlb_put_quota(inode->i_mapping, chg);
		return ret;
	}
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);

	spin_lock(&inode->i_lock);
	inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
	spin_unlock(&inode->i_lock);

	hugetlb_put_quota(inode->i_mapping, (chg - freed));
	hugetlb_acct_memory(-(chg - freed));
}