/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
int hugetlb_dynamic_pool;
static int hugetlb_next_nid;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

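/*
 * Zero a huge page one base page at a time, calling cond_resched()
 * between base pages to keep scheduling latency down.
 */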
static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

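/*
 * Put a free huge page back on its node's free list and update the
 * global and per-node free counts.  Caller must hold hugetlb_lock.
 */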
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

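/*
 * Remove a huge page from the free lists, walking the zonelist chosen
 * by the VMA's memory policy and skipping zones disallowed by the
 * current cpuset.  Caller must hold hugetlb_lock.
 */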
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (vma && vma->vm_flags & VM_MAYSHARE)
				resv_huge_pages--;
			break;
		}
	}
	mpol_free(mpol);	/* unref if mpol !NULL */
	return page;
}

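/*
 * Give a huge page back to the buddy allocator: drop it from the pool
 * counts, reset the flags of each constituent base page, and free the
 * HUGETLB_PAGE_ORDER block.  Caller must hold hugetlb_lock.
 */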
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

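/*
 * Compound page destructor, run when the last reference to a huge page
 * is dropped.  Surplus pages go straight back to the buddy allocator;
 * normal pool pages return to the free lists.
 */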
static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
	set_page_private(page, 0);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

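/*
 * Allocate a fresh huge page from the buddy allocator on the given node
 * and add it to the pool via its compound destructor.
 */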
static struct page *alloc_fresh_huge_page_node(int nid)
{
	struct page *page;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
		HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[nid]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
	}

	return page;
}

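/*
 * Allocate one fresh huge page, trying each online node in round-robin
 * order starting at hugetlb_next_nid.  Returns 1 on success, 0 if no
 * node could satisfy the allocation.
 */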
static int alloc_fresh_huge_page(void)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
		if (page)
			ret = 1;
		/*
		 * Use a helper variable to find the next node and then
		 * copy it back to hugetlb_next_nid afterwards:
		 * otherwise there's a window in which a racer might
		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
		 * But we don't need to use a spin_lock here: it really
		 * doesn't matter if occasionally a racer chooses the
		 * same nid as we do.  Move nid forward in the mask even
		 * if we just successfully allocated a hugepage so that
		 * the next caller gets hugepages on the next node.
		 */
		next_nid = next_node(hugetlb_next_nid, node_online_map);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node(node_online_map);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	return ret;
Linus Torvalds's avatar
}

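/*
 * Allocate a surplus huge page directly from the buddy allocator,
 * bypassing the fixed-size pool.  Only allowed when the dynamic pool
 * is enabled.
 */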
static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;

	/* Check if the dynamic pool is enabled */
	if (!hugetlb_dynamic_pool)
		return NULL;

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		surplus_huge_pages++;
		surplus_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
	}

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0)
		return 0;

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.
	 */
	needed += allocated;
	ret = 0;
free:
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		list_del(&page->lru);
		if ((--needed) >= 0)
			enqueue_huge_page(page);
		else {
			/*
			 * Decrement the refcount and free the page using its
			 * destructor.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			spin_unlock(&hugetlb_lock);
			put_page(page);
			spin_lock(&hugetlb_lock);
		}
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
		}
	}
}

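/*
 * Allocate a huge page for a shared (VM_MAYSHARE) mapping.  Shared
 * mappings reserve their pages up front, so the page must come from
 * the pool's free lists.
 */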
static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	return page ? page : ERR_PTR(-VM_FAULT_OOM);
}

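/*
 * Allocate a huge page for a private mapping: charge the file's quota,
 * take a free page if that leaves the reserve intact, and otherwise
 * fall back to the dynamic pool.
 */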
static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page = NULL;

	if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
		return ERR_PTR(-VM_FAULT_SIGBUS);

	spin_lock(&hugetlb_lock);
	if (free_huge_pages > resv_huge_pages)
		page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	if (!page)
		page = alloc_buddy_huge_page(vma, addr);
	return page ? page : ERR_PTR(-VM_FAULT_OOM);
}

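/*
 * Common huge page allocation entry point for the fault paths.  The
 * mapping is stashed in page_private so free_huge_page can return the
 * quota later.
 */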
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;

	if (vma->vm_flags & VM_MAYSHARE)
		page = alloc_huge_page_shared(vma, addr);
	else
		page = alloc_huge_page_private(vma, addr);

	if (!IS_ERR(page)) {
		set_page_refcounted(page);
		set_page_private(page, (unsigned long) mapping);
	}
	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	hugetlb_next_nid = first_node(node_online_map);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

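/*
 * Sum a per-node counter array over the nodes allowed by the current
 * cpuset.
 */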
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
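/*
 * Grow or shrink the persistent (non-surplus) pool to 'count' pages,
 * converting pages to or from surplus state where possible.  Returns
 * the resulting persistent pool size.
 */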
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		int ret;
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

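/*
 * Construct a huge PTE for the given page with protections taken from
 * the VMA, marked writable and dirty only when requested.
 */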
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

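/*
 * Make an existing huge PTE writable and dirty, updating the MMU cache
 * if the hardware flags changed.
 */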
static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}


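/*
 * Copy the huge page mappings from parent to child at fork().  For
 * private mappings the parent's PTEs are write-protected so that both
 * processes will COW on their next write.
 */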
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

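/*
 * Unmap the huge pages in [start, end), gathering them on a local list
 * so references are dropped only after the TLB flush.  Caller must hold
 * the file's i_mmap_lock.
 */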
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
	 * to clean up. Since no pte has actually been setup, it is safe to
	 * do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

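/*
 * Handle a write fault on a read-only huge PTE.  Called with
 * mm->page_table_lock held; the lock is dropped and retaken around the
 * page copy.
 */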
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);
		return -PTR_ERR(new_page);
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

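/*
 * Fault in a huge page that is not yet present: find it in the page
 * cache, or allocate a new one (inserting it into the cache for shared
 * mappings), then install the PTE.
 */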
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += BLOCKS_PER_HUGEPAGE;
			spin_unlock(&inode->i_lock);
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
	put_page(page);
	goto out;
}

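/*
 * Top-level huge page fault handler, called from handle_mm_fault() for
 * hugetlb VMAs.
 */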
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

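/*
 * The get_user_pages() backend for huge page VMAs: walk the range,
 * faulting pages in as needed, and fill pages[]/vmas[] one base page
 * at a time.
 */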
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			int write)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, write);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

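/*
 * Update the protection bits on every huge PTE in [address, end), used
 * by mprotect() on hugetlb VMAs.
 */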
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

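/*
 * File region tracking for reservation accounting: each file_region
 * describes a reserved range [from, to) of a hugetlbfs file, on a list
 * kept sorted by offset.
 */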
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;