/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/bootmem.h>
#include <linux/module.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

static LIST_HEAD(bdata_list);
#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__func__, ## args);		\
})

/*
 * Given an initialised bdata, return the size of its bootmem bitmap in bytes.
 */
static unsigned long __init get_mapsize(bootmem_data_t *bdata)
{
	unsigned long mapsize;
	unsigned long start = PFN_DOWN(bdata->node_boot_start);
	unsigned long end = bdata->node_low_pfn;

	mapsize = ((end - start) + 7) / 8;
	return ALIGN(mapsize, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long mapsize;

	/* one bit per page, rounded up to whole bytes */
	mapsize = (pages + 7) / 8;
	/* round the byte count up to a whole number of pages */
	mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
	mapsize >>= PAGE_SHIFT;

	return mapsize;
}
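
/*
 * Worked example (illustrative only, assuming 4 KB pages): a node with
 * 131072 pages (512 MB) needs 131072 bits = 16 KB of bitmap, so
 * bootmem_bootmap_pages(131072) returns 4 on such a configuration.
 */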

/*
 * link bdata into bdata_list in ascending order of node start address
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	bootmem_data_t *ent;

	if (list_empty(&bdata_list)) {
		list_add(&bdata->list, &bdata_list);
		return;
	}
	/* insert in order */
	list_for_each_entry(ent, &bdata_list, list) {
		if (bdata->node_boot_start < ent->node_boot_start) {
			list_add_tail(&bdata->list, &ent->list);
			return;
		}
	}
	list_add_tail(&bdata->list, &bdata_list);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_boot_start = PFN_PHYS(start);
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = get_mapsize(bdata);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}
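
/*
 * Illustrative sketch (not taken from any particular architecture): the
 * usual boot sequence sizes the bitmap, registers the node, releases the
 * known-good RAM and then re-reserves what is already in use.  first_pfn,
 * last_pfn, bitmap_pfn, kernel_start and kernel_end are hypothetical
 * values the architecture's setup code would already know.
 *
 *	unsigned long bitmap_bytes;
 *
 *	bitmap_bytes = init_bootmem_node(NODE_DATA(0), bitmap_pfn,
 *					 first_pfn, last_pfn);
 *	free_bootmem(PFN_PHYS(first_pfn), PFN_PHYS(last_pfn - first_pfn));
 *	reserve_bootmem(kernel_start, kernel_end - kernel_start,
 *			BOOTMEM_DEFAULT);
 *	reserve_bootmem(PFN_PHYS(bitmap_pfn), bitmap_bytes, BOOTMEM_DEFAULT);
 */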

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}

static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	struct page *page;
	unsigned long pfn;
	unsigned long i, count;
	unsigned long idx;
	unsigned long *map;
	int gofast = 0;

	BUG_ON(!bdata->node_bootmem_map);

	count = 0;
	/* first extant page of the node */
	pfn = PFN_DOWN(bdata->node_boot_start);
	idx = bdata->node_low_pfn - pfn;
	map = bdata->node_bootmem_map;
	/*
	 * Check if we are aligned to BITS_PER_LONG pages.  If so, we might
	 * be able to free page orders of that size at once.
	 */
	if (!(pfn & (BITS_PER_LONG-1)))
		gofast = 1;

	for (i = 0; i < idx; ) {
		unsigned long v = ~map[i / BITS_PER_LONG];

		if (gofast && v == ~0UL) {
			int order;

			page = pfn_to_page(pfn);
			count += BITS_PER_LONG;
			order = ffs(BITS_PER_LONG) - 1;
			__free_pages_bootmem(page, order);
			i += BITS_PER_LONG;
			page += BITS_PER_LONG;
		} else if (v) {
			unsigned long m;

			page = pfn_to_page(pfn);
			for (m = 1; m && i < idx; m<<=1, page++, i++) {
				if (v & m) {
					count++;
					__free_pages_bootmem(page, 0);
				}
			}
		} else {
			i += BITS_PER_LONG;
		}
		pfn += BITS_PER_LONG;
	}

	/*
	 * Now free the allocator bitmap itself, it's not
	 * needed anymore:
	 */
	page = virt_to_page(bdata->node_bootmem_map);
	idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT;
	for (i = 0; i < idx; i++, page++)
		__free_pages_bootmem(page, 0);
	count += i;
	bdata->node_bootmem_map = NULL;

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
	return free_all_bootmem_core(pgdat->bdata);
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	return free_all_bootmem_core(NODE_DATA(0)->bdata);
}
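
/*
 * Illustrative sketch (an assumption about typical callers, not a
 * requirement of this file): an architecture's mem_init() usually feeds
 * the return value into its page accounting, roughly:
 *
 *	totalram_pages += free_all_bootmem();
 */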

static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
				     unsigned long size)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range */
	if (addr + size < bdata->node_boot_start ||
		PFN_DOWN(addr) > bdata->node_low_pfn)
		return;
	/*
	 * round down end of usable mem, partially free pages are
	 * considered reserved.
	 */

	if (addr >= bdata->node_boot_start && addr < bdata->last_success)
		bdata->last_success = addr;

	/*
	 * Convert the range to bitmap indexes; partial pages stay
	 * reserved (start rounded up, end rounded down).
	 */
	if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
		sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + PFN_DOWN(bdata->node_boot_start),
		eidx + PFN_DOWN(bdata->node_boot_start));

	for (i = sidx; i < eidx; i++) {
		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
			BUG();
	}
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * Only physical pages that actually reside on @pgdat are marked.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	free_bootmem_core(pgdat->bdata, physaddr, size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * All physical pages within the range are marked, no matter what
 * node they reside on.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	bootmem_data_t *bdata;
	list_for_each_entry(bdata, &bdata_list, list)
		free_bootmem_core(bdata, addr, size);
}

/*
 * Marks a particular physical memory range as unallocatable. Usable RAM
 * might be used for boot-time allocations - or it might get added
 * to the free page pool later on.
 */
static int __init can_reserve_bootmem_core(bootmem_data_t *bdata,
			unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range for this node, don't block the other nodes */
	if (addr + size < bdata->node_boot_start ||
		PFN_DOWN(addr) > bdata->node_low_pfn)
		return 0;

	/*
	 * Convert the range to bitmap indexes; partial pages are
	 * included (start rounded down, end rounded up).
	 */
	if (addr > bdata->node_boot_start)
		sidx = PFN_DOWN(addr - bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_UP(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	for (i = sidx; i < eidx; i++) {
		if (test_bit(i, bdata->node_bootmem_map)) {
			if (flags & BOOTMEM_EXCLUSIVE)
				return -EBUSY;
		}
	}

	return 0;
}

static void __init reserve_bootmem_core(bootmem_data_t *bdata,
			unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range */
	if (addr + size < bdata->node_boot_start ||
		PFN_DOWN(addr) > bdata->node_low_pfn)
		return;

	/*
	 * Convert the range to bitmap indexes; partial pages are
	 * included (start rounded down, end rounded up).
	 */
	if (addr > bdata->node_boot_start)
		sidx = PFN_DOWN(addr - bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_UP(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + PFN_DOWN(bdata->node_boot_start),
		eidx + PFN_DOWN(bdata->node_boot_start),
		flags);

	for (i = sidx; i < eidx; i++)
		if (test_and_set_bit(i, bdata->node_bootmem_map))
			bdebug("hm, page %lx reserved twice.\n",
				PFN_DOWN(bdata->node_boot_start) + i);
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * Only physical pages that actually reside on @pgdat are marked.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				 unsigned long size, int flags)
{
	int ret;

	ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
	if (ret < 0)
		return -ENOMEM;
	reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
	return 0;
}

#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * All physical pages within the range are marked, no matter what
 * node they reside on.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			    int flags)
{
	bootmem_data_t *bdata;
	int ret;

	list_for_each_entry(bdata, &bdata_list, list) {
		ret = can_reserve_bootmem_core(bdata, addr, size, flags);
		if (ret < 0)
			return ret;
	}
	list_for_each_entry(bdata, &bdata_list, list)
		reserve_bootmem_core(bdata, addr, size, flags);

	return 0;
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
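
/*
 * Illustrative sketch (the call site, crash_base and crash_size are
 * hypothetical): BOOTMEM_EXCLUSIVE lets a caller claim a range only if
 * nothing else has reserved it yet, e.g. when carving out a crash
 * kernel region:
 *
 *	if (reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE)) {
 *		printk(KERN_INFO "crashkernel region is already in use\n");
 *		return;
 *	}
 *
 * With BOOTMEM_DEFAULT the same call would merely note the double
 * reservation in the debug log and succeed.
 */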

/*
 * We 'merge' subsequent allocations to save space. We might 'lose'
 * some fraction of a page if allocations cannot be satisfied due to
 * size constraints on boxes where there is physical RAM space
 * fragmentation - in these cases (mostly large memory boxes) this
 * is not a problem.
 *
 * On low memory boxes we get it right in 100% of the cases.
 *
 * alignment has to be a power of 2 value.
 *
 * NOTE:  This function is _not_ reentrant.
 */
static void * __init
alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
		unsigned long align, unsigned long goal, unsigned long limit)
{
	unsigned long areasize, preferred;
	unsigned long i, start = 0, incr, eidx, end_pfn;
	void *ret;
	unsigned long node_boot_start;
	void *node_bootmem_map;

	if (!size) {
		printk(KERN_ERR "alloc_bootmem_core(): zero-sized request\n");
		BUG();
	}
	BUG_ON(align & (align-1));

	/* on nodes without memory - bootmem_map is NULL */
	if (!bdata->node_bootmem_map)
		return NULL;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	/* bdata->node_boot_start is supposed to be (12+6) bits aligned on x86_64 ? */
	node_boot_start = bdata->node_boot_start;
	node_bootmem_map = bdata->node_bootmem_map;
	if (align) {
		node_boot_start = ALIGN(bdata->node_boot_start, align);
		if (node_boot_start > bdata->node_boot_start)
			node_bootmem_map = (unsigned long *)bdata->node_bootmem_map +
			    PFN_DOWN(node_boot_start - bdata->node_boot_start)/BITS_PER_LONG;
	}

	if (limit && node_boot_start >= limit)
		return NULL;

	end_pfn = bdata->node_low_pfn;
	limit = PFN_DOWN(limit);
	if (limit && end_pfn > limit)
		end_pfn = limit;

	eidx = end_pfn - PFN_DOWN(node_boot_start);

	/*
	 * We try to allocate bootmem pages above 'goal'
	 * first, then we try to allocate lower pages.
	 */
	preferred = 0;
	if (goal && PFN_DOWN(goal) < end_pfn) {
		if (goal > node_boot_start)
			preferred = goal - node_boot_start;

		if (bdata->last_success > node_boot_start &&
			bdata->last_success - node_boot_start >= preferred)
			if (!limit || limit > bdata->last_success)
				preferred = bdata->last_success - node_boot_start;
	}

	preferred = PFN_DOWN(ALIGN(preferred, align));
	areasize = (size + PAGE_SIZE-1) / PAGE_SIZE;
	incr = align >> PAGE_SHIFT ? : 1;

restart_scan:
	for (i = preferred; i < eidx;) {
		unsigned long j;

		i = find_next_zero_bit(node_bootmem_map, eidx, i);
		i = ALIGN(i, incr);
		if (i >= eidx)
			break;
		if (test_bit(i, node_bootmem_map)) {
			i += incr;
			continue;
		}
		for (j = i + 1; j < i + areasize; ++j) {
			if (j >= eidx)
				goto fail_block;
			if (test_bit(j, node_bootmem_map))
				goto fail_block;
		}
		start = i;
		goto found;
	fail_block:
		i = ALIGN(j, incr);
		if (i == j)
			i += incr;
	}

	if (preferred > 0) {
		preferred = 0;
		goto restart_scan;
	}
	return NULL;

found:
	bdata->last_success = PFN_PHYS(start) + node_boot_start;
	BUG_ON(start >= eidx);

	/*
	 * Is the next page of the previous allocation-end the start
	 * of this allocation's buffer? If yes then we can 'merge'
	 * the previous partial page with this allocation.
	 */
	if (align < PAGE_SIZE &&
	    bdata->last_offset && bdata->last_pos+1 == start) {
		unsigned long offset, remaining_size;
		offset = ALIGN(bdata->last_offset, align);
		BUG_ON(offset > PAGE_SIZE);
		remaining_size = PAGE_SIZE - offset;
		if (size < remaining_size) {
			areasize = 0;
			/* last_pos unchanged */
			bdata->last_offset = offset + size;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset + node_boot_start);
		} else {
			remaining_size = size - remaining_size;
			areasize = (remaining_size + PAGE_SIZE-1) / PAGE_SIZE;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset + node_boot_start);
			bdata->last_pos = start + areasize - 1;
			bdata->last_offset = remaining_size;
		}
		bdata->last_offset &= ~PAGE_MASK;
	} else {
		bdata->last_pos = start + areasize - 1;
		bdata->last_offset = size & ~PAGE_MASK;
		ret = phys_to_virt(start * PAGE_SIZE + node_boot_start);
	}

	bdebug("nid=%td start=%lx end=%lx\n",
		bdata - bootmem_node_data,
		start + PFN_DOWN(bdata->node_boot_start),
		start + areasize + PFN_DOWN(bdata->node_boot_start));

	/*
	 * Reserve the area now:
	 */
	for (i = start; i < start + areasize; i++)
		if (unlikely(test_and_set_bit(i, node_bootmem_map)))
			BUG();
	memset(ret, 0, size);
	return ret;
}
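
/*
 * Worked example of the merge behaviour above (values are illustrative
 * and assume 4 KB pages): after alloc_bootmem_core() hands out 100
 * bytes, last_pos points at that page and last_offset is 100.  A second
 * sub-page request with align < PAGE_SIZE whose search lands right after
 * that page is placed in the same page at offset ALIGN(100, align)
 * instead of consuming a fresh page, which is what keeps small boot-time
 * allocations from each burning a whole page.
 */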

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
				      unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = alloc_bootmem_core(bdata, size, align, goal, 0);
		if (ptr)
			return ptr;
	}
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	void *mem = __alloc_bootmem_nopanic(size, align, goal);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
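
/*
 * Illustrative sketch (table and bytes are placeholders; the wrapper
 * definition reflects linux/bootmem.h of this era and should be treated
 * as an assumption): the common alloc_bootmem() helper is a thin macro
 * around this function, roughly:
 *
 *	table = __alloc_bootmem(bytes, SMP_CACHE_BYTES,
 *				__pa(MAX_DMA_ADDRESS));
 *
 * i.e. cache-line aligned and preferably above the DMA zone, falling
 * back below the goal if that region is exhausted.
 */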

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem(size, align, goal);
}
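
/*
 * Illustrative sketch (nid, nr_entries and table are hypothetical):
 * callers use this to keep per-node tables on their own node when
 * possible, e.g.
 *
 *	table = __alloc_bootmem_node(NODE_DATA(nid),
 *				     nr_entries * sizeof(*table),
 *				     SMP_CACHE_BYTES, 0);
 *
 * and silently fall back to any other node if @pgdat has no room.
 */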

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	void *ptr;
	unsigned long limit, goal, start_nr, end_nr, pfn;
	struct pglist_data *pgdat;

	pfn = section_nr_to_pfn(section_nr);
	goal = PFN_PHYS(pfn);
	limit = PFN_PHYS(section_nr_to_pfn(section_nr + 1)) - 1;
	pgdat = NODE_DATA(early_pfn_to_nid(pfn));
	ptr = alloc_bootmem_core(pgdat->bdata, size, SMP_CACHE_BYTES, goal,
				limit);

	if (!ptr)
		return NULL;

	start_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr)));
	end_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr) + size));
	if (start_nr != section_nr || end_nr != section_nr) {
		printk(KERN_WARNING "alloc_bootmem failed on section %ld.\n",
		       section_nr);
		free_bootmem_core(pgdat->bdata, __pa(ptr), size);
		ptr = NULL;
	}

	return ptr;
}
#endif

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = alloc_bootmem_core(bdata, size, align, goal,
					ARCH_LOW_ADDRESS_LIMIT);
		if (ptr)
			return ptr;
	}

	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of low memory");
	return NULL;
}
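
/*
 * Illustrative note (buf and pool_bytes are placeholders; the
 * bounce-buffer example describes a typical caller, not something this
 * file enforces): "low" allocations are capped at
 * ARCH_LOW_ADDRESS_LIMIT, which callers with 32-bit addressing
 * constraints rely on:
 *
 *	buf = __alloc_bootmem_low(pool_bytes, PAGE_SIZE, 0);
 */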

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	return alloc_bootmem_core(pgdat->bdata, size, align, goal,
				ARCH_LOW_ADDRESS_LIMIT);
}