/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

void (*copy_page)(void *from, void *to);
void (*clear_page)(void *to);

void show_mem(void)
{
	int total = 0, reserved = 0, free = 0;
	int shared = 0, cached = 0, slab = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();

	for_each_online_pgdat(pgdat) {
		unsigned long flags, i;

		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
}

#ifdef CONFIG_MMU
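/*
 * Install a single kernel PTE mapping 'addr' to 'phys' with the given
 * protection, allocating the intermediate pud/pmd levels if necessary,
 * and flush any stale TLB entry for the address.
 */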
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch. We don't presently do this, but it could be done
 * in a similar fashion to the wired TLB interface that sh64 uses (by way
 * of the memory mapped UTLB configuration) -- this unfortunately forces us to
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated. (i.e., we may want this for
 * the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}
#endif	/* CONFIG_MMU */

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

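	/*
	 * Everything lives in ZONE_NORMAL; find the highest low-memory
	 * PFN across all online nodes.
	 */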
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

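/* Ranges exported through /proc/kcore: low memory and the vmalloc area. */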
static struct kcore_list kcore_mem, kcore_vmalloc;

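/*
 * mem_init() releases each online node's bootmem pages to the page
 * allocator, establishes high_memory, selects the copy_page()/clear_page()
 * implementations and prints the final memory banner.
 */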
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	num_physpages = 0;
	high_memory = NULL;

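	/* Release each node's bootmem and track the top of memory. */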
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	/*
	 * Set up wrappers for copy_page()/clear_page(); these will get
	 * overridden later in the boot process if a better method is available.
	 */
#ifdef CONFIG_MMU
	copy_page = copy_page_slow;
	clear_page = clear_page_slow;
#else
	copy_page = copy_page_nommu;
	clear_page = clear_page_nommu;
#endif

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	p3_cache_init();

	/* Initialize the vDSO */
	vsyscall_init();
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
	        (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
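/* Return the pages holding the initial ramdisk to the page allocator. */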
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;
	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
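/* Hand a newly onlined page to the page allocator and update accounting. */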
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __FUNCTION__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif