/* $Id: init.c,v 1.19 2004/02/21 04:42:16 kkojima Exp $
 *
 *  linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002, 2004  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>

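/*
 * Per-CPU state for the generic mmu_gather (TLB flush batching) code, and
 * the kernel's reference page directory, which paging_init() clears and
 * installs as the initial MMU.TTB value.
 */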
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_MMU
/* It'd be good if these lines were in the standard header file. */
#define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
#define MAX_LOW_PFN	(NODE_DATA(0)->bdata->node_low_pfn)
#endif

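/*
 * copy_page()/clear_page() are function pointers; mem_init() installs the
 * generic slow (or nommu) versions, which may be overridden later in the
 * boot process if a better CPU-specific routine is available.
 */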
void (*copy_page)(void *from, void *to);
void (*clear_page)(void *to);

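/*
 * Dump a summary of memory usage: the free area lists, free swap, and a
 * per-page count of reserved, shared and swap-cached pages.
 */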
void show_mem(void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (page_count(mem_map+i))
			shared += page_count(mem_map+i) - 1;
	}
	printk("%d pages of RAM\n",total);
	printk("%d reserved pages\n",reserved);
	printk("%d pages shared\n",shared);
	printk("%d pages swap cached\n",cached);
}

#ifdef CONFIG_MMU
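/*
 * Establish a single kernel-space PTE: walk (allocating where necessary)
 * the pgd/pud/pmd levels for 'addr', set the PTE to map the physical
 * address 'phys' with protection 'prot', and flush the TLB entry.
 */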
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	__flush_tlb_page(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch; we don't presently do this, but it could be done
 * in a similar fashion to the wired TLB interface that sh64 uses (by way
 * of the memory mapped UTLB configuration) -- this unfortunately forces us to
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated. (ie, we may want this for
 * the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 *
 *					 -- PFM.
 */
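/*
 * Callers typically reach this through the set_fixmap()-style helpers that
 * asm/fixmap.h conventionally provides (supplying PAGE_KERNEL as the
 * protection) rather than calling __set_fixmap() directly.
 */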
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}
#endif	/* CONFIG_MMU */

/* References to section boundaries */

extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };

	/*
	 * Set up some defaults for the zone sizes.. these should be safe
	 * regardless of discontiguous memory or MMU settings.
	 */
	zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = 0 >> PAGE_SHIFT;
#endif

#ifdef CONFIG_MMU
	/*
	 * If we have an MMU, and want to be using it .. we need to adjust
	 * the zone sizes accordingly, in addition to turning it on.
	 */
	{
		/* We don't need to map the kernel through the TLB, as
		 * it is permanently mapped using P1. So clear the
		 * entire pgd. */
		memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

		/* Turn on the MMU */
		enable_mmu();
		zones_size[ZONE_NORMAL] = MAX_LOW_PFN - START_PFN;
	}

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

#elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
	/*
	 * If we don't have CONFIG_MMU set and the processor in question
	 * still has an MMU, care needs to be taken to make sure it doesn't
	 * stay on.. Since the boot loader could have potentially already
	 * turned it on, and we clearly don't want it, we simply turn it off.
	 *
	 * We don't need to do anything special for the zone sizes, since the
	 * default values that were already configured up above should be
	 * satisfactory.
	 */
	disable_mmu();
#endif
	NODE_DATA(0)->node_mem_map = NULL;
	free_area_init_node(0, NODE_DATA(0), zones_size, __MEMORY_START >> PAGE_SHIFT, 0);
}

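/* Physical memory and vmalloc ranges exported through /proc/kcore. */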
static struct kcore_list kcore_mem, kcore_vmalloc;

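/*
 * Finish bringing up the memory subsystem: clear the zero page, install the
 * copy_page/clear_page implementations, hand all bootmem pages over to the
 * page allocator, register the /proc/kcore entries and print the usual
 * memory banner.
 */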
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	extern unsigned long memory_start;

#ifdef CONFIG_MMU
	high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE);
#else
	extern unsigned long memory_end;

	high_memory = (void *)(memory_end & PAGE_MASK);
#endif

	max_mapnr = num_physpages = MAP_NR(high_memory) - MAP_NR(memory_start);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	/*
	 * Set up wrappers for copy/clear_page(); these will get overridden
	 * later in the boot process if a better method is available.
	 */
#ifdef CONFIG_MMU
	copy_page = copy_page_slow;
	clear_page = clear_page_slow;
#else
	copy_page = copy_page_nommu;
	clear_page = clear_page_nommu;
#endif

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem_node(NODE_DATA(0));
	reservedpages = 0;
	for (tmp = 0; tmp < num_physpages; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (PageReserved(mem_map+tmp))
			reservedpages++;

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk reserved, %dk data, %dk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		max_mapnr << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

	p3_cache_init();

	/* Initialize the vDSO */
	vsyscall_init();
}

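/* Free the memory occupied by __init code and data once boot is complete. */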
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
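/* Release the pages backing the initial ramdisk once it is no longer needed. */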
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;
	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif