/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

/* Per-CPU TLB shootdown state used by the generic mmu_gather machinery. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* The kernel's reference page table, cleared and installed by paging_init(). */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_SUPERH32
/*
 * Handle trivial transitions between cached and uncached
 * segments, making use of the 1:1 mapping relationship in
 * 512MB lowmem.
 *
 * This is the offset of the uncached section from its cached alias.
 * Default value only valid in 29 bit mode, in 32bit mode will be
 * overridden in pmb_init.
 */
unsigned long cached_to_uncached = P2SEG - P1SEG;
#endif
#ifdef CONFIG_MMU
/*
 * set_pte_phys - install a single kernel PTE mapping @addr -> @phys
 * with protection @prot.
 *
 * Walks (and, for the pud/pmd levels, allocates) the kernel page table
 * down to the pte level.  On any inconsistency -- missing pgd entry,
 * failed pud/pmd allocation, or an already-present pte -- it dumps the
 * offending entry via the p?d_ERROR() helpers and bails out without
 * mapping anything.  On success the stale TLB entry for @addr under the
 * current ASID is flushed.
 */
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	/* NULL mm: these are kernel mappings, not tied to a user mm. */
	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		/* Refuse to clobber an existing mapping. */
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	/* Evict any stale translation for this address/ASID pair. */
	local_flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch, we don't presently do this, but this could be done
 * in a similar fashion as to the wired TLB interface that sh64 uses (by way
 * of the memory mapped UTLB configuration) -- this unfortunately forces us to
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated. (ie, we may want this for
 * the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	/* An out-of-range fixmap index is a programming error. */
	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131

void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx;
	unsigned long vaddr;

	vaddr = start & PMD_MASK;
	end = (end + PMD_SIZE - 1) & PMD_MASK;
	pgd_idx = pgd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		BUG_ON(pgd_none(*pgd));
		pud = pud_offset(pgd, 0);
		BUG_ON(pud_none(*pud));
		pmd = pmd_offset(pud, 0);

		if (!pmd_present(*pmd)) {
			pte_t *pte_table;
			pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmd, pte_table);
		}

		vaddr += PMD_SIZE;
	}
}
132
#endif	/* CONFIG_MMU */
Linus Torvalds's avatar
Linus Torvalds committed
133
134
135
136
137
138

/*
 * paging_init() sets up the page tables
 *
 * Clears swapper_pg_dir, points the MMU's TTB at it, pre-populates the
 * fixmap pmd range, and hands the per-node pfn limits to the zone
 * allocator via free_area_init_nodes().
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr;
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, 0, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		/* Track the highest low-memory pfn across all nodes. */
		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);

#ifdef CONFIG_SUPERH32
	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
#endif
}

Paul Mundt's avatar
Paul Mundt committed
186
187
static struct kcore_list kcore_mem, kcore_vmalloc;

Linus Torvalds's avatar
Linus Torvalds committed
188
189
void __init mem_init(void)
{
Paul Mundt's avatar
Paul Mundt committed
190
	int codesize, datasize, initsize;
191
	int nid;
Linus Torvalds's avatar
Linus Torvalds committed
192

193
194
195
	num_physpages = 0;
	high_memory = NULL;

196
197
198
199
200
201
202
203
204
205
206
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;
Linus Torvalds's avatar
Linus Torvalds committed
207

208
209
210
		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
211
212
213
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}
Linus Torvalds's avatar
Linus Torvalds committed
214
215
216
217
218
219
220
221
222

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

Paul Mundt's avatar
Paul Mundt committed
223
224
225
226
227
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
Paul Mundt's avatar
Paul Mundt committed
228
	       "%dk data, %dk init)\n",
Linus Torvalds's avatar
Linus Torvalds committed
229
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
230
		num_physpages << (PAGE_SHIFT-10),
Linus Torvalds's avatar
Linus Torvalds committed
231
232
233
234
235
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	p3_cache_init();
236
237
238

	/* Initialize the vDSO */
	vsyscall_init();
Linus Torvalds's avatar
Linus Torvalds committed
239
240
241
242
243
}

void free_initmem(void)
{
	unsigned long addr;
244

Linus Torvalds's avatar
Linus Torvalds committed
245
246
247
	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
248
		init_page_count(virt_to_page(addr));
Linus Torvalds's avatar
Linus Torvalds committed
249
250
251
		free_page(addr);
		totalram_pages++;
	}
252
253
254
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
	        (unsigned long)&__init_begin) >> 10);
Linus Torvalds's avatar
Linus Torvalds committed
255
256
257
258
259
260
261
262
}

#ifdef CONFIG_BLK_DEV_INITRD
/*
 * free_initrd_mem - release the initrd image at [start, end) back to
 * the page allocator, page by page, mirroring free_initmem() above.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#if THREAD_SHIFT < PAGE_SHIFT
/*
 * When the thread stack is smaller than a page, back thread_info
 * allocations with a dedicated slab cache instead of whole pages.
 */
static struct kmem_cache *thread_info_cache;

/* Allocate a thread_info for @tsk; returns NULL on allocation failure. */
struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	/* Zero the stack so stack-usage accounting can find the high water mark. */
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

/* Return @ti to the thread_info slab cache. */
void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

/* Create the thread_info cache; called once during early boot. */
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */


#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * arch_add_memory - hotplug @size bytes of memory at physical @start
 * into node @nid.  Everything lands in ZONE_NORMAL.  Returns 0 on
 * success or the negative error from __add_pages().
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
				start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
/* Map a hotplugged physical address to a NUMA node. */
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */