// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995	Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */


#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages */
#include <linux/compat.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>

extern int data_start;
extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */

#if CONFIG_PGTABLE_LEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 * guarantee that global objects will be laid out in memory in the same order
 * as the order of declaration, so put these in different sections and use
 * the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

/*
 * get_memblock() allocates pages via memblock.
 * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it
 * doesn't allocate from bottom to top which is needed because we only created
 * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code.
 */
static void * __init get_memblock(unsigned long size)
{
	static phys_addr_t search_addr __initdata;
	phys_addr_t phys;

	if (!search_addr)
		search_addr = PAGE_ALIGN(__pa((unsigned long) &_end));
	search_addr = ALIGN(search_addr, size);
	while (!memblock_is_region_memory(search_addr, size) ||
		memblock_is_region_reserved(search_addr, size)) {
		search_addr += size;
	}
	phys = search_addr;

	if (phys)
		memblock_reserve(phys, size);
	else
		panic("get_memblock() failed.\n");

	memset(__va(phys), 0, size);

	return __va(phys);
}
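
/*
 * Within this file get_memblock() backs the early pmd/pte pages set up by
 * map_pages() and the empty_zero_page allocated in pagetable_init(), all of
 * which need to lie inside the initial mapping established by the assembly
 * bootup code (hence the bottom-up search above).
 */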

#ifdef CONFIG_64BIT
#define MAX_MEM		(~0UL)
#else /* !CONFIG_64BIT */
#define MAX_MEM		(3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */
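
/*
 * 3584U*1024U*1024U is 3.5 GB (0xe0000000), i.e. on 32-bit kernels the
 * directly mapped memory is capped so that enough of the 4 GB kernel
 * address space remains for other purposes (see the comment in
 * setup_bootmem() below).
 */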

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
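
/*
 * MAX_GAP is expressed in pages: 0x40000000 bytes is 1 GB, so with the
 * usual 4 kB pages this evaluates to 0x40000 (262144) pages.  Ranges that
 * are separated by more than this are dropped on !CONFIG_DISCONTIGMEM
 * kernels (see setup_bootmem() below).
 */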

static void __init setup_bootmem(void)
{
	unsigned long mem_max;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {

				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
				pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	/* Print the memory ranges */
	pr_info("Memory Ranges:\n");

	for (i = 0; i < npmem_ranges; i++) {
		struct resource *res = &sysram_resources[i];
		unsigned long start;
		unsigned long size;

		size = (pmem_ranges[i].pages << PAGE_SHIFT);
		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
			i, start, start + (size - 1), size >> 20);

		/* request memory resource */
		res->name = "System RAM";
		res->start = start;
		res->end = start + size - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	sysram_resource_count = npmem_ranges;

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();	/* check for "mem=" argument */

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++) {
		node_set_state(i, N_NORMAL_MEMORY);
		node_set_online(i);
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 */

	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;
		unsigned long start;
		unsigned long size;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		start = start_pfn << PAGE_SHIFT;
		size = npages << PAGE_SHIFT;

		/* add system RAM memblock */
		memblock_add(start, size);

		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE));
	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));

#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			memblock_reserve(__pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start)-1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);

	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
	pdc_pdt_init();
}

static int __init parisc_text_address(unsigned long vaddr)
{
	static unsigned long head_ptr __initdata;

	if (!head_ptr)
		head_ptr = PAGE_MASK & (unsigned long)
			dereference_function_descriptor(&parisc_kernel_start);

	return core_kernel_text(vaddr) || vaddr == head_ptr;
}

static void __init map_pages(unsigned long start_vaddr,
			     unsigned long start_paddr, unsigned long size,
			     pgprot_t pgprot, int force)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long kernel_end;

	ro_start = __pa((unsigned long)_text);
	ro_end = __pa((unsigned long)&data_start);
	kernel_end = __pa((unsigned long)&_end);

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *) get_memblock(PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;

				if (force)
					pte = __mk_pte(address, pgprot);
				else if (parisc_text_address(vaddr)) {
					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
					if (address >= ro_start && address < kernel_end)
						pte = pte_mkhuge(pte);
				}
				else
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
				if (address >= ro_start && address < ro_end) {
					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
					pte = pte_mkhuge(pte);
				} else
#endif
				{
					pte = __mk_pte(address, pgprot);
					if (address >= ro_start && address < kernel_end)
						pte = pte_mkhuge(pte);
				}

				if (address >= end_paddr) {
					if (force)
						break;
					else
						pte_val(pte) = 0;
				}

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

void __ref free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;

	/* The init text pages are marked R-X.  We have to
	 * flush the icache and mark them RW-
	 *
	 * This is tricky, because map_pages is in the init section.
	 * Do a dummy remap of the data section first (the data
	 * section is already PAGE_KERNEL) to pull in the TLB entries
	 * for map_pages() */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);
	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
	 * map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* force the kernel to see the new TLB entries */
	__flush_tlb_range(0, init_begin, init_end);

	/* finally dump all the instructions which were cached, since the
	 * pages are no longer executable */
	flush_icache_range(init_begin, init_end);

	free_initmem_default(POISON_FREE_INITMEM);

	/* set up a new led state on systems shipped with a LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}


#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
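
/*
 * SET_MAP_OFFSET() rounds an address up to the next VM_MAP_OFFSET (32 kB)
 * boundary, e.g. SET_MAP_OFFSET(0x10000000) == (void *)0x10008000 and
 * SET_MAP_OFFSET(0x10000123) == (void *)0x10008000.  This is what creates
 * the "hole" described above between the mapping areas.
 */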

void *parisc_vmalloc_start __read_mostly;
EXPORT_SYMBOL(parisc_vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
	/* Do sanity checks on IPC (compat) structures */
	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
#ifndef CONFIG_64BIT
	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
#endif
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
#endif

	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);

	high_memory = __va((max_pfn << PAGE_SHIFT));
	set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
	free_all_bootmem();

#ifdef CONFIG_PA11
	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						+ PCXL_DMA_MAP_SIZE);
	} else
#endif
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);

	mem_init_print_info(NULL);

#if 0
	/*
	 * Do not expose the virtual kernel memory layout to userspace.
	 * But keep code for debugging purposes.
	 */
	printk("virtual kernel memory layout:\n"
	       "    vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
	       "    memory  : 0x%px - 0x%px   (%4ld MB)\n"
	       "      .init : 0x%px - 0x%px   (%4ld kB)\n"
	       "      .data : 0x%px - 0x%px   (%4ld kB)\n"
	       "      .text : 0x%px - 0x%px   (%4ld kB)\n",

	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __read_mostly;
EXPORT_SYMBOL(empty_zero_page);

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		size = pmem_ranges[range].pages << PAGE_SHIFT;
		end_paddr = start_paddr + size;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			  initrd_end - initrd_start, PAGE_KERNEL, 0);
	}
#endif

	empty_zero_page = get_memblock(PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}

void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };

		zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can initialize
		   the zone */
		{
			int j;
			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, zones_size,
				    pmem_ranges[i].start_pfn, NULL);
	}
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
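
/*
 * space_id[] below is used as a bitmap with one bit per space ID, so
 * SID_ARRAY_SIZE is NR_SPACE_IDS divided by the number of bits in a long:
 * e.g. 262144 / 64 == 4096 longs on a 64-bit PA20 kernel, or
 * 32768 / 32 == 1024 longs on a 32-bit kernel.
 */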

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	__inc_irq_stat(irq_tlb_count);
	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	__inc_irq_stat(irq_tlb_count);
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif