1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * arch/sparc64/mm/init.c
4 *
5 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9 #include <linux/extable.h>
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/string.h>
13 #include <linux/init.h>
14 #include <linux/memblock.h>
15 #include <linux/mm.h>
16 #include <linux/hugetlb.h>
17 #include <linux/initrd.h>
18 #include <linux/swap.h>
19 #include <linux/pagemap.h>
20 #include <linux/poison.h>
21 #include <linux/fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/kprobes.h>
24 #include <linux/cache.h>
25 #include <linux/sort.h>
26 #include <linux/ioport.h>
27 #include <linux/percpu.h>
28 #include <linux/mmzone.h>
29 #include <linux/gfp.h>
30
31 #include <asm/head.h>
32 #include <asm/page.h>
33 #include <asm/pgalloc.h>
34 #include <asm/oplib.h>
35 #include <asm/iommu.h>
36 #include <asm/io.h>
37 #include <linux/uaccess.h>
38 #include <asm/mmu_context.h>
39 #include <asm/tlbflush.h>
40 #include <asm/dma.h>
41 #include <asm/starfire.h>
42 #include <asm/tlb.h>
43 #include <asm/spitfire.h>
44 #include <asm/sections.h>
45 #include <asm/tsb.h>
46 #include <asm/hypervisor.h>
47 #include <asm/prom.h>
48 #include <asm/mdesc.h>
49 #include <asm/cpudata.h>
50 #include <asm/setup.h>
51 #include <asm/irq.h>
52
53 #include "init_64.h"
54
55 unsigned long kern_linear_pte_xor[4] __read_mostly;
56 static unsigned long page_cache4v_flag;
57
58 /* A bitmap, two bits for every 256MB of physical memory. These two
59 * bits determine what page size we use for kernel linear
60 * translations. They form an index into kern_linear_pte_xor[]. The
61 * value in the indexed slot is XOR'd with the TLB miss virtual
62 * address to form the resulting TTE. The mapping is:
63 *
64 * 0 ==> 4MB
65 * 1 ==> 256MB
66 * 2 ==> 2GB
67 * 3 ==> 16GB
68 *
69 * All sun4v chips support 256MB pages. Only SPARC-T4 and later
70 * support 2GB pages, and hopefully future cpus will support the 16GB
71 * pages as well. For slots 2 and 3, we encode a 256MB TTE xor there
72 * if these larger page sizes are not supported by the cpu.
73 *
74 * It would be nice to determine this from the machine description
75 * 'cpu' properties, but we need to have this table set up before the
76 * MDESC is initialized.
77 */
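/* For example, a linear address whose 256MB region indexes slot 2 has
 * its TTE formed as (vaddr ^ kern_linear_pte_xor[2]), i.e. a 2GB
 * mapping (or the 256MB fallback encoded there on cpus that lack 2GB
 * page support).
 */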
78
79 #ifndef CONFIG_DEBUG_PAGEALLOC
80 /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
81 * Space is allocated for this right after the trap table in
82 * arch/sparc64/kernel/head.S
83 */
84 extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
85 #endif
86 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
87
88 static unsigned long cpu_pgsz_mask;
89
90 #define MAX_BANKS 1024
91
92 static struct linux_prom64_registers pavail[MAX_BANKS];
93 static int pavail_ents;
94
95 u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];
96
97 static int cmp_p64(const void *a, const void *b)
98 {
99 const struct linux_prom64_registers *x = a, *y = b;
100
101 if (x->phys_addr > y->phys_addr)
102 return 1;
103 if (x->phys_addr < y->phys_addr)
104 return -1;
105 return 0;
106 }
107
108 static void __init read_obp_memory(const char *property,
109 struct linux_prom64_registers *regs,
110 int *num_ents)
111 {
112 phandle node = prom_finddevice("/memory");
113 int prop_size = prom_getproplen(node, property);
114 int ents, ret, i;
115
116 ents = prop_size / sizeof(struct linux_prom64_registers);
117 if (ents > MAX_BANKS) {
118 prom_printf("The machine has more %s property entries than "
119 "this kernel can support (%d).\n",
120 property, MAX_BANKS);
121 prom_halt();
122 }
123
124 ret = prom_getproperty(node, property, (char *) regs, prop_size);
125 if (ret == -1) {
126 prom_printf("Couldn't get %s property from /memory.\n",
127 property);
128 prom_halt();
129 }
130
131 /* Sanitize what we got from the firmware, by page aligning
132 * everything.
133 */
134 for (i = 0; i < ents; i++) {
135 unsigned long base, size;
136
137 base = regs[i].phys_addr;
138 size = regs[i].reg_size;
139
140 size &= PAGE_MASK;
141 if (base & ~PAGE_MASK) {
142 unsigned long new_base = PAGE_ALIGN(base);
143
144 size -= new_base - base;
145 if ((long) size < 0L)
146 size = 0UL;
147 base = new_base;
148 }
149 if (size == 0UL) {
150 /* If it is empty, simply get rid of it.
151 * This simplifies the logic of the other
152 * functions that process these arrays.
153 */
154 memmove(&regs[i], &regs[i + 1],
155 (ents - i - 1) * sizeof(regs[0]));
156 i--;
157 ents--;
158 continue;
159 }
160 regs[i].phys_addr = base;
161 regs[i].reg_size = size;
162 }
163
164 *num_ents = ents;
165
166 sort(regs, ents, sizeof(struct linux_prom64_registers),
167 cmp_p64, NULL);
168 }
169
170 /* Kernel physical address base and size in bytes. */
171 unsigned long kern_base __read_mostly;
172 unsigned long kern_size __read_mostly;
173
174 /* Initial ramdisk setup */
175 extern unsigned long sparc_ramdisk_image64;
176 extern unsigned int sparc_ramdisk_image;
177 extern unsigned int sparc_ramdisk_size;
178
179 struct page *mem_map_zero __read_mostly;
180 EXPORT_SYMBOL(mem_map_zero);
181
182 unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
183
184 unsigned long sparc64_kern_pri_context __read_mostly;
185 unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
186 unsigned long sparc64_kern_sec_context __read_mostly;
187
188 int num_kernel_image_mappings;
189
190 #ifdef CONFIG_DEBUG_DCFLUSH
191 atomic_t dcpage_flushes = ATOMIC_INIT(0);
192 #ifdef CONFIG_SMP
193 atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
194 #endif
195 #endif
196
197 inline void flush_dcache_page_impl(struct page *page)
198 {
199 BUG_ON(tlb_type == hypervisor);
200 #ifdef CONFIG_DEBUG_DCFLUSH
201 atomic_inc(&dcpage_flushes);
202 #endif
203
204 #ifdef DCACHE_ALIASING_POSSIBLE
205 __flush_dcache_page(page_address(page),
206 ((tlb_type == spitfire) &&
207 page_mapping_file(page) != NULL));
208 #else
209 if (page_mapping_file(page) != NULL &&
210 tlb_type == spitfire)
211 __flush_icache_page(__pa(page_address(page)));
212 #endif
213 }
214
215 #define PG_dcache_dirty PG_arch_1
216 #define PG_dcache_cpu_shift 32UL
217 #define PG_dcache_cpu_mask \
218 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
219
220 #define dcache_dirty_cpu(page) \
221 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
222
223 static inline void set_dcache_dirty(struct page *page, int this_cpu)
224 {
225 unsigned long mask = this_cpu;
226 unsigned long non_cpu_bits;
227
228 non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
229 mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
230
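/* Atomically merge the new owner cpu and the dirty bit into
 * page->flags: load the flags, clear the old cpu field, OR in the
 * new cpu/dirty bits, and retry the casx until it succeeds.
 */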
231 __asm__ __volatile__("1:\n\t"
232 "ldx [%2], %%g7\n\t"
233 "and %%g7, %1, %%g1\n\t"
234 "or %%g1, %0, %%g1\n\t"
235 "casx [%2], %%g7, %%g1\n\t"
236 "cmp %%g7, %%g1\n\t"
237 "bne,pn %%xcc, 1b\n\t"
238 " nop"
239 : /* no outputs */
240 : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
241 : "g1", "g7");
242 }
243
244 static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
245 {
246 unsigned long mask = (1UL << PG_dcache_dirty);
247
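/* Clear the dirty bit only if the recorded owner cpu still matches
 * 'cpu'; if another cpu has since claimed the page, leave the flags
 * untouched (branch to 2:).
 */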
248 __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
249 "1:\n\t"
250 "ldx [%2], %%g7\n\t"
251 "srlx %%g7, %4, %%g1\n\t"
252 "and %%g1, %3, %%g1\n\t"
253 "cmp %%g1, %0\n\t"
254 "bne,pn %%icc, 2f\n\t"
255 " andn %%g7, %1, %%g1\n\t"
256 "casx [%2], %%g7, %%g1\n\t"
257 "cmp %%g7, %%g1\n\t"
258 "bne,pn %%xcc, 1b\n\t"
259 " nop\n"
260 "2:"
261 : /* no outputs */
262 : "r" (cpu), "r" (mask), "r" (&page->flags),
263 "i" (PG_dcache_cpu_mask),
264 "i" (PG_dcache_cpu_shift)
265 : "g1", "g7");
266 }
267
268 static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
269 {
270 unsigned long tsb_addr = (unsigned long) ent;
271
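/* cheetah_plus and sun4v access the kernel TSB by physical address
 * (see tsb_phys_patch()), hence the __pa() conversion below.
 */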
272 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
273 tsb_addr = __pa(tsb_addr);
274
275 __tsb_insert(tsb_addr, tag, pte);
276 }
277
278 unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
279
280 static void flush_dcache(unsigned long pfn)
281 {
282 struct page *page;
283
284 page = pfn_to_page(pfn);
285 if (page) {
286 unsigned long pg_flags;
287
288 pg_flags = page->flags;
289 if (pg_flags & (1UL << PG_dcache_dirty)) {
290 int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
291 PG_dcache_cpu_mask);
292 int this_cpu = get_cpu();
293
294 /* This is just to optimize away some function calls
295 * in the SMP case.
296 */
297 if (cpu == this_cpu)
298 flush_dcache_page_impl(page);
299 else
300 smp_flush_dcache_page_impl(page, cpu);
301
302 clear_dcache_dirty_cpu(page, cpu);
303
304 put_cpu();
305 }
306 }
307 }
308
309 /* mm->context.lock must be held */
310 static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
311 unsigned long tsb_hash_shift, unsigned long address,
312 unsigned long tte)
313 {
314 struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
315 unsigned long tag;
316
317 if (unlikely(!tsb))
318 return;
319
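/* Hash the virtual address into the TSB: drop the page-size bits and
 * mask with (nentries - 1); the stored tag is the VA shifted down by
 * 22 bits.
 */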
320 tsb += ((address >> tsb_hash_shift) &
321 (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
322 tag = (address >> 22UL);
323 tsb_insert(tsb, tag, tte);
324 }
325
326 #ifdef CONFIG_HUGETLB_PAGE
327 static int __init hugetlbpage_init(void)
328 {
329 hugetlb_add_hstate(HPAGE_64K_SHIFT - PAGE_SHIFT);
330 hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
331 hugetlb_add_hstate(HPAGE_256MB_SHIFT - PAGE_SHIFT);
332 hugetlb_add_hstate(HPAGE_2GB_SHIFT - PAGE_SHIFT);
333
334 return 0;
335 }
336
337 arch_initcall(hugetlbpage_init);
338
339 static void __init pud_huge_patch(void)
340 {
341 struct pud_huge_patch_entry *p;
342 unsigned long addr;
343
344 p = &__pud_huge_patch;
345 addr = p->addr;
346 *(unsigned int *)addr = p->insn;
347
348 __asm__ __volatile__("flush %0" : : "r" (addr));
349 }
350
351 bool __init arch_hugetlb_valid_size(unsigned long size)
352 {
353 unsigned int hugepage_shift = ilog2(size);
354 unsigned short hv_pgsz_idx;
355 unsigned int hv_pgsz_mask;
356
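/* Map the requested size onto the hypervisor page-size index/mask and
 * accept it only if the cpu advertises that size in cpu_pgsz_mask.
 */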
357 switch (hugepage_shift) {
358 case HPAGE_16GB_SHIFT:
359 hv_pgsz_mask = HV_PGSZ_MASK_16GB;
360 hv_pgsz_idx = HV_PGSZ_IDX_16GB;
361 pud_huge_patch();
362 break;
363 case HPAGE_2GB_SHIFT:
364 hv_pgsz_mask = HV_PGSZ_MASK_2GB;
365 hv_pgsz_idx = HV_PGSZ_IDX_2GB;
366 break;
367 case HPAGE_256MB_SHIFT:
368 hv_pgsz_mask = HV_PGSZ_MASK_256MB;
369 hv_pgsz_idx = HV_PGSZ_IDX_256MB;
370 break;
371 case HPAGE_SHIFT:
372 hv_pgsz_mask = HV_PGSZ_MASK_4MB;
373 hv_pgsz_idx = HV_PGSZ_IDX_4MB;
374 break;
375 case HPAGE_64K_SHIFT:
376 hv_pgsz_mask = HV_PGSZ_MASK_64K;
377 hv_pgsz_idx = HV_PGSZ_IDX_64K;
378 break;
379 default:
380 hv_pgsz_mask = 0;
381 }
382
383 if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U)
384 return false;
385
386 return true;
387 }
388 #endif /* CONFIG_HUGETLB_PAGE */
389
390 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
391 {
392 struct mm_struct *mm;
393 unsigned long flags;
394 bool is_huge_tsb;
395 pte_t pte = *ptep;
396
397 if (tlb_type != hypervisor) {
398 unsigned long pfn = pte_pfn(pte);
399
400 if (pfn_valid(pfn))
401 flush_dcache(pfn);
402 }
403
404 mm = vma->vm_mm;
405
406 /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
407 if (!pte_accessible(mm, pte))
408 return;
409
410 spin_lock_irqsave(&mm->context.lock, flags);
411
412 is_huge_tsb = false;
413 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
414 if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) {
415 unsigned long hugepage_size = PAGE_SIZE;
416
417 if (is_vm_hugetlb_page(vma))
418 hugepage_size = huge_page_size(hstate_vma(vma));
419
420 if (hugepage_size >= PUD_SIZE) {
421 unsigned long mask = 0x1ffc00000UL;
422
423 /* Transfer bits [32:22] from address to resolve
424 * at 4M granularity.
425 */
426 pte_val(pte) &= ~mask;
427 pte_val(pte) |= (address & mask);
428 } else if (hugepage_size >= PMD_SIZE) {
429 /* We are fabricating 8MB pages using 4MB
430 * real hw pages.
431 */
432 pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
433 }
434
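/* PMD-sized and larger mappings are pre-loaded into the dedicated
 * huge-page TSB (MM_TSB_HUGE), hashed at REAL_HPAGE_SHIFT granularity.
 */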
435 if (hugepage_size >= PMD_SIZE) {
436 __update_mmu_tsb_insert(mm, MM_TSB_HUGE,
437 REAL_HPAGE_SHIFT, address, pte_val(pte));
438 is_huge_tsb = true;
439 }
440 }
441 #endif
442 if (!is_huge_tsb)
443 __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
444 address, pte_val(pte));
445
446 spin_unlock_irqrestore(&mm->context.lock, flags);
447 }
448
449 void flush_dcache_page(struct page *page)
450 {
451 struct address_space *mapping;
452 int this_cpu;
453
454 if (tlb_type == hypervisor)
455 return;
456
457 /* Do not bother with the expensive D-cache flush if it
458 * is merely the zero page. The 'bigcore' testcase in GDB
459 * causes this case to run millions of times.
460 */
461 if (page == ZERO_PAGE(0))
462 return;
463
464 this_cpu = get_cpu();
465
466 mapping = page_mapping_file(page);
467 if (mapping && !mapping_mapped(mapping)) {
468 int dirty = test_bit(PG_dcache_dirty, &page->flags);
469 if (dirty) {
470 int dirty_cpu = dcache_dirty_cpu(page);
471
472 if (dirty_cpu == this_cpu)
473 goto out;
474 smp_flush_dcache_page_impl(page, dirty_cpu);
475 }
476 set_dcache_dirty(page, this_cpu);
477 } else {
478 /* We could delay the flush for the !page_mapping
479 * case too. But that case is for exec env/arg
480 * pages and those are 99% certain to get
481 * faulted into the tlb (and thus flushed) anyway.
482 */
483 flush_dcache_page_impl(page);
484 }
485
486 out:
487 put_cpu();
488 }
489 EXPORT_SYMBOL(flush_dcache_page);
490
491 void __kprobes flush_icache_range(unsigned long start, unsigned long end)
492 {
493 /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
494 if (tlb_type == spitfire) {
495 unsigned long kaddr;
496
497 /* This code only runs on Spitfire cpus so this is
498 * why we can assume _PAGE_PADDR_4U.
499 */
500 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
501 unsigned long paddr, mask = _PAGE_PADDR_4U;
502
503 if (kaddr >= PAGE_OFFSET)
504 paddr = kaddr & mask;
505 else {
506 pte_t *ptep = virt_to_kpte(kaddr);
507
508 paddr = pte_val(*ptep) & mask;
509 }
510 __flush_icache_page(paddr);
511 }
512 }
513 }
514 EXPORT_SYMBOL(flush_icache_range);
515
516 void mmu_info(struct seq_file *m)
517 {
518 static const char *pgsz_strings[] = {
519 "8K", "64K", "512K", "4MB", "32MB",
520 "256MB", "2GB", "16GB",
521 };
522 int i, printed;
523
524 if (tlb_type == cheetah)
525 seq_printf(m, "MMU Type\t: Cheetah\n");
526 else if (tlb_type == cheetah_plus)
527 seq_printf(m, "MMU Type\t: Cheetah+\n");
528 else if (tlb_type == spitfire)
529 seq_printf(m, "MMU Type\t: Spitfire\n");
530 else if (tlb_type == hypervisor)
531 seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
532 else
533 seq_printf(m, "MMU Type\t: ???\n");
534
535 seq_printf(m, "MMU PGSZs\t: ");
536 printed = 0;
537 for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
538 if (cpu_pgsz_mask & (1UL << i)) {
539 seq_printf(m, "%s%s",
540 printed ? "," : "", pgsz_strings[i]);
541 printed++;
542 }
543 }
544 seq_putc(m, '\n');
545
546 #ifdef CONFIG_DEBUG_DCFLUSH
547 seq_printf(m, "DCPageFlushes\t: %d\n",
548 atomic_read(&dcpage_flushes));
549 #ifdef CONFIG_SMP
550 seq_printf(m, "DCPageFlushesXC\t: %d\n",
551 atomic_read(&dcpage_flushes_xcall));
552 #endif /* CONFIG_SMP */
553 #endif /* CONFIG_DEBUG_DCFLUSH */
554 }
555
556 struct linux_prom_translation prom_trans[512] __read_mostly;
557 unsigned int prom_trans_ents __read_mostly;
558
559 unsigned long kern_locked_tte_data;
560
561 /* The obp translations are saved based on 8k pagesize, since obp can
562 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
563 * HI_OBP_ADDRESS range are handled in ktlb.S.
564 */
565 static inline int in_obp_range(unsigned long vaddr)
566 {
567 return (vaddr >= LOW_OBP_ADDRESS &&
568 vaddr < HI_OBP_ADDRESS);
569 }
570
571 static int cmp_ptrans(const void *a, const void *b)
572 {
573 const struct linux_prom_translation *x = a, *y = b;
574
575 if (x->virt > y->virt)
576 return 1;
577 if (x->virt < y->virt)
578 return -1;
579 return 0;
580 }
581
582 /* Read OBP translations property into 'prom_trans[]'. */
583 static void __init read_obp_translations(void)
584 {
585 int n, node, ents, first, last, i;
586
587 node = prom_finddevice("/virtual-memory");
588 n = prom_getproplen(node, "translations");
589 if (unlikely(n == 0 || n == -1)) {
590 prom_printf("prom_mappings: Couldn't get size.\n");
591 prom_halt();
592 }
593 if (unlikely(n > sizeof(prom_trans))) {
594 prom_printf("prom_mappings: Size %d is too big.\n", n);
595 prom_halt();
596 }
597
598 if ((n = prom_getproperty(node, "translations",
599 (char *)&prom_trans[0],
600 sizeof(prom_trans))) == -1) {
601 prom_printf("prom_mappings: Couldn't get property.\n");
602 prom_halt();
603 }
604
605 n = n / sizeof(struct linux_prom_translation);
606
607 ents = n;
608
609 sort(prom_trans, ents, sizeof(struct linux_prom_translation),
610 cmp_ptrans, NULL);
611
612 /* Now kick out all the non-OBP entries. */
613 for (i = 0; i < ents; i++) {
614 if (in_obp_range(prom_trans[i].virt))
615 break;
616 }
617 first = i;
618 for (; i < ents; i++) {
619 if (!in_obp_range(prom_trans[i].virt))
620 break;
621 }
622 last = i;
623
624 for (i = 0; i < (last - first); i++) {
625 struct linux_prom_translation *src = &prom_trans[i + first];
626 struct linux_prom_translation *dest = &prom_trans[i];
627
628 *dest = *src;
629 }
630 for (; i < ents; i++) {
631 struct linux_prom_translation *dest = &prom_trans[i];
632 dest->virt = dest->size = dest->data = 0x0UL;
633 }
634
635 prom_trans_ents = last - first;
636
637 if (tlb_type == spitfire) {
638 /* Clear diag TTE bits. */
639 for (i = 0; i < prom_trans_ents; i++)
640 prom_trans[i].data &= ~0x0003fe0000000000UL;
641 }
642
643 /* Force execute bit on. */
644 for (i = 0; i < prom_trans_ents; i++)
645 prom_trans[i].data |= (tlb_type == hypervisor ?
646 _PAGE_EXEC_4V : _PAGE_EXEC_4U);
647 }
648
649 static void __init hypervisor_tlb_lock(unsigned long vaddr,
650 unsigned long pte,
651 unsigned long mmu)
652 {
653 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
654
655 if (ret != 0) {
656 prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
657 "errors with %lx\n", vaddr, 0, pte, mmu, ret);
658 prom_halt();
659 }
660 }
661
662 static unsigned long kern_large_tte(unsigned long paddr);
663
664 static void __init remap_kernel(void)
665 {
666 unsigned long phys_page, tte_vaddr, tte_data;
667 int i, tlb_ent = sparc64_highest_locked_tlbent();
668
669 tte_vaddr = (unsigned long) KERNBASE;
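/* Round the boot-time physical load address down to a 4MB boundary;
 * the kernel image is locked into the TLB in 4MB chunks below.
 */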
670 phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
671 tte_data = kern_large_tte(phys_page);
672
673 kern_locked_tte_data = tte_data;
674
675 /* Now lock us into the TLBs via Hypervisor or OBP. */
676 if (tlb_type == hypervisor) {
677 for (i = 0; i < num_kernel_image_mappings; i++) {
678 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
679 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
680 tte_vaddr += 0x400000;
681 tte_data += 0x400000;
682 }
683 } else {
684 for (i = 0; i < num_kernel_image_mappings; i++) {
685 prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
686 prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
687 tte_vaddr += 0x400000;
688 tte_data += 0x400000;
689 }
690 sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
691 }
692 if (tlb_type == cheetah_plus) {
693 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
694 CTX_CHEETAH_PLUS_NUC);
695 sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
696 sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
697 }
698 }
699
700
701 static void __init inherit_prom_mappings(void)
702 {
703 /* Now fixup OBP's idea about where we really are mapped. */
704 printk("Remapping the kernel... ");
705 remap_kernel();
706 printk("done.\n");
707 }
708
709 void prom_world(int enter)
710 {
711 if (!enter)
712 set_fs(get_fs());
713
714 __asm__ __volatile__("flushw");
715 }
716
717 void __flush_dcache_range(unsigned long start, unsigned long end)
718 {
719 unsigned long va;
720
721 if (tlb_type == spitfire) {
722 int n = 0;
723
724 for (va = start; va < end; va += 32) {
725 spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
726 if (++n >= 512)
727 break;
728 }
729 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
730 start = __pa(start);
731 end = __pa(end);
732 for (va = start; va < end; va += 32)
733 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
734 "membar #Sync"
735 : /* no outputs */
736 : "r" (va),
737 "i" (ASI_DCACHE_INVALIDATE));
738 }
739 }
740 EXPORT_SYMBOL(__flush_dcache_range);
741
742 /* get_new_mmu_context() uses "cache + 1". */
743 DEFINE_SPINLOCK(ctx_alloc_lock);
744 unsigned long tlb_context_cache = CTX_FIRST_VERSION;
745 #define MAX_CTX_NR (1UL << CTX_NR_BITS)
746 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
747 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
748 DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
749
750 static void mmu_context_wrap(void)
751 {
752 unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
753 unsigned long new_ver, new_ctx, old_ctx;
754 struct mm_struct *mm;
755 int cpu;
756
757 bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
758
759 /* Reserve kernel context */
760 set_bit(0, mmu_context_bmap);
761
762 new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
763 if (unlikely(new_ver == 0))
764 new_ver = CTX_FIRST_VERSION;
765 tlb_context_cache = new_ver;
766
767 /*
768 * Make sure that any new mm that is added into per_cpu_secondary_mm
769 * goes through the get_new_mmu_context() path.
770 */
771 mb();
772
773 /*
774 * Update versions to current on those CPUs that had valid secondary
775 * contexts
776 */
777 for_each_online_cpu(cpu) {
778 /*
779 * If a new mm is stored after we took this mm from the array,
780 * it will go into get_new_mmu_context() path, because we
781 * already bumped the version in tlb_context_cache.
782 */
783 mm = per_cpu(per_cpu_secondary_mm, cpu);
784
785 if (unlikely(!mm || mm == &init_mm))
786 continue;
787
788 old_ctx = mm->context.sparc64_ctx_val;
789 if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
790 new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
791 set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
792 mm->context.sparc64_ctx_val = new_ctx;
793 }
794 }
795 }
796
797 /* Caller does TLB context flushing on local CPU if necessary.
798 * The caller also ensures that CTX_VALID(mm->context) is false.
799 *
800 * We must be careful about boundary cases so that we never
801 * let the user have CTX 0 (nucleus) and never use a CTX
802 * version of zero (otherwise NO_CONTEXT would not be caught
803 * by version mis-match tests in mmu_context.h).
804 *
805 * Always invoked with interrupts disabled.
806 */
807 void get_new_mmu_context(struct mm_struct *mm)
808 {
809 unsigned long ctx, new_ctx;
810 unsigned long orig_pgsz_bits;
811
812 spin_lock(&ctx_alloc_lock);
813 retry:
814 /* wrap might have happened, test again if our context became valid */
815 if (unlikely(CTX_VALID(mm->context)))
816 goto out;
817 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
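/* Look for a free context number just above the last one handed out.
 * If the search wraps all the way around without finding one, bump
 * the context version via mmu_context_wrap() and retry.
 */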
818 ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
819 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
820 if (new_ctx >= (1 << CTX_NR_BITS)) {
821 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
822 if (new_ctx >= ctx) {
823 mmu_context_wrap();
824 goto retry;
825 }
826 }
827 if (mm->context.sparc64_ctx_val)
828 cpumask_clear(mm_cpumask(mm));
829 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
830 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
831 tlb_context_cache = new_ctx;
832 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
833 out:
834 spin_unlock(&ctx_alloc_lock);
835 }
836
837 static int numa_enabled = 1;
838 static int numa_debug;
839
840 static int __init early_numa(char *p)
841 {
842 if (!p)
843 return 0;
844
845 if (strstr(p, "off"))
846 numa_enabled = 0;
847
848 if (strstr(p, "debug"))
849 numa_debug = 1;
850
851 return 0;
852 }
853 early_param("numa", early_numa);
854
855 #define numadbg(f, a...) \
856 do { if (numa_debug) \
857 printk(KERN_INFO f, ## a); \
858 } while (0)
859
860 static void __init find_ramdisk(unsigned long phys_base)
861 {
862 #ifdef CONFIG_BLK_DEV_INITRD
863 if (sparc_ramdisk_image || sparc_ramdisk_image64) {
864 unsigned long ramdisk_image;
865
866 /* Older versions of the bootloader only supported a
867 * 32-bit physical address for the ramdisk image
868 * location, stored at sparc_ramdisk_image. Newer
869 * SILO versions set sparc_ramdisk_image to zero and
870 * provide a full 64-bit physical address at
871 * sparc_ramdisk_image64.
872 */
873 ramdisk_image = sparc_ramdisk_image;
874 if (!ramdisk_image)
875 ramdisk_image = sparc_ramdisk_image64;
876
877 /* Another bootloader quirk. The bootloader normalizes
878 * the physical address to KERNBASE, so we have to
879 * factor that back out and add in the lowest valid
880 * physical page address to get the true physical address.
881 */
882 ramdisk_image -= KERNBASE;
883 ramdisk_image += phys_base;
884
885 numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
886 ramdisk_image, sparc_ramdisk_size);
887
888 initrd_start = ramdisk_image;
889 initrd_end = ramdisk_image + sparc_ramdisk_size;
890
891 memblock_reserve(initrd_start, sparc_ramdisk_size);
892
893 initrd_start += PAGE_OFFSET;
894 initrd_end += PAGE_OFFSET;
895 }
896 #endif
897 }
898
899 struct node_mem_mask {
900 unsigned long mask;
901 unsigned long match;
902 };
903 static struct node_mem_mask node_masks[MAX_NUMNODES];
904 static int num_node_masks;
905
906 #ifdef CONFIG_NEED_MULTIPLE_NODES
907
908 struct mdesc_mlgroup {
909 u64 node;
910 u64 latency;
911 u64 match;
912 u64 mask;
913 };
914
915 static struct mdesc_mlgroup *mlgroups;
916 static int num_mlgroups;
917
918 int numa_cpu_lookup_table[NR_CPUS];
919 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
920
921 struct mdesc_mblock {
922 u64 base;
923 u64 size;
924 u64 offset; /* RA-to-PA */
925 };
926 static struct mdesc_mblock *mblocks;
927 static int num_mblocks;
928
929 static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr)
930 {
931 struct mdesc_mblock *m = NULL;
932 int i;
933
934 for (i = 0; i < num_mblocks; i++) {
935 m = &mblocks[i];
936
937 if (addr >= m->base &&
938 addr < (m->base + m->size)) {
939 break;
940 }
941 }
942
943 return m;
944 }
945
946 static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
947 {
948 int prev_nid, new_nid;
949
950 prev_nid = NUMA_NO_NODE;
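/* Walk the range a page at a time, matching each page against the
 * node masks; stop at the first page that falls on a different node
 * so the caller gets a [start, return value) chunk that lives
 * entirely on *nid.
 */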
951 for ( ; start < end; start += PAGE_SIZE) {
952 for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
953 struct node_mem_mask *p = &node_masks[new_nid];
954
955 if ((start & p->mask) == p->match) {
956 if (prev_nid == NUMA_NO_NODE)
957 prev_nid = new_nid;
958 break;
959 }
960 }
961
962 if (new_nid == num_node_masks) {
963 prev_nid = 0;
964 WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.",
965 start);
966 break;
967 }
968
969 if (prev_nid != new_nid)
970 break;
971 }
972 *nid = prev_nid;
973
974 return start > end ? end : start;
975 }
976
977 static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
978 {
979 u64 ret_end, pa_start, m_mask, m_match, m_end;
980 struct mdesc_mblock *mblock;
981 int _nid, i;
982
983 if (tlb_type != hypervisor)
984 return memblock_nid_range_sun4u(start, end, nid);
985
986 mblock = addr_to_mblock(start);
987 if (!mblock) {
988 WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]",
989 start);
990
991 _nid = 0;
992 ret_end = end;
993 goto done;
994 }
995
996 pa_start = start + mblock->offset;
997 m_match = 0;
998 m_mask = 0;
999
1000 for (_nid = 0; _nid < num_node_masks; _nid++) {
1001 struct node_mem_mask *const m = &node_masks[_nid];
1002
1003 if ((pa_start & m->mask) == m->match) {
1004 m_match = m->match;
1005 m_mask = m->mask;
1006 break;
1007 }
1008 }
1009
1010 if (num_node_masks == _nid) {
1011 /* We could not find a NUMA group, so default to 0, but let's
1012 * search for a latency group so we can calculate the correct
1013 * end address to return.
1014 */
1015 _nid = 0;
1016
1017 for (i = 0; i < num_mlgroups; i++) {
1018 struct mdesc_mlgroup *const m = &mlgroups[i];
1019
1020 if ((pa_start & m->mask) == m->match) {
1021 m_match = m->match;
1022 m_mask = m->mask;
1023 break;
1024 }
1025 }
1026
1027 if (i == num_mlgroups) {
1028 WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]",
1029 start);
1030
1031 ret_end = end;
1032 goto done;
1033 }
1034 }
1035
1036 /*
1037 * Each latency group has match and mask, and each memory block has an
1038 * offset. An address belongs to a latency group if its address matches
1039 * the following formula: ((addr + offset) & mask) == match
1040 * It is, however, slow to check every single page against a
1041 * particular latency group. As an optimization we calculate the end
1042 * value using bit arithmetic.
1043 */
1044 m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset;
1045 m_end += pa_start & ~((1ul << fls64(m_mask)) - 1);
1046 ret_end = m_end > end ? end : m_end;
1047
1048 done:
1049 *nid = _nid;
1050 return ret_end;
1051 }
1052 #endif
1053
1054 /* This must be invoked after performing all of the necessary
1055 * memblock_set_node() calls for 'nid'. We need to be able to get
1056 * correct data from get_pfn_range_for_nid().
1057 */
1058 static void __init allocate_node_data(int nid)
1059 {
1060 struct pglist_data *p;
1061 unsigned long start_pfn, end_pfn;
1062 #ifdef CONFIG_NEED_MULTIPLE_NODES
1063
1064 NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
1065 SMP_CACHE_BYTES, nid);
1066 if (!NODE_DATA(nid)) {
1067 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
1068 prom_halt();
1069 }
1070
1071 NODE_DATA(nid)->node_id = nid;
1072 #endif
1073
1074 p = NODE_DATA(nid);
1075
1076 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1077 p->node_start_pfn = start_pfn;
1078 p->node_spanned_pages = end_pfn - start_pfn;
1079 }
1080
1081 static void init_node_masks_nonnuma(void)
1082 {
1083 #ifdef CONFIG_NEED_MULTIPLE_NODES
1084 int i;
1085 #endif
1086
1087 numadbg("Initializing tables for non-numa.\n");
1088
1089 node_masks[0].mask = 0;
1090 node_masks[0].match = 0;
1091 num_node_masks = 1;
1092
1093 #ifdef CONFIG_NEED_MULTIPLE_NODES
1094 for (i = 0; i < NR_CPUS; i++)
1095 numa_cpu_lookup_table[i] = 0;
1096
1097 cpumask_setall(&numa_cpumask_lookup_table[0]);
1098 #endif
1099 }
1100
1101 #ifdef CONFIG_NEED_MULTIPLE_NODES
1102 struct pglist_data *node_data[MAX_NUMNODES];
1103
1104 EXPORT_SYMBOL(numa_cpu_lookup_table);
1105 EXPORT_SYMBOL(numa_cpumask_lookup_table);
1106 EXPORT_SYMBOL(node_data);
1107
1108 static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
1109 u32 cfg_handle)
1110 {
1111 u64 arc;
1112
1113 mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
1114 u64 target = mdesc_arc_target(md, arc);
1115 const u64 *val;
1116
1117 val = mdesc_get_property(md, target,
1118 "cfg-handle", NULL);
1119 if (val && *val == cfg_handle)
1120 return 0;
1121 }
1122 return -ENODEV;
1123 }
1124
1125 static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
1126 u32 cfg_handle)
1127 {
1128 u64 arc, candidate, best_latency = ~(u64)0;
1129
1130 candidate = MDESC_NODE_NULL;
1131 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1132 u64 target = mdesc_arc_target(md, arc);
1133 const char *name = mdesc_node_name(md, target);
1134 const u64 *val;
1135
1136 if (strcmp(name, "pio-latency-group"))
1137 continue;
1138
1139 val = mdesc_get_property(md, target, "latency", NULL);
1140 if (!val)
1141 continue;
1142
1143 if (*val < best_latency) {
1144 candidate = target;
1145 best_latency = *val;
1146 }
1147 }
1148
1149 if (candidate == MDESC_NODE_NULL)
1150 return -ENODEV;
1151
1152 return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
1153 }
1154
1155 int of_node_to_nid(struct device_node *dp)
1156 {
1157 const struct linux_prom64_registers *regs;
1158 struct mdesc_handle *md;
1159 u32 cfg_handle;
1160 int count, nid;
1161 u64 grp;
1162
1163 /* This is the right thing to do on currently supported
1164 * SUN4U NUMA platforms as well, as the PCI controller does
1165 * not sit behind any particular memory controller.
1166 */
1167 if (!mlgroups)
1168 return -1;
1169
1170 regs = of_get_property(dp, "reg", NULL);
1171 if (!regs)
1172 return -1;
1173
1174 cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
1175
1176 md = mdesc_grab();
1177
1178 count = 0;
1179 nid = NUMA_NO_NODE;
1180 mdesc_for_each_node_by_name(md, grp, "group") {
1181 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
1182 nid = count;
1183 break;
1184 }
1185 count++;
1186 }
1187
1188 mdesc_release(md);
1189
1190 return nid;
1191 }
1192
1193 static void __init add_node_ranges(void)
1194 {
1195 phys_addr_t start, end;
1196 unsigned long prev_max;
1197 u64 i;
1198
1199 memblock_resized:
1200 prev_max = memblock.memory.max;
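/* memblock_set_node() below may grow the memblock.memory array; if it
 * was resized, restart the walk since the iterator may now be stale.
 */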
1201
1202 for_each_mem_range(i, &start, &end) {
1203 while (start < end) {
1204 unsigned long this_end;
1205 int nid;
1206
1207 this_end = memblock_nid_range(start, end, &nid);
1208
1209 numadbg("Setting memblock NUMA node nid[%d] "
1210 "start[%llx] end[%lx]\n",
1211 nid, start, this_end);
1212
1213 memblock_set_node(start, this_end - start,
1214 &memblock.memory, nid);
1215 if (memblock.memory.max != prev_max)
1216 goto memblock_resized;
1217 start = this_end;
1218 }
1219 }
1220 }
1221
1222 static int __init grab_mlgroups(struct mdesc_handle *md)
1223 {
1224 unsigned long paddr;
1225 int count = 0;
1226 u64 node;
1227
1228 mdesc_for_each_node_by_name(md, node, "memory-latency-group")
1229 count++;
1230 if (!count)
1231 return -ENOENT;
1232
1233 paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mlgroup),
1234 SMP_CACHE_BYTES);
1235 if (!paddr)
1236 return -ENOMEM;
1237
1238 mlgroups = __va(paddr);
1239 num_mlgroups = count;
1240
1241 count = 0;
1242 mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
1243 struct mdesc_mlgroup *m = &mlgroups[count++];
1244 const u64 *val;
1245
1246 m->node = node;
1247
1248 val = mdesc_get_property(md, node, "latency", NULL);
1249 m->latency = *val;
1250 val = mdesc_get_property(md, node, "address-match", NULL);
1251 m->match = *val;
1252 val = mdesc_get_property(md, node, "address-mask", NULL);
1253 m->mask = *val;
1254
1255 numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
1256 "match[%llx] mask[%llx]\n",
1257 count - 1, m->node, m->latency, m->match, m->mask);
1258 }
1259
1260 return 0;
1261 }
1262
1263 static int __init grab_mblocks(struct mdesc_handle *md)
1264 {
1265 unsigned long paddr;
1266 int count = 0;
1267 u64 node;
1268
1269 mdesc_for_each_node_by_name(md, node, "mblock")
1270 count++;
1271 if (!count)
1272 return -ENOENT;
1273
1274 paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mblock),
1275 SMP_CACHE_BYTES);
1276 if (!paddr)
1277 return -ENOMEM;
1278
1279 mblocks = __va(paddr);
1280 num_mblocks = count;
1281
1282 count = 0;
1283 mdesc_for_each_node_by_name(md, node, "mblock") {
1284 struct mdesc_mblock *m = &mblocks[count++];
1285 const u64 *val;
1286
1287 val = mdesc_get_property(md, node, "base", NULL);
1288 m->base = *val;
1289 val = mdesc_get_property(md, node, "size", NULL);
1290 m->size = *val;
1291 val = mdesc_get_property(md, node,
1292 "address-congruence-offset", NULL);
1293
1294 /* The address-congruence-offset property is optional.
1295 * Explicitly zero it to identify this case.
1296 */
1297 if (val)
1298 m->offset = *val;
1299 else
1300 m->offset = 0UL;
1301
1302 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
1303 count - 1, m->base, m->size, m->offset);
1304 }
1305
1306 return 0;
1307 }
1308
1309 static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
1310 u64 grp, cpumask_t *mask)
1311 {
1312 u64 arc;
1313
1314 cpumask_clear(mask);
1315
1316 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
1317 u64 target = mdesc_arc_target(md, arc);
1318 const char *name = mdesc_node_name(md, target);
1319 const u64 *id;
1320
1321 if (strcmp(name, "cpu"))
1322 continue;
1323 id = mdesc_get_property(md, target, "id", NULL);
1324 if (*id < nr_cpu_ids)
1325 cpumask_set_cpu(*id, mask);
1326 }
1327 }
1328
1329 static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
1330 {
1331 int i;
1332
1333 for (i = 0; i < num_mlgroups; i++) {
1334 struct mdesc_mlgroup *m = &mlgroups[i];
1335 if (m->node == node)
1336 return m;
1337 }
1338 return NULL;
1339 }
1340
1341 int __node_distance(int from, int to)
1342 {
1343 if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
1344 pr_warn("Returning default NUMA distance value for %d->%d\n",
1345 from, to);
1346 return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
1347 }
1348 return numa_latency[from][to];
1349 }
1350 EXPORT_SYMBOL(__node_distance);
1351
1352 static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
1353 {
1354 int i;
1355
1356 for (i = 0; i < MAX_NUMNODES; i++) {
1357 struct node_mem_mask *n = &node_masks[i];
1358
1359 if ((grp->mask == n->mask) && (grp->match == n->match))
1360 break;
1361 }
1362 return i;
1363 }
1364
1365 static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
1366 u64 grp, int index)
1367 {
1368 u64 arc;
1369
1370 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1371 int tnode;
1372 u64 target = mdesc_arc_target(md, arc);
1373 struct mdesc_mlgroup *m = find_mlgroup(target);
1374
1375 if (!m)
1376 continue;
1377 tnode = find_best_numa_node_for_mlgroup(m);
1378 if (tnode == MAX_NUMNODES)
1379 continue;
1380 numa_latency[index][tnode] = m->latency;
1381 }
1382 }
1383
1384 static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
1385 int index)
1386 {
1387 struct mdesc_mlgroup *candidate = NULL;
1388 u64 arc, best_latency = ~(u64)0;
1389 struct node_mem_mask *n;
1390
1391 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1392 u64 target = mdesc_arc_target(md, arc);
1393 struct mdesc_mlgroup *m = find_mlgroup(target);
1394 if (!m)
1395 continue;
1396 if (m->latency < best_latency) {
1397 candidate = m;
1398 best_latency = m->latency;
1399 }
1400 }
1401 if (!candidate)
1402 return -ENOENT;
1403
1404 if (num_node_masks != index) {
1405 printk(KERN_ERR "Inconsistent NUMA state, "
1406 "index[%d] != num_node_masks[%d]\n",
1407 index, num_node_masks);
1408 return -EINVAL;
1409 }
1410
1411 n = &node_masks[num_node_masks++];
1412
1413 n->mask = candidate->mask;
1414 n->match = candidate->match;
1415
1416 numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n",
1417 index, n->mask, n->match, candidate->latency);
1418
1419 return 0;
1420 }
1421
1422 static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
1423 int index)
1424 {
1425 cpumask_t mask;
1426 int cpu;
1427
1428 numa_parse_mdesc_group_cpus(md, grp, &mask);
1429
1430 for_each_cpu(cpu, &mask)
1431 numa_cpu_lookup_table[cpu] = index;
1432 cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
1433
1434 if (numa_debug) {
1435 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
1436 for_each_cpu(cpu, &mask)
1437 printk("%d ", cpu);
1438 printk("]\n");
1439 }
1440
1441 return numa_attach_mlgroup(md, grp, index);
1442 }
1443
1444 static int __init numa_parse_mdesc(void)
1445 {
1446 struct mdesc_handle *md = mdesc_grab();
1447 int i, j, err, count;
1448 u64 node;
1449
1450 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1451 if (node == MDESC_NODE_NULL) {
1452 mdesc_release(md);
1453 return -ENOENT;
1454 }
1455
1456 err = grab_mblocks(md);
1457 if (err < 0)
1458 goto out;
1459
1460 err = grab_mlgroups(md);
1461 if (err < 0)
1462 goto out;
1463
1464 count = 0;
1465 mdesc_for_each_node_by_name(md, node, "group") {
1466 err = numa_parse_mdesc_group(md, node, count);
1467 if (err < 0)
1468 break;
1469 count++;
1470 }
1471
1472 count = 0;
1473 mdesc_for_each_node_by_name(md, node, "group") {
1474 find_numa_latencies_for_group(md, node, count);
1475 count++;
1476 }
1477
1478 /* Normalize numa latency matrix according to ACPI SLIT spec. */
1479 for (i = 0; i < MAX_NUMNODES; i++) {
1480 u64 self_latency = numa_latency[i][i];
1481
1482 for (j = 0; j < MAX_NUMNODES; j++) {
1483 numa_latency[i][j] =
1484 (numa_latency[i][j] * LOCAL_DISTANCE) /
1485 self_latency;
1486 }
1487 }
1488
1489 add_node_ranges();
1490
1491 for (i = 0; i < num_node_masks; i++) {
1492 allocate_node_data(i);
1493 node_set_online(i);
1494 }
1495
1496 err = 0;
1497 out:
1498 mdesc_release(md);
1499 return err;
1500 }
1501
1502 static int __init numa_parse_jbus(void)
1503 {
1504 unsigned long cpu, index;
1505
1506 /* NUMA node id is encoded in bits 36 and higher, and there is
1507 * a 1-to-1 mapping from CPU ID to NUMA node ID.
1508 */
1509 index = 0;
1510 for_each_present_cpu(cpu) {
1511 numa_cpu_lookup_table[cpu] = index;
1512 cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
1513 node_masks[index].mask = ~((1UL << 36UL) - 1UL);
1514 node_masks[index].match = cpu << 36UL;
1515
1516 index++;
1517 }
1518 num_node_masks = index;
1519
1520 add_node_ranges();
1521
1522 for (index = 0; index < num_node_masks; index++) {
1523 allocate_node_data(index);
1524 node_set_online(index);
1525 }
1526
1527 return 0;
1528 }
1529
1530 static int __init numa_parse_sun4u(void)
1531 {
1532 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1533 unsigned long ver;
1534
1535 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
1536 if ((ver >> 32UL) == __JALAPENO_ID ||
1537 (ver >> 32UL) == __SERRANO_ID)
1538 return numa_parse_jbus();
1539 }
1540 return -1;
1541 }
1542
1543 static int __init bootmem_init_numa(void)
1544 {
1545 int i, j;
1546 int err = -1;
1547
1548 numadbg("bootmem_init_numa()\n");
1549
1550 /* Some sane defaults for numa latency values */
1551 for (i = 0; i < MAX_NUMNODES; i++) {
1552 for (j = 0; j < MAX_NUMNODES; j++)
1553 numa_latency[i][j] = (i == j) ?
1554 LOCAL_DISTANCE : REMOTE_DISTANCE;
1555 }
1556
1557 if (numa_enabled) {
1558 if (tlb_type == hypervisor)
1559 err = numa_parse_mdesc();
1560 else
1561 err = numa_parse_sun4u();
1562 }
1563 return err;
1564 }
1565
1566 #else
1567
1568 static int bootmem_init_numa(void)
1569 {
1570 return -1;
1571 }
1572
1573 #endif
1574
1575 static void __init bootmem_init_nonnuma(void)
1576 {
1577 unsigned long top_of_ram = memblock_end_of_DRAM();
1578 unsigned long total_ram = memblock_phys_mem_size();
1579
1580 numadbg("bootmem_init_nonnuma()\n");
1581
1582 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
1583 top_of_ram, total_ram);
1584 printk(KERN_INFO "Memory hole size: %ldMB\n",
1585 (top_of_ram - total_ram) >> 20);
1586
1587 init_node_masks_nonnuma();
1588 memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
1589 allocate_node_data(0);
1590 node_set_online(0);
1591 }
1592
1593 static unsigned long __init bootmem_init(unsigned long phys_base)
1594 {
1595 unsigned long end_pfn;
1596
1597 end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1598 max_pfn = max_low_pfn = end_pfn;
1599 min_low_pfn = (phys_base >> PAGE_SHIFT);
1600
1601 if (bootmem_init_numa() < 0)
1602 bootmem_init_nonnuma();
1603
1604 /* Dump memblock with node info. */
1605 memblock_dump_all();
1606
1607 /* XXX cpu notifier XXX */
1608
1609 sparse_init();
1610
1611 return end_pfn;
1612 }
1613
1614 static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1615 static int pall_ents __initdata;
1616
1617 static unsigned long max_phys_bits = 40;
1618
1619 bool kern_addr_valid(unsigned long addr)
1620 {
1621 pgd_t *pgd;
1622 p4d_t *p4d;
1623 pud_t *pud;
1624 pmd_t *pmd;
1625 pte_t *pte;
1626
1627 if ((long)addr < 0L) {
1628 unsigned long pa = __pa(addr);
1629
1630 if ((pa >> max_phys_bits) != 0UL)
1631 return false;
1632
1633 return pfn_valid(pa >> PAGE_SHIFT);
1634 }
1635
1636 if (addr >= (unsigned long) KERNBASE &&
1637 addr < (unsigned long)&_end)
1638 return true;
1639
1640 pgd = pgd_offset_k(addr);
1641 if (pgd_none(*pgd))
1642 return false;
1643
1644 p4d = p4d_offset(pgd, addr);
1645 if (p4d_none(*p4d))
1646 return false;
1647
1648 pud = pud_offset(p4d, addr);
1649 if (pud_none(*pud))
1650 return false;
1651
1652 if (pud_large(*pud))
1653 return pfn_valid(pud_pfn(*pud));
1654
1655 pmd = pmd_offset(pud, addr);
1656 if (pmd_none(*pmd))
1657 return false;
1658
1659 if (pmd_large(*pmd))
1660 return pfn_valid(pmd_pfn(*pmd));
1661
1662 pte = pte_offset_kernel(pmd, addr);
1663 if (pte_none(*pte))
1664 return false;
1665
1666 return pfn_valid(pte_pfn(*pte));
1667 }
1668 EXPORT_SYMBOL(kern_addr_valid);
1669
1670 static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
1671 unsigned long vend,
1672 pud_t *pud)
1673 {
1674 const unsigned long mask16gb = (1UL << 34) - 1UL;
1675 u64 pte_val = vstart;
1676
1677 /* Each PUD is 8GB */
1678 if ((vstart & mask16gb) ||
1679 (vend - vstart <= mask16gb)) {
1680 pte_val ^= kern_linear_pte_xor[2];
1681 pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
1682
1683 return vstart + PUD_SIZE;
1684 }
1685
1686 pte_val ^= kern_linear_pte_xor[3];
1687 pte_val |= _PAGE_PUD_HUGE;
1688
1689 vend = vstart + mask16gb + 1UL;
1690 while (vstart < vend) {
1691 pud_val(*pud) = pte_val;
1692
1693 pte_val += PUD_SIZE;
1694 vstart += PUD_SIZE;
1695 pud++;
1696 }
1697 return vstart;
1698 }
1699
1700 static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
1701 bool guard)
1702 {
1703 if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
1704 return true;
1705
1706 return false;
1707 }
1708
1709 static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
1710 unsigned long vend,
1711 pmd_t *pmd)
1712 {
1713 const unsigned long mask256mb = (1UL << 28) - 1UL;
1714 const unsigned long mask2gb = (1UL << 31) - 1UL;
1715 u64 pte_val = vstart;
1716
1717 /* Each PMD is 8MB */
1718 if ((vstart & mask256mb) ||
1719 (vend - vstart <= mask256mb)) {
1720 pte_val ^= kern_linear_pte_xor[0];
1721 pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
1722
1723 return vstart + PMD_SIZE;
1724 }
1725
1726 if ((vstart & mask2gb) ||
1727 (vend - vstart <= mask2gb)) {
1728 pte_val ^= kern_linear_pte_xor[1];
1729 pte_val |= _PAGE_PMD_HUGE;
1730 vend = vstart + mask256mb + 1UL;
1731 } else {
1732 pte_val ^= kern_linear_pte_xor[2];
1733 pte_val |= _PAGE_PMD_HUGE;
1734 vend = vstart + mask2gb + 1UL;
1735 }
1736
1737 while (vstart < vend) {
1738 pmd_val(*pmd) = pte_val;
1739
1740 pte_val += PMD_SIZE;
1741 vstart += PMD_SIZE;
1742 pmd++;
1743 }
1744
1745 return vstart;
1746 }
1747
1748 static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
1749 bool guard)
1750 {
1751 if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
1752 return true;
1753
1754 return false;
1755 }
1756
1757 static unsigned long __ref kernel_map_range(unsigned long pstart,
1758 unsigned long pend, pgprot_t prot,
1759 bool use_huge)
1760 {
1761 unsigned long vstart = PAGE_OFFSET + pstart;
1762 unsigned long vend = PAGE_OFFSET + pend;
1763 unsigned long alloc_bytes = 0UL;
1764
1765 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1766 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1767 vstart, vend);
1768 prom_halt();
1769 }
1770
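/* Walk (and build) the kernel page tables for this range, allocating
 * missing intermediate levels from memblock and using huge PUD/PMD
 * mappings whenever alignment and remaining size allow.
 */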
1771 while (vstart < vend) {
1772 unsigned long this_end, paddr = __pa(vstart);
1773 pgd_t *pgd = pgd_offset_k(vstart);
1774 p4d_t *p4d;
1775 pud_t *pud;
1776 pmd_t *pmd;
1777 pte_t *pte;
1778
1779 if (pgd_none(*pgd)) {
1780 pud_t *new;
1781
1782 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1783 PAGE_SIZE);
1784 if (!new)
1785 goto err_alloc;
1786 alloc_bytes += PAGE_SIZE;
1787 pgd_populate(&init_mm, pgd, new);
1788 }
1789
1790 p4d = p4d_offset(pgd, vstart);
1791 if (p4d_none(*p4d)) {
1792 pud_t *new;
1793
1794 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1795 PAGE_SIZE);
1796 if (!new)
1797 goto err_alloc;
1798 alloc_bytes += PAGE_SIZE;
1799 p4d_populate(&init_mm, p4d, new);
1800 }
1801
1802 pud = pud_offset(p4d, vstart);
1803 if (pud_none(*pud)) {
1804 pmd_t *new;
1805
1806 if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
1807 vstart = kernel_map_hugepud(vstart, vend, pud);
1808 continue;
1809 }
1810 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1811 PAGE_SIZE);
1812 if (!new)
1813 goto err_alloc;
1814 alloc_bytes += PAGE_SIZE;
1815 pud_populate(&init_mm, pud, new);
1816 }
1817
1818 pmd = pmd_offset(pud, vstart);
1819 if (pmd_none(*pmd)) {
1820 pte_t *new;
1821
1822 if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
1823 vstart = kernel_map_hugepmd(vstart, vend, pmd);
1824 continue;
1825 }
1826 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1827 PAGE_SIZE);
1828 if (!new)
1829 goto err_alloc;
1830 alloc_bytes += PAGE_SIZE;
1831 pmd_populate_kernel(&init_mm, pmd, new);
1832 }
1833
1834 pte = pte_offset_kernel(pmd, vstart);
1835 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1836 if (this_end > vend)
1837 this_end = vend;
1838
1839 while (vstart < this_end) {
1840 pte_val(*pte) = (paddr | pgprot_val(prot));
1841
1842 vstart += PAGE_SIZE;
1843 paddr += PAGE_SIZE;
1844 pte++;
1845 }
1846 }
1847
1848 return alloc_bytes;
1849
1850 err_alloc:
1851 panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
1852 __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1853 return -ENOMEM;
1854 }
1855
1856 static void __init flush_all_kernel_tsbs(void)
1857 {
1858 int i;
1859
1860 for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
1861 struct tsb *ent = &swapper_tsb[i];
1862
1863 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1864 }
1865 #ifndef CONFIG_DEBUG_PAGEALLOC
1866 for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
1867 struct tsb *ent = &swapper_4m_tsb[i];
1868
1869 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1870 }
1871 #endif
1872 }
1873
1874 extern unsigned int kvmap_linear_patch[1];
1875
1876 static void __init kernel_physical_mapping_init(void)
1877 {
1878 unsigned long i, mem_alloced = 0UL;
1879 bool use_huge = true;
1880
1881 #ifdef CONFIG_DEBUG_PAGEALLOC
1882 use_huge = false;
1883 #endif
1884 for (i = 0; i < pall_ents; i++) {
1885 unsigned long phys_start, phys_end;
1886
1887 phys_start = pall[i].phys_addr;
1888 phys_end = phys_start + pall[i].reg_size;
1889
1890 mem_alloced += kernel_map_range(phys_start, phys_end,
1891 PAGE_KERNEL, use_huge);
1892 }
1893
1894 printk("Allocated %ld bytes for kernel page tables.\n",
1895 mem_alloced);
1896
1897 kvmap_linear_patch[0] = 0x01000000; /* nop */
1898 flushi(&kvmap_linear_patch[0]);
1899
1900 flush_all_kernel_tsbs();
1901
1902 __flush_tlb_all();
1903 }
1904
1905 #ifdef CONFIG_DEBUG_PAGEALLOC
1906 void __kernel_map_pages(struct page *page, int numpages, int enable)
1907 {
1908 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1909 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1910
1911 kernel_map_range(phys_start, phys_end,
1912 (enable ? PAGE_KERNEL : __pgprot(0)), false);
1913
1914 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1915 PAGE_OFFSET + phys_end);
1916
1917 /* we should perform an IPI and flush all tlbs,
1918 * but that can deadlock, so flush only the current cpu.
1919 */
1920 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1921 PAGE_OFFSET + phys_end);
1922 }
1923 #endif
1924
1925 unsigned long __init find_ecache_flush_span(unsigned long size)
1926 {
1927 int i;
1928
1929 for (i = 0; i < pavail_ents; i++) {
1930 if (pavail[i].reg_size >= size)
1931 return pavail[i].phys_addr;
1932 }
1933
1934 return ~0UL;
1935 }
1936
1937 unsigned long PAGE_OFFSET;
1938 EXPORT_SYMBOL(PAGE_OFFSET);
1939
1940 unsigned long VMALLOC_END = 0x0000010000000000UL;
1941 EXPORT_SYMBOL(VMALLOC_END);
1942
1943 unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
1944 unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
1945
1946 static void __init setup_page_offset(void)
1947 {
1948 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1949 /* Cheetah/Panther support a full 64-bit virtual
1950 * address, so we can use all that our page tables
1951 * support.
1952 */
1953 sparc64_va_hole_top = 0xfff0000000000000UL;
1954 sparc64_va_hole_bottom = 0x0010000000000000UL;
1955
1956 max_phys_bits = 42;
1957 } else if (tlb_type == hypervisor) {
1958 switch (sun4v_chip_type) {
1959 case SUN4V_CHIP_NIAGARA1:
1960 case SUN4V_CHIP_NIAGARA2:
1961 /* T1 and T2 support 48-bit virtual addresses. */
1962 sparc64_va_hole_top = 0xffff800000000000UL;
1963 sparc64_va_hole_bottom = 0x0000800000000000UL;
1964
1965 max_phys_bits = 39;
1966 break;
1967 case SUN4V_CHIP_NIAGARA3:
1968 /* T3 supports 48-bit virtual addresses. */
1969 sparc64_va_hole_top = 0xffff800000000000UL;
1970 sparc64_va_hole_bottom = 0x0000800000000000UL;
1971
1972 max_phys_bits = 43;
1973 break;
1974 case SUN4V_CHIP_NIAGARA4:
1975 case SUN4V_CHIP_NIAGARA5:
1976 case SUN4V_CHIP_SPARC64X:
1977 case SUN4V_CHIP_SPARC_M6:
1978 /* T4 and later support 52-bit virtual addresses. */
1979 sparc64_va_hole_top = 0xfff8000000000000UL;
1980 sparc64_va_hole_bottom = 0x0008000000000000UL;
1981 max_phys_bits = 47;
1982 break;
1983 case SUN4V_CHIP_SPARC_M7:
1984 case SUN4V_CHIP_SPARC_SN:
1985 /* M7 and later support 52-bit virtual addresses. */
1986 sparc64_va_hole_top = 0xfff8000000000000UL;
1987 sparc64_va_hole_bottom = 0x0008000000000000UL;
1988 max_phys_bits = 49;
1989 break;
1990 case SUN4V_CHIP_SPARC_M8:
1991 default:
1992 /* M8 and later support 54-bit virtual addresses.
1993 * However, we restrict M8 and later to 53 VA bits
1994 * because a 4-level page table cannot support more
1995 * than 53 VA bits.
1996 */
1997 sparc64_va_hole_top = 0xfff0000000000000UL;
1998 sparc64_va_hole_bottom = 0x0010000000000000UL;
1999 max_phys_bits = 51;
2000 break;
2001 }
2002 }
2003
2004 if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
2005 prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
2006 max_phys_bits);
2007 prom_halt();
2008 }
2009
2010 PAGE_OFFSET = sparc64_va_hole_top;
2011 VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
2012 (sparc64_va_hole_bottom >> 2));
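/* VMALLOC_END sits three quarters of the way up to the bottom of the
 * VA hole: (hole_bottom >> 1) + (hole_bottom >> 2).
 */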
2013
2014 pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
2015 PAGE_OFFSET, max_phys_bits);
2016 pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
2017 VMALLOC_START, VMALLOC_END);
2018 pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
2019 VMEMMAP_BASE, VMEMMAP_BASE << 1);
2020 }
2021
2022 static void __init tsb_phys_patch(void)
2023 {
2024 struct tsb_ldquad_phys_patch_entry *pquad;
2025 struct tsb_phys_patch_entry *p;
2026
2027 pquad = &__tsb_ldquad_phys_patch;
2028 while (pquad < &__tsb_ldquad_phys_patch_end) {
2029 unsigned long addr = pquad->addr;
2030
2031 if (tlb_type == hypervisor)
2032 *(unsigned int *) addr = pquad->sun4v_insn;
2033 else
2034 *(unsigned int *) addr = pquad->sun4u_insn;
2035 wmb();
2036 __asm__ __volatile__("flush %0"
2037 : /* no outputs */
2038 : "r" (addr));
2039
2040 pquad++;
2041 }
2042
2043 p = &__tsb_phys_patch;
2044 while (p < &__tsb_phys_patch_end) {
2045 unsigned long addr = p->addr;
2046
2047 *(unsigned int *) addr = p->insn;
2048 wmb();
2049 __asm__ __volatile__("flush %0"
2050 : /* no outputs */
2051 : "r" (addr));
2052
2053 p++;
2054 }
2055 }
2056
2057 /* Don't mark as init, we give this to the Hypervisor. */
2058 #ifndef CONFIG_DEBUG_PAGEALLOC
2059 #define NUM_KTSB_DESCR 2
2060 #else
2061 #define NUM_KTSB_DESCR 1
2062 #endif
2063 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
2064
2065 /* The swapper TSBs are loaded with a base sequence of:
2066 *
2067 * sethi %uhi(SYMBOL), REG1
2068 * sethi %hi(SYMBOL), REG2
2069 * or REG1, %ulo(SYMBOL), REG1
2070 * or REG2, %lo(SYMBOL), REG2
2071 * sllx REG1, 32, REG1
2072 * or REG1, REG2, REG1
2073 *
2074 * When we use physical addressing for the TSB accesses, we patch the
2075 * first four instructions in the above sequence.
2076 */
2077
2078 static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
2079 {
2080 unsigned long high_bits, low_bits;
2081
2082 high_bits = (pa >> 32) & 0xffffffff;
2083 low_bits = (pa >> 0) & 0xffffffff;
2084
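	/* Each patch site records the address of the instruction
	 * sequence above.  The 22-bit sethi immediates receive the
	 * upper bits of each 32-bit half of the physical address, and
	 * the low 10 bits go into the 13-bit 'or' immediates; every
	 * patched instruction is flushed afterwards.
	 */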
2085 while (start < end) {
2086 unsigned int *ia = (unsigned int *)(unsigned long)*start;
2087
2088 ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
2089 __asm__ __volatile__("flush %0" : : "r" (ia));
2090
2091 ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
2092 __asm__ __volatile__("flush %0" : : "r" (ia + 1));
2093
2094 ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
2095 __asm__ __volatile__("flush %0" : : "r" (ia + 2));
2096
2097 ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
2098 __asm__ __volatile__("flush %0" : : "r" (ia + 3));
2099
2100 start++;
2101 }
2102 }
2103
2104 static void ktsb_phys_patch(void)
2105 {
2106 extern unsigned int __swapper_tsb_phys_patch;
2107 extern unsigned int __swapper_tsb_phys_patch_end;
2108 unsigned long ktsb_pa;
2109
2110 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2111 patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
2112 &__swapper_tsb_phys_patch_end, ktsb_pa);
2113 #ifndef CONFIG_DEBUG_PAGEALLOC
2114 {
2115 extern unsigned int __swapper_4m_tsb_phys_patch;
2116 extern unsigned int __swapper_4m_tsb_phys_patch_end;
2117 ktsb_pa = (kern_base +
2118 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2119 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
2120 &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
2121 }
2122 #endif
2123 }
2124
2125 static void __init sun4v_ktsb_init(void)
2126 {
2127 unsigned long ktsb_pa;
2128
2129 /* First KTSB for PAGE_SIZE mappings. */
2130 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2131
2132 switch (PAGE_SIZE) {
2133 case 8 * 1024:
2134 default:
2135 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
2136 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
2137 break;
2138
2139 case 64 * 1024:
2140 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
2141 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
2142 break;
2143
2144 case 512 * 1024:
2145 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
2146 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
2147 break;
2148
2149 case 4 * 1024 * 1024:
2150 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
2151 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
2152 break;
2153 }
2154
2155 ktsb_descr[0].assoc = 1;
2156 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
2157 ktsb_descr[0].ctx_idx = 0;
2158 ktsb_descr[0].tsb_base = ktsb_pa;
2159 ktsb_descr[0].resv = 0;
2160
2161 #ifndef CONFIG_DEBUG_PAGEALLOC
2162 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
2163 ktsb_pa = (kern_base +
2164 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2165
2166 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
2167 ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
2168 HV_PGSZ_MASK_256MB |
2169 HV_PGSZ_MASK_2GB |
2170 HV_PGSZ_MASK_16GB) &
2171 cpu_pgsz_mask);
2172 ktsb_descr[1].assoc = 1;
2173 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
2174 ktsb_descr[1].ctx_idx = 0;
2175 ktsb_descr[1].tsb_base = ktsb_pa;
2176 ktsb_descr[1].resv = 0;
2177 #endif
2178 }
2179
2180 void sun4v_ktsb_register(void)
2181 {
2182 unsigned long pa, ret;
2183
2184 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
2185
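	/* Hand the ctx0 (kernel) TSB descriptor array to the
	 * hypervisor by physical address.
	 */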
2186 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
2187 if (ret != 0) {
2188 		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
2189 			    "failed with error %lx\n", pa, ret);
2190 prom_halt();
2191 }
2192 }
2193
2194 static void __init sun4u_linear_pte_xor_finalize(void)
2195 {
2196 #ifndef CONFIG_DEBUG_PAGEALLOC
2197 /* This is where we would add Panther support for
2198 * 32MB and 256MB pages.
2199 */
2200 #endif
2201 }
2202
2203 static void __init sun4v_linear_pte_xor_finalize(void)
2204 {
2205 unsigned long pagecv_flag;
2206
2207 /* Bit 9 of TTE is no longer CV bit on M7 processor and it instead
2208 * enables MCD error. Do not set bit 9 on M7 processor.
2209 */
2210 switch (sun4v_chip_type) {
2211 case SUN4V_CHIP_SPARC_M7:
2212 case SUN4V_CHIP_SPARC_M8:
2213 case SUN4V_CHIP_SPARC_SN:
2214 pagecv_flag = 0x00;
2215 break;
2216 default:
2217 pagecv_flag = _PAGE_CV_4V;
2218 break;
2219 }
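	/* For each linear-mapping slot, use the larger page size if the
	 * cpu advertises it; otherwise fall back to the encoding of the
	 * next smaller slot.
	 */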
2220 #ifndef CONFIG_DEBUG_PAGEALLOC
2221 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
2222 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
2223 PAGE_OFFSET;
2224 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
2225 _PAGE_P_4V | _PAGE_W_4V);
2226 } else {
2227 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
2228 }
2229
2230 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
2231 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
2232 PAGE_OFFSET;
2233 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
2234 _PAGE_P_4V | _PAGE_W_4V);
2235 } else {
2236 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
2237 }
2238
2239 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
2240 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
2241 PAGE_OFFSET;
2242 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
2243 _PAGE_P_4V | _PAGE_W_4V);
2244 } else {
2245 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
2246 }
2247 #endif
2248 }
2249
2250 /* paging_init() sets up the page tables */
2251
2252 static unsigned long last_valid_pfn;
2253
2254 static void sun4u_pgprot_init(void);
2255 static void sun4v_pgprot_init(void);
2256
2257 #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2258 #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2259 #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2260 #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2261 #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2262 #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2263
2264 /* We need to exclude reserved regions. This exclusion will include
2265 * vmlinux and initrd. To be more precise the initrd size could be used to
2266 * compute a new lower limit because it is freed later during initialization.
2267 */
2268 static void __init reduce_memory(phys_addr_t limit_ram)
2269 {
2270 limit_ram += memblock_reserved_size();
2271 memblock_enforce_memory_limit(limit_ram);
2272 }
2273
2274 void __init paging_init(void)
2275 {
2276 unsigned long end_pfn, shift, phys_base;
2277 unsigned long real_end, i;
2278
2279 setup_page_offset();
2280
2281 	/* These build time checks make sure that the dcache_dirty_cpu()
2282 * page->flags usage will work.
2283 *
2284 * When a page gets marked as dcache-dirty, we store the
2285 * cpu number starting at bit 32 in the page->flags. Also,
2286 * functions like clear_dcache_dirty_cpu use the cpu mask
2287 * in 13-bit signed-immediate instruction fields.
2288 */
2289
2290 /*
2291 * Page flags must not reach into upper 32 bits that are used
2292 * for the cpu number
2293 */
2294 BUILD_BUG_ON(NR_PAGEFLAGS > 32);
2295
2296 /*
2297 * The bit fields placed in the high range must not reach below
2298 * the 32 bit boundary. Otherwise we cannot place the cpu field
2299 * at the 32 bit boundary.
2300 */
2301 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
2302 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
2303
2304 BUILD_BUG_ON(NR_CPUS > 4096);
2305
2306 kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
2307 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
2308
2309 /* Invalidate both kernel TSBs. */
2310 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
2311 #ifndef CONFIG_DEBUG_PAGEALLOC
2312 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2313 #endif
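	/* Filling with 0x40 sets the invalid bit (presumably
	 * TSB_TAG_INVALID_BIT) in every tag word, so all lookups miss
	 * until real entries are installed.
	 */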
2314
2315 /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
2316 * bit on M7 processor. This is a conflicting usage of the same
2317 * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
2318 * Detection error on all pages and this will lead to problems
2319 	 * later. The kernel does not run with MCD enabled, and hence the
2320 	 * rest of the steps required to fully configure memory corruption
2321 	 * detection are not taken. We need to ensure TTE.mcde is not
2322 * set on M7 processor. Compute the value of cacheability
2323 * flag for use later taking this into consideration.
2324 */
2325 switch (sun4v_chip_type) {
2326 case SUN4V_CHIP_SPARC_M7:
2327 case SUN4V_CHIP_SPARC_M8:
2328 case SUN4V_CHIP_SPARC_SN:
2329 page_cache4v_flag = _PAGE_CP_4V;
2330 break;
2331 default:
2332 page_cache4v_flag = _PAGE_CACHE_4V;
2333 break;
2334 }
2335
2336 if (tlb_type == hypervisor)
2337 sun4v_pgprot_init();
2338 else
2339 sun4u_pgprot_init();
2340
2341 if (tlb_type == cheetah_plus ||
2342 tlb_type == hypervisor) {
2343 tsb_phys_patch();
2344 ktsb_phys_patch();
2345 }
2346
2347 if (tlb_type == hypervisor)
2348 sun4v_patch_tlb_handlers();
2349
2350 /* Find available physical memory...
2351 *
2352 * Read it twice in order to work around a bug in openfirmware.
2353 * The call to grab this table itself can cause openfirmware to
2354 * allocate memory, which in turn can take away some space from
2355 * the list of available memory. Reading it twice makes sure
2356 * we really do get the final value.
2357 */
2358 read_obp_translations();
2359 read_obp_memory("reg", &pall[0], &pall_ents);
2360 read_obp_memory("available", &pavail[0], &pavail_ents);
2361 read_obp_memory("available", &pavail[0], &pavail_ents);
2362
2363 phys_base = 0xffffffffffffffffUL;
2364 for (i = 0; i < pavail_ents; i++) {
2365 phys_base = min(phys_base, pavail[i].phys_addr);
2366 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
2367 }
2368
2369 memblock_reserve(kern_base, kern_size);
2370
2371 find_ramdisk(phys_base);
2372
2373 if (cmdline_memory_size)
2374 reduce_memory(cmdline_memory_size);
2375
2376 memblock_allow_resize();
2377 memblock_dump_all();
2378
2379 set_bit(0, mmu_context_bmap);
2380
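	/* shift converts a KERNBASE-relative kernel image address into
	 * its alias in the PAGE_OFFSET linear mapping of physical memory.
	 */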
2381 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
2382
2383 real_end = (unsigned long)_end;
2384 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
2385 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2386 num_kernel_image_mappings);
2387
2388 /* Set kernel pgd to upper alias so physical page computations
2389 * work.
2390 */
2391 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
2392
2393 memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
2394
2395 inherit_prom_mappings();
2396
2397 /* Ok, we can use our TLB miss and window trap handlers safely. */
2398 setup_tba();
2399
2400 __flush_tlb_all();
2401
2402 prom_build_devicetree();
2403 of_populate_present_mask();
2404 #ifndef CONFIG_SMP
2405 of_fill_in_cpu_data();
2406 #endif
2407
2408 if (tlb_type == hypervisor) {
2409 sun4v_mdesc_init();
2410 mdesc_populate_present_mask(cpu_all_mask);
2411 #ifndef CONFIG_SMP
2412 mdesc_fill_in_cpu_data(cpu_all_mask);
2413 #endif
2414 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
2415
2416 sun4v_linear_pte_xor_finalize();
2417
2418 sun4v_ktsb_init();
2419 sun4v_ktsb_register();
2420 } else {
2421 unsigned long impl, ver;
2422
2423 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
2424 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
2425
2426 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
2427 impl = ((ver >> 32) & 0xffff);
2428 if (impl == PANTHER_IMPL)
2429 cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2430 HV_PGSZ_MASK_256MB);
2431
2432 sun4u_linear_pte_xor_finalize();
2433 }
2434
2435 /* Flush the TLBs and the 4M TSB so that the updated linear
2436 * pte XOR settings are realized for all mappings.
2437 */
2438 __flush_tlb_all();
2439 #ifndef CONFIG_DEBUG_PAGEALLOC
2440 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2441 #endif
2442 __flush_tlb_all();
2443
2444 /* Setup bootmem... */
2445 last_valid_pfn = end_pfn = bootmem_init(phys_base);
2446
2447 kernel_physical_mapping_init();
2448
2449 {
2450 unsigned long max_zone_pfns[MAX_NR_ZONES];
2451
2452 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
2453
2454 max_zone_pfns[ZONE_NORMAL] = end_pfn;
2455
2456 free_area_init(max_zone_pfns);
2457 }
2458
2459 printk("Booting Linux...\n");
2460 }
2461
2462 int page_in_phys_avail(unsigned long paddr)
2463 {
2464 int i;
2465
2466 paddr &= PAGE_MASK;
2467
2468 for (i = 0; i < pavail_ents; i++) {
2469 unsigned long start, end;
2470
2471 start = pavail[i].phys_addr;
2472 end = start + pavail[i].reg_size;
2473
2474 if (paddr >= start && paddr < end)
2475 return 1;
2476 }
2477 if (paddr >= kern_base && paddr < (kern_base + kern_size))
2478 return 1;
2479 #ifdef CONFIG_BLK_DEV_INITRD
2480 if (paddr >= __pa(initrd_start) &&
2481 paddr < __pa(PAGE_ALIGN(initrd_end)))
2482 return 1;
2483 #endif
2484
2485 return 0;
2486 }
2487
2488 static void __init register_page_bootmem_info(void)
2489 {
2490 #ifdef CONFIG_NEED_MULTIPLE_NODES
2491 int i;
2492
2493 for_each_online_node(i)
2494 if (NODE_DATA(i)->node_spanned_pages)
2495 register_page_bootmem_info_node(NODE_DATA(i));
2496 #endif
2497 }
2498 void __init mem_init(void)
2499 {
2500 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2501
2502 memblock_free_all();
2503
2504 /*
2505 * Must be done after boot memory is put on freelist, because here we
2506 * might set fields in deferred struct pages that have not yet been
2507 * initialized, and memblock_free_all() initializes all the reserved
2508 * deferred pages for us.
2509 */
2510 register_page_bootmem_info();
2511
2512 /*
2513 * Set up the zero page, mark it reserved, so that page count
2514 * is not manipulated when freeing the page from user ptes.
2515 */
2516 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2517 if (mem_map_zero == NULL) {
2518 prom_printf("paging_init: Cannot alloc zero page.\n");
2519 prom_halt();
2520 }
2521 mark_page_reserved(mem_map_zero);
2522
2523 mem_init_print_info(NULL);
2524
2525 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2526 cheetah_ecache_flush_init();
2527 }
2528
2529 void free_initmem(void)
2530 {
2531 unsigned long addr, initend;
2532 int do_free = 1;
2533
2534 /* If the physical memory maps were trimmed by kernel command
2535 * line options, don't even try freeing this initmem stuff up.
2536 * The kernel image could have been in the trimmed out region
2537 * and if so the freeing below will free invalid page structs.
2538 */
2539 if (cmdline_memory_size)
2540 do_free = 0;
2541
2542 /*
2543 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
2544 */
2545 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2546 initend = (unsigned long)(__init_end) & PAGE_MASK;
2547 for (; addr < initend; addr += PAGE_SIZE) {
2548 unsigned long page;
2549
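		/* Convert the KERNBASE-based init address into its
		 * linear-map alias so virt_to_page() resolves to the
		 * correct struct page.
		 */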
2550 page = (addr +
2551 ((unsigned long) __va(kern_base)) -
2552 ((unsigned long) KERNBASE));
2553 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
2554
2555 if (do_free)
2556 free_reserved_page(virt_to_page(page));
2557 }
2558 }
2559
2560 pgprot_t PAGE_KERNEL __read_mostly;
2561 EXPORT_SYMBOL(PAGE_KERNEL);
2562
2563 pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2564 pgprot_t PAGE_COPY __read_mostly;
2565
2566 pgprot_t PAGE_SHARED __read_mostly;
2567 EXPORT_SYMBOL(PAGE_SHARED);
2568
2569 unsigned long pg_iobits __read_mostly;
2570
2571 unsigned long _PAGE_IE __read_mostly;
2572 EXPORT_SYMBOL(_PAGE_IE);
2573
2574 unsigned long _PAGE_E __read_mostly;
2575 EXPORT_SYMBOL(_PAGE_E);
2576
2577 unsigned long _PAGE_CACHE __read_mostly;
2578 EXPORT_SYMBOL(_PAGE_CACHE);
2579
2580 #ifdef CONFIG_SPARSEMEM_VMEMMAP
2581 int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2582 int node, struct vmem_altmap *altmap)
2583 {
2584 unsigned long pte_base;
2585
2586 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2587 _PAGE_CP_4U | _PAGE_CV_4U |
2588 _PAGE_P_4U | _PAGE_W_4U);
2589 if (tlb_type == hypervisor)
2590 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2591 page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
2592
2593 pte_base |= _PAGE_PMD_HUGE;
2594
2595 vstart = vstart & PMD_MASK;
2596 vend = ALIGN(vend, PMD_SIZE);
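	/* Map the [vstart, vend) range of the vmemmap with huge PMD
	 * entries, each backed by a PMD_SIZE block allocated on the
	 * requested node.
	 */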
2597 for (; vstart < vend; vstart += PMD_SIZE) {
2598 pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
2599 unsigned long pte;
2600 p4d_t *p4d;
2601 pud_t *pud;
2602 pmd_t *pmd;
2603
2604 if (!pgd)
2605 return -ENOMEM;
2606
2607 p4d = vmemmap_p4d_populate(pgd, vstart, node);
2608 if (!p4d)
2609 return -ENOMEM;
2610
2611 pud = vmemmap_pud_populate(p4d, vstart, node);
2612 if (!pud)
2613 return -ENOMEM;
2614
2615 pmd = pmd_offset(pud, vstart);
2616 pte = pmd_val(*pmd);
2617 if (!(pte & _PAGE_VALID)) {
2618 void *block = vmemmap_alloc_block(PMD_SIZE, node);
2619
2620 if (!block)
2621 return -ENOMEM;
2622
2623 pmd_val(*pmd) = pte_base | __pa(block);
2624 }
2625 }
2626
2627 return 0;
2628 }
2629
2630 void vmemmap_free(unsigned long start, unsigned long end,
2631 struct vmem_altmap *altmap)
2632 {
2633 }
2634 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
2635
2636 static void prot_init_common(unsigned long page_none,
2637 unsigned long page_shared,
2638 unsigned long page_copy,
2639 unsigned long page_readonly,
2640 unsigned long page_exec_bit)
2641 {
2642 PAGE_COPY = __pgprot(page_copy);
2643 PAGE_SHARED = __pgprot(page_shared);
2644
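	/* protection_map is indexed by the low four vm_flags bits:
	 * bit 0 = VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC,
	 * bit 3 = VM_SHARED.  Private writable mappings get the COW
	 * "copy" protections, shared writable ones get "shared".
	 */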
2645 protection_map[0x0] = __pgprot(page_none);
2646 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2647 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2648 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2649 protection_map[0x4] = __pgprot(page_readonly);
2650 protection_map[0x5] = __pgprot(page_readonly);
2651 protection_map[0x6] = __pgprot(page_copy);
2652 protection_map[0x7] = __pgprot(page_copy);
2653 protection_map[0x8] = __pgprot(page_none);
2654 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2655 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2656 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2657 protection_map[0xc] = __pgprot(page_readonly);
2658 protection_map[0xd] = __pgprot(page_readonly);
2659 protection_map[0xe] = __pgprot(page_shared);
2660 protection_map[0xf] = __pgprot(page_shared);
2661 }
2662
2663 static void __init sun4u_pgprot_init(void)
2664 {
2665 unsigned long page_none, page_shared, page_copy, page_readonly;
2666 unsigned long page_exec_bit;
2667 int i;
2668
2669 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2670 _PAGE_CACHE_4U | _PAGE_P_4U |
2671 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2672 _PAGE_EXEC_4U);
2673 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2674 _PAGE_CACHE_4U | _PAGE_P_4U |
2675 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2676 _PAGE_EXEC_4U | _PAGE_L_4U);
2677
2678 _PAGE_IE = _PAGE_IE_4U;
2679 _PAGE_E = _PAGE_E_4U;
2680 _PAGE_CACHE = _PAGE_CACHE_4U;
2681
2682 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2683 __ACCESS_BITS_4U | _PAGE_E_4U);
2684
2685 #ifdef CONFIG_DEBUG_PAGEALLOC
2686 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2687 #else
2688 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
2689 PAGE_OFFSET;
2690 #endif
2691 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2692 _PAGE_P_4U | _PAGE_W_4U);
2693
2694 for (i = 1; i < 4; i++)
2695 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2696
2697 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2698 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2699 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2700
2701
2702 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2703 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2704 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2705 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2706 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2707 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2708 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2709
2710 page_exec_bit = _PAGE_EXEC_4U;
2711
2712 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2713 page_exec_bit);
2714 }
2715
2716 static void __init sun4v_pgprot_init(void)
2717 {
2718 unsigned long page_none, page_shared, page_copy, page_readonly;
2719 unsigned long page_exec_bit;
2720 int i;
2721
2722 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2723 page_cache4v_flag | _PAGE_P_4V |
2724 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2725 _PAGE_EXEC_4V);
2726 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
2727
2728 _PAGE_IE = _PAGE_IE_4V;
2729 _PAGE_E = _PAGE_E_4V;
2730 _PAGE_CACHE = page_cache4v_flag;
2731
2732 #ifdef CONFIG_DEBUG_PAGEALLOC
2733 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2734 #else
2735 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
2736 PAGE_OFFSET;
2737 #endif
2738 kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
2739 _PAGE_W_4V);
2740
2741 for (i = 1; i < 4; i++)
2742 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2743
2744 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2745 __ACCESS_BITS_4V | _PAGE_E_4V);
2746
2747 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2748 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2749 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2750 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2751
2752 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
2753 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2754 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2755 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2756 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2757 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2758 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2759
2760 page_exec_bit = _PAGE_EXEC_4V;
2761
2762 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2763 page_exec_bit);
2764 }
2765
2766 unsigned long pte_sz_bits(unsigned long sz)
2767 {
2768 if (tlb_type == hypervisor) {
2769 switch (sz) {
2770 case 8 * 1024:
2771 default:
2772 return _PAGE_SZ8K_4V;
2773 case 64 * 1024:
2774 return _PAGE_SZ64K_4V;
2775 case 512 * 1024:
2776 return _PAGE_SZ512K_4V;
2777 case 4 * 1024 * 1024:
2778 return _PAGE_SZ4MB_4V;
2779 }
2780 } else {
2781 switch (sz) {
2782 case 8 * 1024:
2783 default:
2784 return _PAGE_SZ8K_4U;
2785 case 64 * 1024:
2786 return _PAGE_SZ64K_4U;
2787 case 512 * 1024:
2788 return _PAGE_SZ512K_4U;
2789 case 4 * 1024 * 1024:
2790 return _PAGE_SZ4MB_4U;
2791 }
2792 }
2793 }
2794
2795 pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2796 {
2797 pte_t pte;
2798
2799 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
2800 pte_val(pte) |= (((unsigned long)space) << 32);
2801 pte_val(pte) |= pte_sz_bits(page_size);
2802
2803 return pte;
2804 }
2805
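/* Build the TTE used to map the kernel image with 4MB pages for the
 * given physical address (locked in the TLB on sun4u).
 */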
2806 static unsigned long kern_large_tte(unsigned long paddr)
2807 {
2808 unsigned long val;
2809
2810 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2811 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2812 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2813 if (tlb_type == hypervisor)
2814 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2815 page_cache4v_flag | _PAGE_P_4V |
2816 _PAGE_EXEC_4V | _PAGE_W_4V);
2817
2818 return val | paddr;
2819 }
2820
2821 /* If not locked, zap it. */
2822 void __flush_tlb_all(void)
2823 {
2824 unsigned long pstate;
2825 int i;
2826
2827 __asm__ __volatile__("flushw\n\t"
2828 "rdpr %%pstate, %0\n\t"
2829 "wrpr %0, %1, %%pstate"
2830 : "=r" (pstate)
2831 : "i" (PSTATE_IE));
2832 if (tlb_type == hypervisor) {
2833 sun4v_mmu_demap_all();
2834 } else if (tlb_type == spitfire) {
2835 for (i = 0; i < 64; i++) {
2836 /* Spitfire Errata #32 workaround */
2837 /* NOTE: Always runs on spitfire, so no
2838 * cheetah+ page size encodings.
2839 */
2840 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2841 "flush %%g6"
2842 : /* No outputs */
2843 : "r" (0),
2844 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2845
2846 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2847 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2848 "membar #Sync"
2849 : /* no outputs */
2850 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2851 spitfire_put_dtlb_data(i, 0x0UL);
2852 }
2853
2854 /* Spitfire Errata #32 workaround */
2855 /* NOTE: Always runs on spitfire, so no
2856 * cheetah+ page size encodings.
2857 */
2858 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2859 "flush %%g6"
2860 : /* No outputs */
2861 : "r" (0),
2862 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2863
2864 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2865 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2866 "membar #Sync"
2867 : /* no outputs */
2868 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2869 spitfire_put_itlb_data(i, 0x0UL);
2870 }
2871 }
2872 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2873 cheetah_flush_dtlb_all();
2874 cheetah_flush_itlb_all();
2875 }
2876 __asm__ __volatile__("wrpr %0, 0, %%pstate"
2877 : : "r" (pstate));
2878 }
2879
2880 pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
2881 {
2882 struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2883 pte_t *pte = NULL;
2884
2885 if (page)
2886 pte = (pte_t *) page_address(page);
2887
2888 return pte;
2889 }
2890
2891 pgtable_t pte_alloc_one(struct mm_struct *mm)
2892 {
2893 struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2894 if (!page)
2895 return NULL;
2896 if (!pgtable_pte_page_ctor(page)) {
2897 free_unref_page(page);
2898 return NULL;
2899 }
2900 return (pte_t *) page_address(page);
2901 }
2902
2903 void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2904 {
2905 free_page((unsigned long)pte);
2906 }
2907
2908 static void __pte_free(pgtable_t pte)
2909 {
2910 struct page *page = virt_to_page(pte);
2911
2912 pgtable_pte_page_dtor(page);
2913 __free_page(page);
2914 }
2915
2916 void pte_free(struct mm_struct *mm, pgtable_t pte)
2917 {
2918 __pte_free(pte);
2919 }
2920
2921 void pgtable_free(void *table, bool is_page)
2922 {
2923 if (is_page)
2924 __pte_free(table);
2925 else
2926 kmem_cache_free(pgtable_cache, table);
2927 }
2928
2929 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2930 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2931 pmd_t *pmd)
2932 {
2933 unsigned long pte, flags;
2934 struct mm_struct *mm;
2935 pmd_t entry = *pmd;
2936
2937 if (!pmd_large(entry) || !pmd_young(entry))
2938 return;
2939
2940 pte = pmd_val(entry);
2941
2942 /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
2943 if (!(pte & _PAGE_VALID))
2944 return;
2945
2946 /* We are fabricating 8MB pages using 4MB real hw pages. */
2947 pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
2948
2949 mm = vma->vm_mm;
2950
2951 spin_lock_irqsave(&mm->context.lock, flags);
2952
2953 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
2954 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
2955 addr, pte);
2956
2957 spin_unlock_irqrestore(&mm->context.lock, flags);
2958 }
2959 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2960
2961 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
2962 static void context_reload(void *__data)
2963 {
2964 struct mm_struct *mm = __data;
2965
2966 if (mm == current->mm)
2967 load_secondary_context(mm);
2968 }
2969
2970 void hugetlb_setup(struct pt_regs *regs)
2971 {
2972 struct mm_struct *mm = current->mm;
2973 struct tsb_config *tp;
2974
2975 if (faulthandler_disabled() || !mm) {
2976 const struct exception_table_entry *entry;
2977
2978 entry = search_exception_tables(regs->tpc);
2979 if (entry) {
2980 regs->tpc = entry->fixup;
2981 regs->tnpc = regs->tpc + 4;
2982 return;
2983 }
2984 pr_alert("Unexpected HugeTLB setup in atomic context.\n");
2985 die_if_kernel("HugeTSB in atomic", regs);
2986 }
2987
2988 tp = &mm->context.tsb_block[MM_TSB_HUGE];
2989 if (likely(tp->tsb == NULL))
2990 tsb_grow(mm, MM_TSB_HUGE, 0);
2991
2992 tsb_context_switch(mm);
2993 smp_tsb_sync(mm);
2994
2995 /* On UltraSPARC-III+ and later, configure the second half of
2996 * the Data-TLB for huge pages.
2997 */
2998 if (tlb_type == cheetah_plus) {
2999 bool need_context_reload = false;
3000 unsigned long ctx;
3001
3002 spin_lock_irq(&ctx_alloc_lock);
3003 ctx = mm->context.sparc64_ctx_val;
3004 ctx &= ~CTX_PGSZ_MASK;
3005 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
3006 ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
3007
3008 if (ctx != mm->context.sparc64_ctx_val) {
3009 /* When changing the page size fields, we
3010 * must perform a context flush so that no
3011 * stale entries match. This flush must
3012 * occur with the original context register
3013 * settings.
3014 */
3015 do_flush_tlb_mm(mm);
3016
3017 /* Reload the context register of all processors
3018 * also executing in this address space.
3019 */
3020 mm->context.sparc64_ctx_val = ctx;
3021 need_context_reload = true;
3022 }
3023 spin_unlock_irq(&ctx_alloc_lock);
3024
3025 if (need_context_reload)
3026 on_each_cpu(context_reload, mm, 0);
3027 }
3028 }
3029 #endif
3030
3031 static struct resource code_resource = {
3032 .name = "Kernel code",
3033 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
3034 };
3035
3036 static struct resource data_resource = {
3037 .name = "Kernel data",
3038 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
3039 };
3040
3041 static struct resource bss_resource = {
3042 .name = "Kernel bss",
3043 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
3044 };
3045
3046 static inline resource_size_t compute_kern_paddr(void *addr)
3047 {
3048 return (resource_size_t) (addr - KERNBASE + kern_base);
3049 }
3050
3051 static void __init kernel_lds_init(void)
3052 {
3053 code_resource.start = compute_kern_paddr(_text);
3054 code_resource.end = compute_kern_paddr(_etext - 1);
3055 data_resource.start = compute_kern_paddr(_etext);
3056 data_resource.end = compute_kern_paddr(_edata - 1);
3057 bss_resource.start = compute_kern_paddr(__bss_start);
3058 bss_resource.end = compute_kern_paddr(_end - 1);
3059 }
3060
3061 static int __init report_memory(void)
3062 {
3063 int i;
3064 struct resource *res;
3065
3066 kernel_lds_init();
3067
3068 for (i = 0; i < pavail_ents; i++) {
3069 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
3070
3071 if (!res) {
3072 			pr_warn("Failed to allocate resource.\n");
3073 break;
3074 }
3075
3076 res->name = "System RAM";
3077 res->start = pavail[i].phys_addr;
3078 res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
3079 res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
3080
3081 if (insert_resource(&iomem_resource, res) < 0) {
3082 pr_warn("Resource insertion failed.\n");
3083 break;
3084 }
3085
3086 insert_resource(res, &code_resource);
3087 insert_resource(res, &data_resource);
3088 insert_resource(res, &bss_resource);
3089 }
3090
3091 return 0;
3092 }
3093 arch_initcall(report_memory);
3094
3095 #ifdef CONFIG_SMP
3096 #define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
3097 #else
3098 #define do_flush_tlb_kernel_range __flush_tlb_kernel_range
3099 #endif
3100
3101 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3102 {
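	/* Never flush the firmware's own mappings: if the range
	 * overlaps [LOW_OBP_ADDRESS, HI_OBP_ADDRESS) only flush the
	 * portions that fall outside that window.
	 */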
3103 if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
3104 if (start < LOW_OBP_ADDRESS) {
3105 flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
3106 do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
3107 }
3108 if (end > HI_OBP_ADDRESS) {
3109 flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
3110 do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
3111 }
3112 } else {
3113 flush_tsb_kernel_range(start, end);
3114 do_flush_tlb_kernel_range(start, end);
3115 }
3116 }
3117
3118 void copy_user_highpage(struct page *to, struct page *from,
3119 unsigned long vaddr, struct vm_area_struct *vma)
3120 {
3121 char *vfrom, *vto;
3122
3123 vfrom = kmap_atomic(from);
3124 vto = kmap_atomic(to);
3125 copy_user_page(vto, vfrom, vaddr, to);
3126 kunmap_atomic(vto);
3127 kunmap_atomic(vfrom);
3128
3129 /* If this page has ADI enabled, copy over any ADI tags
3130 * as well
3131 */
3132 if (vma->vm_flags & VM_SPARC_ADI) {
3133 unsigned long pfrom, pto, i, adi_tag;
3134
3135 pfrom = page_to_phys(from);
3136 pto = page_to_phys(to);
3137
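		/* ADI maintains one version tag per adi_blksize() bytes;
		 * read each tag from the source page and store it at the
		 * corresponding offset in the destination page via the
		 * MCD real-address ASI.
		 */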
3138 for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
3139 asm volatile("ldxa [%1] %2, %0\n\t"
3140 : "=r" (adi_tag)
3141 : "r" (i), "i" (ASI_MCD_REAL));
3142 asm volatile("stxa %0, [%1] %2\n\t"
3143 :
3144 : "r" (adi_tag), "r" (pto),
3145 "i" (ASI_MCD_REAL));
3146 pto += adi_blksize();
3147 }
3148 asm volatile("membar #Sync\n\t");
3149 }
3150 }
3151 EXPORT_SYMBOL(copy_user_highpage);
3152
3153 void copy_highpage(struct page *to, struct page *from)
3154 {
3155 char *vfrom, *vto;
3156
3157 vfrom = kmap_atomic(from);
3158 vto = kmap_atomic(to);
3159 copy_page(vto, vfrom);
3160 kunmap_atomic(vto);
3161 kunmap_atomic(vfrom);
3162
3163 /* If this platform is ADI enabled, copy any ADI tags
3164 * as well
3165 */
3166 if (adi_capable()) {
3167 unsigned long pfrom, pto, i, adi_tag;
3168
3169 pfrom = page_to_phys(from);
3170 pto = page_to_phys(to);
3171
3172 for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
3173 asm volatile("ldxa [%1] %2, %0\n\t"
3174 : "=r" (adi_tag)
3175 : "r" (i), "i" (ASI_MCD_REAL));
3176 asm volatile("stxa %0, [%1] %2\n\t"
3177 :
3178 : "r" (adi_tag), "r" (pto),
3179 "i" (ASI_MCD_REAL));
3180 pto += adi_blksize();
3181 }
3182 asm volatile("membar #Sync\n\t");
3183 }
3184 }
3185 EXPORT_SYMBOL(copy_highpage);
3186