/*
 * High memory support for Xtensa architecture
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2014 Cadence Design Systems Inc.
 */

#include <linux/export.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

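/*
 * With DCACHE_WAY_SIZE > PAGE_SIZE the data cache is virtually
 * aliasing: one physical page can be cached under several virtual
 * addresses that differ in their cache "color".  The pkmap state
 * (last allocated slot and its wait queue) is therefore replicated
 * per color; the PKMAP helpers in <asm/highmem.h> are expected to
 * pick the entry whose color matches the page being mapped.
 */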
#if DCACHE_WAY_SIZE > PAGE_SIZE
unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];
wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];

static void __init kmap_waitqueues_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i)
		init_waitqueue_head(pkmap_map_wait_arr + i);
}
#else
static inline void kmap_waitqueues_init(void)
{
}
#endif

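/*
 * Pick the fixmap slot for an atomic kmap: each CPU owns KM_TYPE_NR
 * consecutive slots, and every slot is replicated once per cache
 * color so that the caller can choose the replica matching the color
 * of the page it is about to map.
 */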
static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
{
	return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS +
		color;
}

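/*
 * Map a highmem page into the fixmap area.  The slot is chosen so its
 * virtual address has the same cache color as the page's physical
 * address (DCACHE_ALIAS), which keeps this mapping coherent with any
 * other mapping of the same page.  The generic kmap_atomic_prot()
 * caller is expected to have disabled preemption and page faults.
 */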
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	idx = kmap_idx(kmap_atomic_idx_push(),
		       DCACHE_ALIAS(page_to_phys(page)));
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte + idx)));
#endif
	set_pte(kmap_pte + idx, mk_pte(page, prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_high_prot);

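/*
 * Tear down an atomic kmap.  Addresses outside the fixmap window
 * (e.g. lowmem pages that were never remapped) are left alone.
 */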
void kunmap_atomic_high(void *kvaddr)
{
	if (kvaddr >= (void *)FIXADDR_START &&
	    kvaddr < (void *)FIXADDR_TOP) {
		int idx = kmap_idx(kmap_atomic_idx(),
				   DCACHE_ALIAS((unsigned long)kvaddr));

		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		pte_clear(&init_mm, kvaddr, kmap_pte + idx);
		local_flush_tlb_kernel_range((unsigned long)kvaddr,
					     (unsigned long)kvaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}
}
EXPORT_SYMBOL(kunmap_atomic_high);

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* Check if this memory layout is broken because the PKMAP area
	 * overlaps the page table.
	 */
	BUILD_BUG_ON(PKMAP_BASE < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = virt_to_kpte(kmap_vstart);
	kmap_waitqueues_init();
}