/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>

#include <misc/cxl-base.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

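/*
 * Each HPTE carries a software lock bit in its first doubleword. The HPTE
 * itself is stored big-endian, while the generic bitops used below operate
 * on a native-endian unsigned long, so on little-endian hosts the same
 * physical bit is found at position 56+3 rather than 3.
 */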
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

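/*
 * On CPUs without the MMU_FTR_LOCKLESS_TLBIE feature, concurrent tlbie
 * instructions must be serialized; this lock is taken around the global
 * invalidation sequences below in that case.
 */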
DEFINE_RAW_SPINLOCK(native_tlbie_lock);

static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
{
	unsigned long rb;

	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));

	asm volatile("tlbiel %0" : : "r" (rb));
}

/*
 * tlbiel instruction for hash, set invalidation
 * i.e., r=1 and is=01 or is=10 or is=11
 */
static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
					unsigned int pid,
					unsigned int ric, unsigned int prs)
{
	unsigned long rb;
	unsigned long rs;
	unsigned int r = 0; /* hash format */

	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
		     : "memory");
}

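/*
 * Flush every congruence class (set) of the local TLB, bracketed by
 * ptesync so prior PTE updates are visible before the invalidations begin
 * and the invalidations are complete afterwards.
 */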
static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	asm volatile("ptesync": : :"memory");

	for (set = 0; set < num_sets; set++)
		tlbiel_hash_set_isa206(set, is);

	asm volatile("ptesync": : :"memory");
}

static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and any caching of partition table
	 * entries. Then flush the remaining sets of the TLB. Hash mode uses
	 * partition scoped TLB translations.
	 */
	tlbiel_hash_set_isa300(0, is, 0, 2, 0);
	for (set = 1; set < num_sets; set++)
		tlbiel_hash_set_isa300(set, is, 0, 0, 0);

	/*
	 * Now invalidate the process table cache.
	 *
	 * From ISA v3.0B p. 1078:
	 *     The following forms are invalid.
	 *      * PRS=1, R=0, and RIC!=2 (The only process-scoped
	 *        HPT caching is of the Process Table.)
	 */
	tlbiel_hash_set_isa300(0, is, 0, 2, 1);

	asm volatile("ptesync": : :"memory");
}

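/*
 * Flush the entire local TLB for the hash MMU, using the flush sequence
 * appropriate for the CPU generation. "action" selects the invalidation
 * scope (all translations, or only this partition's).
 */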
void hash__tlbiel_all(unsigned int action)
{
	unsigned int is;

	switch (action) {
	case TLB_INVAL_SCOPE_GLOBAL:
		is = 3;
		break;
	case TLB_INVAL_SCOPE_LPID:
		is = 2;
		break;
	default:
		BUG();
	}

	if (early_cpu_has_feature(CPU_FTR_ARCH_300))
		tlbiel_all_isa300(POWER9_TLB_SETS_HASH, is);
	else if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
		tlbiel_all_isa206(POWER8_TLB_SETS, is);
	else if (early_cpu_has_feature(CPU_FTR_ARCH_206))
		tlbiel_all_isa206(POWER7_TLB_SETS, is);
	else
		WARN(1, "%s called on pre-POWER7 CPU\n", __func__);

	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

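/*
 * Build the effective-address argument for a global tlbie from the VPN and
 * the page/segment size encodings, then issue the instruction. Returns the
 * encoded value so callers can trace it.
 */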
static inline unsigned long ___tlbie(unsigned long vpn, int psize,
						int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of a 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And the top two bits are already ignored, because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * bits 58..64 of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	return va;
}

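/*
 * Workaround for the POWER9 tlbie erratum (CPU_FTR_P9_TLBIE_BUG): issue an
 * extra ptesync so the preceding tlbie cannot be reordered, then repeat the
 * tlbie for the same entry.
 */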
static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
		/* Need the extra ptesync to ensure we don't reorder tlbie */
		asm volatile("ptesync": : :"memory");
		___tlbie(vpn, psize, apsize, ssize);
	}
}

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long rb;

	rb = ___tlbie(vpn, psize, apsize, ssize);
	trace_tlbie(0, 0, rb, 0, 0, 0, 0);
}

static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * bits 58..64 of va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	trace_tlbie(0, 1, va, 0, 0, 0, 0);
}

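/*
 * Invalidate one TLB entry, using the CPU-local tlbiel when the caller
 * allows it and the CPU and page size support it, otherwise falling back
 * to a global tlbie, serialized by native_tlbie_lock where required.
 */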
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		fixup_tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

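/*
 * Take the per-HPTE software lock by atomically setting HPTE_LOCK_BIT,
 * busy-waiting at low SMT priority until the bit is observed clear before
 * retrying the atomic operation.
 */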
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		spin_begin();
		while (test_bit(HPTE_LOCK_BIT, word))
			spin_cpu_relax();
		spin_end();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

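/*
 * Insert an HPTE into the given hash group. Returns the slot number within
 * the group on success, with bit 3 set if the entry went into the secondary
 * hash, or -1 if the group is full.
 */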
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
		hpte_v = hpte_old_to_new_v(hpte_v);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

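/*
 * Evict one valid, non-bolted entry from a full hash group so the caller
 * can reuse the slot. Returns the victim's slot offset within the group, or
 * -1 if every entry is bolted. The caller is responsible for any TLB flush.
 */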
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

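/*
 * Update the protection bits of the HPTE at "slot" if it still matches the
 * given VPN. Returns 0 on success, or -1 if the entry has been replaced.
 * The TLB entry is flushed either way, unless the caller passes
 * HPTE_NOHPTE_UPDATE.
 */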
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = hpte_get_old_v(hptep);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			ret = -1;
		} else {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PPP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PPP | HPTE_R_N |
							 HPTE_R_C)));
		}
		native_unlock_hpte(hptep);
	}

	if (flags & HPTE_LOCAL_UPDATE)
		local = 1;
	/*
	 * Ensure it is out of the tlb too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

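/*
 * Look up the slot of a bolted HPTE by VPN. Only the primary hash group is
 * searched, since bolted mappings are never placed in the secondary group.
 * Returns the global slot number, or -1 if no match is found.
 */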
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = hpte_get_old_v(hptep);
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

491 
492 /*
493  * Update the page protection bits. Intended to be used to create
494  * guard pages for kernel data structures on pages which are bolted
495  * in the HPT. Assumes pages being operated on will not be stolen.
496  *
497  * No need to lock here because we should be the only user.
498  */
native_hpte_updateboltedpp(unsigned long newpp,unsigned long ea,int psize,int ssize)499 static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
500 				       int psize, int ssize)
501 {
502 	unsigned long vpn;
503 	unsigned long vsid;
504 	long slot;
505 	struct hash_pte *hptep;
506 
507 	vsid = get_kernel_vsid(ea, ssize);
508 	vpn = hpt_vpn(ea, vsid, ssize);
509 
510 	slot = native_hpte_find(vpn, psize, ssize);
511 	if (slot == -1)
512 		panic("could not find page to bolt\n");
513 	hptep = htab_address + slot;
514 
515 	/* Update the HPTE */
516 	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
517 				~(HPTE_R_PPP | HPTE_R_N)) |
518 			       (newpp & (HPTE_R_PPP | HPTE_R_N)));
519 	/*
520 	 * Ensure it is out of the tlb too. Bolted entries base and
521 	 * actual page size will be same.
522 	 */
523 	tlbie(vpn, psize, psize, ssize, 0);
524 }
525 
526 /*
527  * Remove a bolted kernel entry. Memory hotplug uses this.
528  *
529  * No need to lock here because we should be the only user.
530  */
native_hpte_removebolted(unsigned long ea,int psize,int ssize)531 static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
532 {
533 	unsigned long vpn;
534 	unsigned long vsid;
535 	long slot;
536 	struct hash_pte *hptep;
537 
538 	vsid = get_kernel_vsid(ea, ssize);
539 	vpn = hpt_vpn(ea, vsid, ssize);
540 
541 	slot = native_hpte_find(vpn, psize, ssize);
542 	if (slot == -1)
543 		return -ENOENT;
544 
545 	hptep = htab_address + slot;
546 
547 	VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));
548 
549 	/* Invalidate the hpte */
550 	hptep->v = 0;
551 
552 	/* Invalidate the TLB */
553 	tlbie(vpn, psize, psize, ssize, 0);
554 	return 0;
555 }
556 
557 
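/*
 * Invalidate the HPTE at "slot" if it still matches the given VPN, then
 * flush the corresponding TLB entry. The whole sequence runs with
 * interrupts disabled.
 */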
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	hpte_v = hpte_get_old_v(hptep);

	if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		else
			native_unlock_hpte(hptep);
	}
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
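/*
 * Invalidate every HPTE backing a hugepage. hpte_slot_array records, for
 * each base-size subpage, whether an HPTE exists and which slot it hashed
 * to, so only the valid entries are torn down and flushed.
 */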
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		hpte_v = hpte_get_old_v(hptep);

		/* Even if we miss, we need to invalidate the TLB */
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
			/* recheck with locks held */
			native_lock_hpte(hptep);
			hpte_v = hpte_get_old_v(hptep);

			if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
				/*
				 * Invalidate the hpte. NOTE: this also unlocks it
				 */
				hptep->v = 0;
			} else
				native_unlock_hpte(hptep);
		}
		/*
		 * We need to do a TLB invalidate for each address; the tlbie
		 * instruction compares the entry's VA in the TLB with the VA
		 * specified here.
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);
	}
	local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

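/*
 * Decode a raw HPTE back into its VPN, base and actual page sizes, and
 * segment size. Used by native_hpte_clear(), which must reconstruct the
 * virtual address from the hardware entry in order to issue a tlbie.
 */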
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
		hpte_r = hpte_new_to_old_r(hpte_r);
	}
	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		size = hpte_page_sizes[lp] & 0xf;
		a_size = hpte_page_sizes[lp] >> 4;
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * Clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre-POWER5 hardware, not taking the lock could
 * cause deadlock. On POWER5 and newer, not taking the lock is fine. This
 * only gets called during boot before secondary CPUs have come up and
 * during crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running,  right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call ___tlbie() here rather than tlbie() since we can't
		 * take the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			___tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn = 0;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;
	unsigned int use_local;

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
		mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				continue;
			/* lock and try again */
			native_lock_hpte(hptep);
			hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;

		} pte_iterate_hashed_end();
	}

	if (use_local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		/*
		 * Just do one more with the last used values.
		 */
		fixup_tlbie(vpn, psize, psize, ssize);
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

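/* Install the bare-metal HPT callbacks used when no hypervisor is present. */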
void __init hpte_init_native(void)
{
	mmu_hash_ops.hpte_invalidate	= native_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	= native_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
	mmu_hash_ops.hpte_removebolted = native_hpte_removebolted;
	mmu_hash_ops.hpte_insert	= native_hpte_insert;
	mmu_hash_ops.hpte_remove	= native_hpte_remove;
	mmu_hash_ops.hpte_clear_all	= native_hpte_clear;
	mmu_hash_ops.flush_hash_range = native_flush_hash_range;
	mmu_hash_ops.hugepage_invalidate   = native_hugepage_invalidate;
}