// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

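/*
 * Open-coded, real mode friendly variant of WARN_ON_ONCE(): instead of going
 * through the usual WARN()/BUG table machinery, it prints the warning with
 * pr_err() and dumps the stack, once per call site.
 */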
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 *          mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
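/*
 * Converts the guest physical address encoded in a TCE into a host userspace
 * address, using the real mode safe copy of the memslot array. Optionally
 * returns a pointer to the rmap entry for the guest page via @prmap.
 */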
static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];

	return 0;
}

/*
 * Validates a TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel never accesses these addresses itself (it only puts
 * them into the table for user space to process), we can skip further
 * checks, such as whether the TCE points at guest RAM or whether the page
 * was actually allocated.
 */
static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
		return H_TOO_HARD;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem)
			return H_TOO_HARD;

		if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
			return H_TOO_HARD;
	}

	return H_SUCCESS;
}

/* Note on the use of page_address() in real mode,
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))); this is pure
 * arithmetic and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently, but
 * that would require either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only, and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert
 * them later. Cannot fail, so kvmppc_rm_tce_validate() must be called
 * before it.
 */
static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	/*
	 * kvmppc_rm_ioba_validate() only allows the backing page to be
	 * missing when a zero TCE is being written (clearing); in that
	 * case there is nothing to store. Otherwise the page must already
	 * be allocated.
	 */
	if (!page) {
		WARN_ON_ONCE_RM(tce != 0);
		return;
	}
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

/*
 * TCE pages are allocated on demand by the virtual mode handler
 * (kvmppc_tce_put()); kvmppc_rm_tce_put() cannot allocate them in real mode.
 * Check that kvmppc_rm_tce_put() can succeed, i.e. every needed TCE page is
 * either already allocated or not required (when clearing a TCE entry).
 */
static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages, bool clearing)
{
	unsigned long i, idx, sttpage, sttpages;
	unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);

	if (ret)
		return ret;
	/*
	 * clearing==true means only zero TCEs will be stored, for which
	 * kvmppc_rm_tce_put() does not need a backing page.
	 */
	if (clearing)
		return H_SUCCESS;

	idx = (ioba >> stt->page_shift) - stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
			TCES_PER_PAGE;
	for (i = sttpage; i < sttpage + sttpages; ++i)
		if (!stt->pages[i])
			return H_TOO_HARD;

	return H_SUCCESS;
}

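/*
 * Real mode wrapper around the iommu_table xchg_no_kill() callback. If the
 * previous entry allowed DMA from the device (DMA_FROM_DEVICE or
 * DMA_BIDIRECTIONAL), the backing page is marked dirty via the cached
 * userspace address.
 */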
static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
				(*direction == DMA_BIDIRECTIONAL))) {
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
		/*
		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
		 * calling this, so at this point we still have a valid UA.
		 */
		if (pua && *pua)
			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
	}

	return ret;
}

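/*
 * Real mode TCE cache invalidation; a no-op if the table does not provide a
 * tce_kill() callback.
 */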
extern void iommu_tce_kill_rm(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	if (tbl->it_ops->tce_kill)
		tbl->it_ops->tce_kill(tbl, entry, pages, true);
}

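/*
 * Clears a single hardware TCE (direction DMA_NONE). The TCE cache is not
 * invalidated here; callers issue iommu_tce_kill_rm() separately.
 */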
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
}

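/*
 * Drops the "mapped" reference on the preregistered memory region backing
 * @entry and clears the cached userspace address. Returns H_TOO_HARD if this
 * cannot be completed in real mode, deferring to the virtual mode handler.
 */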
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

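/*
 * Clears one hardware TCE and, if it was mapped, drops the corresponding
 * reference on the preregistered memory region.
 */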
static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

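/*
 * Unmaps every IOMMU subpage covered by one guest TCE entry; the guest view
 * of the table may use a larger page size than the hardware table does.
 */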
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

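/*
 * Maps one hardware TCE: translates @ua via the preregistered memory list,
 * takes a "mapped" reference, programs the entry and caches @ua in
 * it_userspace. Returns H_TOO_HARD whenever the work has to be redone in
 * virtual mode.
 */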
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
			&hpa)))
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

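/*
 * Maps all IOMMU subpages covered by one guest TCE entry.
 */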
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

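/*
 * Real mode handler for H_PUT_TCE: validates the request, updates every
 * hardware IOMMU table attached to the LIOBN and then the guest-visible
 * copy of the table. Returns H_TOO_HARD to defer to the virtual mode handler
 * when the work cannot be done here.
 */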
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_rm_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry, ua, dir);

		iommu_tce_kill_rm(stit->tbl, entry, 1);

		if (ret != H_SUCCESS) {
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
			return ret;
		}
	}

	kvmppc_rm_tce_put(stt, entry, tce);

	return H_SUCCESS;
}

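/*
 * Translates a host userspace address into a host physical address by
 * walking the host page table in real mode. Anything unusual (a huge page
 * mapping, a PTE without the accessed bit set) is punted back with -EAGAIN.
 */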
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	/*
	 * Called in real mode with MSR_EE = 0. We are safe here.
	 * It is ok to do the lookup with arch.pgdir here, because
	 * we are doing this on secondary cpus and current task there
	 * is not the hypervisor. Also this is safe against THP in the
	 * host, because an IPI to the primary thread will wait for the
	 * secondary to exit, which again results in the page table walk
	 * below being able to finish.
	 */
	ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}

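/*
 * Real mode handler for H_PUT_TCE_INDIRECT: reads a list of up to 512 TCEs
 * from guest memory (either preregistered or translated via the host page
 * table under the rmap lock), validates them and applies them to the
 * hardware and guest-visible tables.
 */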
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole list fits within a single 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which is
		 * normally the VFIO case, and the gpa->hpa translation does
		 * not depend on the HPT.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually the case of a guest with emulated devices
		 * only, when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case,
		 * so lock the rmap and translate via the host page table
		 * (__find_linux_pte()).
		 */
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_TOO_HARD;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_rm_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ua = 0;
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
			ret = H_PARAMETER;
			goto invalidate_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
						entry);
				goto invalidate_exit;
			}
		}

		kvmppc_rm_tce_put(stt, entry + i, tce);
	}

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill_rm(stit->tbl, entry, npages);

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}

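/*
 * Real mode handler for H_STUFF_TCE: writes the same TCE value (normally
 * zero) to @npages consecutive entries, unmapping the corresponding hardware
 * IOMMU entries first.
 */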
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to let userspace poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto invalidate_exit;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);

	return ret;
}

/*
 * Handler for the H_GET_TCE hypercall: returns the guest-visible TCE value
 * at @ioba in GPR4. This can be called in either virtual mode or real mode.
 */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	if (!page) {
		vcpu->arch.regs.gpr[4] = 0;
		return H_SUCCESS;
	}
	tbl = (u64 *)page_address(page);

	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */