/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

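/*
 * Real-mode-safe WARN_ON_ONCE(): reports at most once via pr_err() and
 * dump_stack() rather than through the generic trap-based WARN machinery.
 */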
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif

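/* Number of (u64) TCE entries that fit in one page of the guest-view table */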
#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 *          mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

/*
 * Validates a TCE address.
 * At the moment only the flags and the page offset mask are checked.
 * As the host kernel never accesses those addresses (it just puts them
 * into the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE points to guest RAM or
 * whether the page was actually allocated).
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);

	/* Allow userspace to poison the TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);

/* Note on the use of page_address() in real mode,
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))); this is a purely
 * arithmetic operation and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently,
 * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only, and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Called in both real and virtual modes.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);

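/*
 * Translates a guest physical address into the corresponding userspace
 * virtual address via the memslots, keeping the offset within the page
 * (the TCE permission bits are masked out). Optionally returns a pointer
 * to the rmap entry for the guest frame (HV only).
 */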
long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
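/*
 * Real-mode wrapper around the IOMMU exchange_rm() callback: atomically
 * replaces a hardware TCE and, if the previous entry allowed the device
 * to write (DMA_FROM_DEVICE or DMA_BIDIRECTIONAL), marks the old page
 * dirty via the cached userspace address.
 */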
static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
				(*direction == DMA_BIDIRECTIONAL))) {
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
		/*
		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache only
		 * after calling this, so *pua still holds the valid UA of
		 * the page being replaced.
		 */
		if (pua && *pua)
			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
	}

	return ret;
}

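/*
 * Clears a hardware TCE by exchanging in an empty (DMA_NONE) entry;
 * the return value is deliberately ignored as this is only used on
 * error paths.
 */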
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
}

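/*
 * Looks up the preregistered memory region backing the entry's cached
 * userspace address, drops its "mapped" reference and clears the cache.
 */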
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

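/*
 * Clears one hardware TCE; if it was actually mapped, also drops the
 * "mapped" reference taken when it was programmed.
 */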
static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

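/*
 * Unmaps every hardware IOMMU page covered by a single guest TCE; the
 * guest IOMMU page size may be bigger than the hardware one.
 */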
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

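/*
 * Maps one hardware TCE: translates the userspace address to a host
 * physical address via preregistered memory, takes a "mapped" reference,
 * programs the TCE and caches the userspace address for later unmapping.
 */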
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
			&hpa)))
		return H_HARDWARE;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_CLOSED;

	ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

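/*
 * Real-mode handler for the H_PUT_TCE hypercall: updates the guest view
 * of the TCE table and any attached hardware IOMMU tables.
 */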
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry, ua, dir);

		if (ret == H_SUCCESS)
			continue;

		if (ret == H_TOO_HARD)
			return ret;

		WARN_ON_ONCE_RM(1);
		kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
	}

	kvmppc_tce_put(stt, entry, tce);

	return H_SUCCESS;
}

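/*
 * Translates a userspace address to a host physical address by walking
 * the usermode (QEMU) page table in real mode; anything non-trivial
 * (huge pages, non-young PTEs) is left for virtual mode to handle.
 */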
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	/*
	 * Called in real mode with MSR_EE = 0. We are safe here.
	 * It is ok to do the lookup with arch.pgdir here, because
	 * we are doing this on secondary cpus and current task there
	 * is not the hypervisor. Also this is safe against THP in the
	 * host, because an IPI to primary thread will wait for the secondary
	 * to exit which will again result in the below page table walk
	 * to finish.
	 */
	ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}

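/*
 * Real-mode handler for the H_PUT_TCE_INDIRECT hypercall: reads a list
 * of TCEs from guest memory and applies each of them as H_PUT_TCE would.
 */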
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list,	unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole table fits in a single 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which
		 * is normally the VFIO case, and gpa->hpa translation does
		 * not depend on the HPT.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually the case of a guest with emulated devices
		 * only, when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case
		 * so lock the rmap and walk the page table via
		 * __find_linux_pte().
		 */
		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_HARDWARE;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		ua = 0;
		if (kvmppc_gpa_to_ua(vcpu->kvm,
				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
				&ua, NULL)) {
			/* Do not leak the rmap lock on the error path */
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto unlock_exit;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry + i);
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}

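/*
 * Real-mode handler for the H_STUFF_TCE hypercall: writes the same TCE
 * value (normally an empty one) to npages consecutive entries.
 */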
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = (u64 *)page_address(page);

	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */