/*
 * This file implements the DMA operations for NVLink devices. The NPU
 * devices all point to the same iommu table as the parent PCI device.
 *
 * Copyright Alistair Popple, IBM Corporation 2015.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
#include <linux/of.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/iommu.h>
#include <linux/debugfs.h>

#include <asm/debugfs.h>
#include <asm/tlb.h>
#include <asm/powernv.h>
#include <asm/reg.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pnv-pci.h>
#include <asm/msi_bitmap.h>

#include "powernv.h"
#include "pci.h"

#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)

/*
 * spinlock to protect initialisation of an npu_context for a particular
 * mm_struct.
 */
static DEFINE_SPINLOCK(npu_context_lock);

/*
 * When an address shootdown range exceeds this threshold we invalidate the
 * entire TLB on the GPU for the given PID rather than each specific address in
 * the range.
 */
static uint64_t atsd_threshold = 2 * 1024 * 1024;
static struct dentry *atsd_threshold_dentry;
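/*
 * The threshold is tunable at run time via the "atsd_threshold" debugfs
 * entry created in pnv_npu2_init() below.
 */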

/*
 * Other types of TCE cache invalidation are not functional in the
 * hardware.
 */
static struct pci_dev *get_pci_dev(struct device_node *dn)
{
	struct pci_dn *pdn = PCI_DN(dn);

	return pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
					   pdn->busno, pdn->devfn);
}

/* Given an NPU device, get the associated PCI device. */
struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
{
	struct device_node *dn;
	struct pci_dev *gpdev;

	if (WARN_ON(!npdev))
		return NULL;

	if (WARN_ON(!npdev->dev.of_node))
		return NULL;

	/* Get associated PCI device */
	dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
	if (!dn)
		return NULL;

	gpdev = get_pci_dev(dn);
	of_node_put(dn);

	return gpdev;
}
EXPORT_SYMBOL(pnv_pci_get_gpu_dev);

/* Given the real PCI device, get a linked NPU device. */
struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
{
	struct device_node *dn;
	struct pci_dev *npdev;

	if (WARN_ON(!gpdev))
		return NULL;

	/* Not all PCI devices have device-tree nodes */
	if (!gpdev->dev.of_node)
		return NULL;

	/* Get associated PCI device */
	dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
	if (!dn)
		return NULL;

	npdev = get_pci_dev(dn);
	of_node_put(dn);

	return npdev;
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);
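
/*
 * A GPU may be linked to more than one NPU device. Callers that need every
 * link probe increasing indices until NULL is returned, as
 * pnv_npu_try_dma_set_bypass() does below. A minimal sketch (use_link() is
 * hypothetical):
 *
 *	for (i = 0; (npdev = pnv_pci_get_npu_dev(gpdev, i)); i++)
 *		use_link(npdev);
 */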

#define NPU_DMA_OP_UNSUPPORTED()					\
	dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
		__func__)

static void *dma_npu_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flag,
			   unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return NULL;
}

static void dma_npu_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
}

static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction,
				   unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
			  int nelems, enum dma_data_direction direction,
			  unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_dma_supported(struct device *dev, u64 mask)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static u64 dma_npu_get_required_mask(struct device *dev)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static const struct dma_map_ops dma_npu_ops = {
	.map_page		= dma_npu_map_page,
	.map_sg			= dma_npu_map_sg,
	.alloc			= dma_npu_alloc,
	.free			= dma_npu_free,
	.dma_supported		= dma_npu_dma_supported,
	.get_required_mask	= dma_npu_get_required_mask,
};

/*
 * Returns the PE associated with the PCI device of the given
 * NPU. Also returns the linked PCI device via *gpdev if gpdev != NULL.
 */
static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
						  struct pci_dev **gpdev)
{
	struct pnv_phb *phb;
	struct pci_controller *hose;
	struct pci_dev *pdev;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	pdev = pnv_pci_get_gpu_dev(npe->pdev);
	if (!pdev)
		return NULL;

	pdn = pci_get_pdn(pdev);
	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return NULL;

	hose = pci_bus_to_host(pdev->bus);
	phb = hose->private_data;
	pe = &phb->ioda.pe_array[pdn->pe_number];

	if (gpdev)
		*gpdev = pdev;

	return pe;
}

long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
		struct iommu_table *tbl)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
		tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;

	pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
			start_addr, start_addr + win_size - 1,
			IOMMU_PAGE_SIZE(tbl));

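	/*
	 * Each TCE entry is 8 bytes, so the table size passed to OPAL is
	 * the (per-level) entry count shifted left by 3.
	 */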
	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			npe->pe_number,
			npe->pe_number,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	/* Add the table to the list so its TCE cache will get invalidated */
	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &npe->table_group);

	return 0;
}

long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	pe_info(npe, "Removing DMA window\n");

	rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
			npe->pe_number,
			0/* levels */, 0/* table address */,
			0/* table size */, 0/* page size */);
	if (rc) {
		pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
			&npe->table_group);

	return 0;
}

/*
 * Enables 32 bit DMA on NPU.
 */
static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
{
	struct pci_dev *gpdev;
	struct pnv_ioda_pe *gpe;
	int64_t rc;

	/*
	 * Find the associated PCI devices and get the dma window
	 * information from there.
	 */
	if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
		return;

	gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (!gpe)
		return;

	rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);

	/*
	 * We don't initialise npu_pe->tce32_table as we always use
	 * dma_npu_ops which are nops.
	 */
	set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
}

/*
 * Enables bypass mode on the NPU. The NPU only supports one
 * window per link, so bypass needs to be explicitly enabled or
 * disabled. Unlike for a PHB3, bypass and non-bypass modes can't be
 * active at the same time.
 */
static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc = 0;
	phys_addr_t top = memblock_end_of_DRAM();

	if (phb->type != PNV_PHB_NPU_NVLINK || !npe->pdev)
		return -EINVAL;

	rc = pnv_npu_unset_window(npe, 0);
	if (rc != OPAL_SUCCESS)
		return rc;

	/* Enable the bypass window */

	top = roundup_pow_of_two(top);
	dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
			npe->pe_number);
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, top);

	if (rc == OPAL_SUCCESS)
		pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	return rc;
}

void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
{
	int i;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	struct pnv_ioda_pe *npe;
	struct pci_dev *npdev;

	for (i = 0; ; ++i) {
		npdev = pnv_pci_get_npu_dev(gpdev, i);

		if (!npdev)
			break;

		pdn = pci_get_pdn(npdev);
		if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
			return;

		phb = pci_bus_to_host(npdev->bus)->private_data;

		/* We only do bypass if it's enabled on the linked device */
		npe = &phb->ioda.pe_array[pdn->pe_number];

		if (bypass) {
			dev_info(&npdev->dev,
					"Using 64-bit DMA iommu bypass\n");
			pnv_npu_dma_set_bypass(npe);
		} else {
			dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
			pnv_npu_dma_set_32(npe);
		}
	}
}

/* Switch ownership from platform code to external user (e.g. VFIO) */
void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	/*
	 * Note: the NPU has just a single TVE in the hardware, which means
	 * that while it is used by the kernel it can have either a 32-bit
	 * window or DMA bypass, but never both. So we deconfigure the 32-bit
	 * window only if it was enabled at the moment of ownership change.
	 */
	if (npe->table_group.tables[0]) {
		pnv_npu_unset_window(npe, 0);
		return;
	}

	/* Disable bypass */
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, 0);
	if (rc) {
		pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
		return;
	}
	pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
}

struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	struct pci_bus *pbus = phb->hose->bus;
	struct pci_dev *npdev, *gpdev = NULL, *gptmp;
	struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);

	if (!gpe || !gpdev)
		return NULL;

	list_for_each_entry(npdev, &pbus->devices, bus_list) {
		gptmp = pnv_pci_get_gpu_dev(npdev);

		if (gptmp != gpdev)
			continue;

		pe_info(gpe, "Attached NPU %s\n", dev_name(&npdev->dev));
		iommu_group_add_device(gpe->table_group.group, &npdev->dev);
	}

	return gpe;
}

/* Maximum number of nvlinks per npu */
#define NV_MAX_LINKS 6

/* Maximum index of npu2 hosts in the system. Always < NV_MAX_NPUS */
static int max_npu2_index;

struct npu_context {
	struct mm_struct *mm;
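	/* NPU links this context is active on, indexed [npu->index][nvlink_index] */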
	struct pci_dev *npdev[NV_MAX_NPUS][NV_MAX_LINKS];
	struct mmu_notifier mn;
	struct kref kref;
	bool nmmu_flush;

	/* Callback to stop translation requests on a given GPU */
	void (*release_cb)(struct npu_context *context, void *priv);

	/*
	 * Private pointer passed to the above callback for usage by
	 * device drivers.
	 */
	void *priv;
};

struct mmio_atsd_reg {
	struct npu *npu;
	int reg;
};

/*
 * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
 * if none are available.
 */
static int get_mmio_atsd_reg(struct npu *npu)
{
	int i;

	for (i = 0; i < npu->mmio_atsd_count; i++) {
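		/*
		 * The plain test_bit() cheaply skips registers that are
		 * obviously busy; only an apparently free register is claimed
		 * with the atomic test_and_set_bit_lock() below.
		 */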
		if (!test_bit(i, &npu->mmio_atsd_usage))
			if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
				return i;
	}

	return -ENOSPC;
}

static void put_mmio_atsd_reg(struct npu *npu, int reg)
{
	clear_bit_unlock(reg, &npu->mmio_atsd_usage);
}

/* MMIO ATSD register offsets */
#define XTS_ATSD_AVA  1
#define XTS_ATSD_STAT 2

static void mmio_launch_invalidate(struct mmio_atsd_reg *mmio_atsd_reg,
				unsigned long launch, unsigned long va)
{
	struct npu *npu = mmio_atsd_reg->npu;
	int reg = mmio_atsd_reg->reg;

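	/*
	 * The target address must be visible to the NPU before the launch
	 * write triggers the ATSD; eieio() orders the two MMIO stores.
	 */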
	__raw_writeq_be(va, npu->mmio_atsd_regs[reg] + XTS_ATSD_AVA);
	eieio();
	__raw_writeq_be(launch, npu->mmio_atsd_regs[reg]);
}

static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
				unsigned long pid, bool flush)
{
	int i;
	unsigned long launch;

	for (i = 0; i <= max_npu2_index; i++) {
		if (mmio_atsd_reg[i].reg < 0)
			continue;

		/* IS set to invalidate matching PID */
		launch = PPC_BIT(12);

		/* PRS set to process-scoped */
		launch |= PPC_BIT(13);

		/* AP */
		launch |= (u64)
			mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

		/* PID */
		launch |= pid << PPC_BITLSHIFT(38);

		/* No flush */
		launch |= !flush << PPC_BITLSHIFT(39);

		/* Invalidating the entire process doesn't use a va */
		mmio_launch_invalidate(&mmio_atsd_reg[i], launch, 0);
	}
}

static void mmio_invalidate_va(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
			unsigned long va, unsigned long pid, bool flush)
{
	int i;
	unsigned long launch;

	for (i = 0; i <= max_npu2_index; i++) {
		if (mmio_atsd_reg[i].reg < 0)
			continue;

		/* IS set to invalidate target VA */
		launch = 0;

		/* PRS set to process scoped */
		launch |= PPC_BIT(13);

		/* AP */
		launch |= (u64)
			mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

		/* PID */
		launch |= pid << PPC_BITLSHIFT(38);

		/* No flush */
		launch |= !flush << PPC_BITLSHIFT(39);

		mmio_launch_invalidate(&mmio_atsd_reg[i], launch, va);
	}
}

#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)

static void mmio_invalidate_wait(
	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
{
	struct npu *npu;
	int i, reg;

	/* Wait for all invalidations to complete */
	for (i = 0; i <= max_npu2_index; i++) {
		if (mmio_atsd_reg[i].reg < 0)
			continue;

		/* Wait for completion */
		npu = mmio_atsd_reg[i].npu;
		reg = mmio_atsd_reg[i].reg;
		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
			cpu_relax();
	}
}

/*
 * Acquires all the address translation shootdown (ATSD) registers required to
 * launch an ATSD on all links this npu_context is active on.
 */
static void acquire_atsd_reg(struct npu_context *npu_context,
			struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
{
	int i, j;
	struct npu *npu;
	struct pci_dev *npdev;
	struct pnv_phb *nphb;

	for (i = 0; i <= max_npu2_index; i++) {
		mmio_atsd_reg[i].reg = -1;
		for (j = 0; j < NV_MAX_LINKS; j++) {
			/*
			 * There are no ordering requirements with respect to
			 * the setup of struct npu_context, but to ensure
			 * consistent behaviour we need to ensure npdev[][] is
			 * only read once.
			 */
			npdev = READ_ONCE(npu_context->npdev[i][j]);
			if (!npdev)
				continue;

			nphb = pci_bus_to_host(npdev->bus)->private_data;
			npu = &nphb->npu;
			mmio_atsd_reg[i].npu = npu;
			mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
			while (mmio_atsd_reg[i].reg < 0) {
				mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
				cpu_relax();
			}
			break;
		}
	}
}

/*
 * Release previously acquired ATSD registers. To avoid deadlocks the registers
 * must be released in the same order they were acquired above in
 * acquire_atsd_reg.
 */
static void release_atsd_reg(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
{
	int i;

	for (i = 0; i <= max_npu2_index; i++) {
		/*
		 * We can't rely on npu_context->npdev[][] being the same here
		 * as when acquire_atsd_reg() was called, hence we use the
		 * values stored in mmio_atsd_reg during the acquire phase
		 * rather than re-reading npdev[][].
		 */
		if (mmio_atsd_reg[i].reg < 0)
			continue;

		put_mmio_atsd_reg(mmio_atsd_reg[i].npu, mmio_atsd_reg[i].reg);
	}
}

/*
 * Invalidate either a single address or an entire PID depending on
 * the value of va.
 */
static void mmio_invalidate(struct npu_context *npu_context, int va,
			unsigned long address, bool flush)
{
	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
	unsigned long pid = npu_context->mm->context.id;

	if (npu_context->nmmu_flush)
		/*
		 * Unfortunately the nest mmu does not support flushing specific
		 * addresses so we have to flush the whole mm once before
		 * shooting down the GPU translation.
		 */
		flush_all_mm(npu_context->mm);

	/*
	 * Loop over all the NPUs this process is active on and launch
	 * an invalidate.
	 */
	acquire_atsd_reg(npu_context, mmio_atsd_reg);
	if (va)
		mmio_invalidate_va(mmio_atsd_reg, address, pid, flush);
	else
		mmio_invalidate_pid(mmio_atsd_reg, pid, flush);

	mmio_invalidate_wait(mmio_atsd_reg);
	if (flush) {
		/*
		 * The GPU requires two flush ATSDs to ensure all entries have
		 * been flushed. We use PID 0 as it will never be used for a
		 * process on the GPU.
		 */
		mmio_invalidate_pid(mmio_atsd_reg, 0, true);
		mmio_invalidate_wait(mmio_atsd_reg);
		mmio_invalidate_pid(mmio_atsd_reg, 0, true);
		mmio_invalidate_wait(mmio_atsd_reg);
	}
	release_atsd_reg(mmio_atsd_reg);
}

static void pnv_npu2_mn_release(struct mmu_notifier *mn,
				struct mm_struct *mm)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	/* Call into device driver to stop requests to the NMMU */
	if (npu_context->release_cb)
		npu_context->release_cb(npu_context, npu_context->priv);

	/*
	 * There should be no more translation requests for this PID, but we
	 * need to ensure any entries for it are removed from the TLB.
	 */
	mmio_invalidate(npu_context, 0, 0, true);
}

static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long address,
				pte_t pte)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	mmio_invalidate(npu_context, 1, address, true);
}

static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);
	unsigned long address;

	if (end - start > atsd_threshold) {
		/*
		 * Just invalidate the entire PID if the address range is too
		 * large.
		 */
		mmio_invalidate(npu_context, 0, 0, true);
	} else {
		for (address = start; address < end; address += PAGE_SIZE)
			mmio_invalidate(npu_context, 1, address, false);

		/* Do the flush only on the final address == end */
		mmio_invalidate(npu_context, 1, address, true);
	}
}

static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
	.release = pnv_npu2_mn_release,
	.change_pte = pnv_npu2_mn_change_pte,
	.invalidate_range = pnv_npu2_mn_invalidate_range,
};

/*
 * Call into OPAL to set up the nmmu context for the current task in
 * the NPU. This must be called to set up the context tables before the
 * GPU issues ATRs. gpdev should be a pointer to the PCIe GPU device.
 *
 * A release callback should be registered to allow a device driver to
 * be notified that it should not launch any new translation requests
 * as the final TLB invalidate is about to occur.
 *
 * Returns an error if no contexts are currently available, or an
 * npu_context which should be passed to pnv_npu2_handle_fault().
 *
 * mmap_sem must be held in write mode and this must not be called from
 * interrupt context.
 */
struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
			unsigned long flags,
			void (*cb)(struct npu_context *, void *),
			void *priv)
{
	int rc;
	u32 nvlink_index;
	struct device_node *nvlink_dn;
	struct mm_struct *mm = current->mm;
	struct pnv_phb *nphb;
	struct npu *npu;
	struct npu_context *npu_context;

	/*
	 * At present we don't support GPUs connected to multiple NPUs and I'm
	 * not sure the hardware does either.
	 */
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return ERR_PTR(-ENODEV);

	if (!npdev)
		/* No nvlink associated with this GPU device */
		return ERR_PTR(-ENODEV);

	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
							&nvlink_index)))
		return ERR_PTR(-ENODEV);

	if (!mm || mm->context.id == 0) {
		/*
		 * Kernel thread contexts are not supported and context id 0 is
		 * reserved on the GPU.
		 */
		return ERR_PTR(-EINVAL);
	}

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;

	/*
	 * Setup the NPU context table for a particular GPU. These need to be
	 * per-GPU as we need the tables to filter ATSDs when there are no
	 * active contexts on a particular GPU. It is safe for these to be
	 * called concurrently with destroy as the OPAL call takes appropriate
	 * locks and refcounts on init/destroy.
	 */
	rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	if (rc < 0)
		return ERR_PTR(-ENOSPC);

	/*
	 * We store the npu pci device so we can more easily get at the
	 * associated npus.
	 */
	spin_lock(&npu_context_lock);
	npu_context = mm->context.npu_context;
	if (npu_context) {
		if (npu_context->release_cb != cb ||
			npu_context->priv != priv) {
			spin_unlock(&npu_context_lock);
			opal_npu_destroy_context(nphb->opal_id, mm->context.id,
						PCI_DEVID(gpdev->bus->number,
							gpdev->devfn));
			return ERR_PTR(-EINVAL);
		}

		WARN_ON(!kref_get_unless_zero(&npu_context->kref));
	}
	spin_unlock(&npu_context_lock);

	if (!npu_context) {
		/*
		 * We can set up these fields without holding the
		 * npu_context_lock as the npu_context hasn't been returned to
		 * the caller meaning it can't be destroyed. Parallel allocation
		 * is protected against by mmap_sem.
		 */
		rc = -ENOMEM;
		npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
		if (npu_context) {
			kref_init(&npu_context->kref);
			npu_context->mm = mm;
			npu_context->mn.ops = &nv_nmmu_notifier_ops;
			rc = __mmu_notifier_register(&npu_context->mn, mm);
		}

		if (rc) {
			kfree(npu_context);
			opal_npu_destroy_context(nphb->opal_id, mm->context.id,
					PCI_DEVID(gpdev->bus->number,
						gpdev->devfn));
			return ERR_PTR(rc);
		}

		mm->context.npu_context = npu_context;
	}

	npu_context->release_cb = cb;
	npu_context->priv = priv;

	/*
	 * npdev is a pci_dev pointer setup by the PCI code. We assign it to
	 * npdev[][] to indicate to the mmu notifiers that an invalidation
	 * should also be sent over this nvlink. The notifiers don't use any
	 * other fields in npu_context, so we just need to ensure that when
	 * they dereference npu_context->npdev[][] it is either a valid
	 * pointer or NULL.
	 */
	WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev);

	if (!nphb->npu.nmmu_flush) {
		/*
		 * If we're not explicitly flushing ourselves we need to mark
		 * the thread for global flushes.
		 */
		npu_context->nmmu_flush = false;
		mm_context_add_copro(mm);
	} else
		npu_context->nmmu_flush = true;

	return npu_context;
}
EXPORT_SYMBOL(pnv_npu2_init_context);
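
/*
 * A minimal driver-side sketch of the context lifecycle. The helper names
 * my_release_cb(), my_stop_translations() and my_priv are hypothetical and
 * only illustrate how the API above is intended to be used:
 *
 *	static void my_release_cb(struct npu_context *ctx, void *priv)
 *	{
 *		my_stop_translations(priv);	// stop new ATRs from the GPU
 *	}
 *
 *	down_write(&current->mm->mmap_sem);
 *	ctx = pnv_npu2_init_context(gpdev, flags, my_release_cb, my_priv);
 *	up_write(&current->mm->mmap_sem);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	// With mmap_sem held, GPU faults are then resolved with
 *	// pnv_npu2_handle_fault() and the context is finally torn down with
 *	// pnv_npu2_destroy_context(ctx, gpdev).
 */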

static void pnv_npu2_release_context(struct kref *kref)
{
	struct npu_context *npu_context =
		container_of(kref, struct npu_context, kref);

	if (!npu_context->nmmu_flush)
		mm_context_remove_copro(npu_context->mm);

	npu_context->mm->context.npu_context = NULL;
}

/*
 * Destroy a context on the given GPU. May free the npu_context if it is no
 * longer active on any GPUs. Must not be called from interrupt context.
 */
void pnv_npu2_destroy_context(struct npu_context *npu_context,
			struct pci_dev *gpdev)
{
	int removed;
	struct pnv_phb *nphb;
	struct npu *npu;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct device_node *nvlink_dn;
	u32 nvlink_index;

	if (WARN_ON(!npdev))
		return;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
							&nvlink_index)))
		return;
	WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL);
	opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	spin_lock(&npu_context_lock);
	removed = kref_put(&npu_context->kref, pnv_npu2_release_context);
	spin_unlock(&npu_context_lock);

	/*
	 * We need to do this outside of pnv_npu2_release_context so that it is
	 * outside the spinlock as mmu_notifier_destroy uses SRCU.
	 */
	if (removed) {
		mmu_notifier_unregister(&npu_context->mn,
					npu_context->mm);

		kfree(npu_context);
	}
}
EXPORT_SYMBOL(pnv_npu2_destroy_context);

/*
 * Assumes mmap_sem is held for the context's associated mm.
 */
int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
			unsigned long *flags, unsigned long *status, int count)
{
	u64 rc = 0, result = 0;
	int i, is_write;
	struct page *page[1];

	/* mmap_sem should be held so the mm_struct must be present */
	struct mm_struct *mm = context->mm;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return -ENODEV;

	WARN_ON(!rwsem_is_locked(&mm->mmap_sem));

	for (i = 0; i < count; i++) {
		is_write = flags[i] & NPU2_WRITE;
		rc = get_user_pages_remote(NULL, mm, ea[i], 1,
					is_write ? FOLL_WRITE : 0,
					page, NULL, NULL);

		/*
		 * To support virtualised environments we will have to do an
		 * access to the page to ensure it gets faulted into the
		 * hypervisor. For the moment virtualisation is not supported in
		 * other areas so leave the access out.
		 */
		if (rc != 1) {
			status[i] = rc;
			result = -EFAULT;
			continue;
		}

		status[i] = 0;
		put_page(page[0]);
	}

	return result;
}
EXPORT_SYMBOL(pnv_npu2_handle_fault);

int pnv_npu2_init(struct pnv_phb *phb)
{
	unsigned int i;
	u64 mmio_atsd;
	struct device_node *dn;
	struct pci_dev *gpdev;
	static int npu_index;
	uint64_t rc = 0;

	if (!atsd_threshold_dentry) {
		atsd_threshold_dentry = debugfs_create_x64("atsd_threshold",
				   0600, powerpc_debugfs_root, &atsd_threshold);
	}

	phb->npu.nmmu_flush =
		of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
	for_each_child_of_node(phb->hose->dn, dn) {
		gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
		if (gpdev) {
			rc = opal_npu_map_lpar(phb->opal_id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn),
				0, 0);
			if (rc)
				dev_err(&gpdev->dev,
					"Error %lld mapping device to LPAR\n",
					rc);
		}
	}

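	/*
	 * Each "ibm,mmio-atsd" entry is mapped as a 32-byte MMIO window: the
	 * launch register at offset 0, with the AVA and STAT registers at the
	 * following quadwords (see XTS_ATSD_AVA/XTS_ATSD_STAT above).
	 */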
	for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
							i, &mmio_atsd); i++)
		phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);

	pr_info("NPU%lld: Found %d MMIO ATSD registers\n", phb->opal_id, i);
	phb->npu.mmio_atsd_count = i;
	phb->npu.mmio_atsd_usage = 0;
	npu_index++;
	if (WARN_ON(npu_index >= NV_MAX_NPUS))
		return -ENOSPC;
	max_npu2_index = npu_index;
	phb->npu.index = npu_index;

	return 0;
}