// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for s390 PCI devices
 *
 * Copyright IBM Corp. 2015
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
#include <linux/sizes.h>
#include <asm/pci_dma.h>

/*
 * Physically contiguous memory regions can be mapped with 4 KiB alignment,
 * so we allow all page sizes that are a power-of-two multiple of 4 KiB
 * (no special large page support so far).
 */
#define S390_IOMMU_PGSIZES	(~0xFFFUL)

static const struct iommu_ops s390_iommu_ops;

struct s390_domain {
	struct iommu_domain	domain;
	struct list_head	devices;
	unsigned long		*dma_table;
	spinlock_t		dma_table_lock;
	spinlock_t		list_lock;
};

struct s390_domain_device {
	struct list_head	list;
	struct zpci_dev		*zdev;
};

static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct s390_domain, domain);
}

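/*
 * Report static capabilities to the IOMMU core. On s390, PCI DMA is
 * cache-coherent and interrupt delivery goes through the machine's
 * isolation facilities, so both capabilities are advertised
 * unconditionally.
 */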
static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true;
	default:
		return false;
	}
}

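/*
 * Allocate an unmanaged domain with a fresh, empty DMA translation table.
 * Only IOMMU_DOMAIN_UNMANAGED is supported; for any other domain type we
 * return NULL so the core does not route it through this driver.
 */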
static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
{
	struct s390_domain *s390_domain;

	if (domain_type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
	if (!s390_domain)
		return NULL;

	s390_domain->dma_table = dma_alloc_cpu_table();
	if (!s390_domain->dma_table) {
		kfree(s390_domain);
		return NULL;
	}

	spin_lock_init(&s390_domain->dma_table_lock);
	spin_lock_init(&s390_domain->list_lock);
	INIT_LIST_HEAD(&s390_domain->devices);

	return &s390_domain->domain;
}

static void s390_domain_free(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);

	dma_cleanup_tables(s390_domain->dma_table);
	kfree(s390_domain);
}

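/*
 * Attach a zPCI device to the domain: tear down the device's existing
 * translation setup (from the arch DMA API or a previous domain), register
 * the domain's shared DMA table with the device via zpci_register_ioat(),
 * and add the device to the domain's device list. The first device to
 * attach defines the domain's aperture; later devices must have identical
 * DMA range limits. On failure, the previous translation setup is
 * restored.
 */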
static int s390_iommu_attach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct s390_domain_device *domain_device;
	unsigned long flags;
	int cc, rc;

	if (!zdev)
		return -ENODEV;

	domain_device = kzalloc(sizeof(*domain_device), GFP_KERNEL);
	if (!domain_device)
		return -ENOMEM;

	if (zdev->dma_table && !zdev->s390_domain) {
		cc = zpci_dma_exit_device(zdev);
		if (cc) {
			rc = -EIO;
			goto out_free;
		}
	}

	if (zdev->s390_domain)
		zpci_unregister_ioat(zdev, 0);

	zdev->dma_table = s390_domain->dma_table;
	cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
				virt_to_phys(zdev->dma_table));
	if (cc) {
		rc = -EIO;
		goto out_restore;
	}

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	/* First device defines the DMA range limits */
	if (list_empty(&s390_domain->devices)) {
		domain->geometry.aperture_start = zdev->start_dma;
		domain->geometry.aperture_end = zdev->end_dma;
		domain->geometry.force_aperture = true;
	/* Allow only devices with identical DMA range limits */
	} else if (domain->geometry.aperture_start != zdev->start_dma ||
		   domain->geometry.aperture_end != zdev->end_dma) {
		rc = -EINVAL;
		spin_unlock_irqrestore(&s390_domain->list_lock, flags);
		goto out_restore;
	}
	domain_device->zdev = zdev;
	zdev->s390_domain = s390_domain;
	list_add(&domain_device->list, &s390_domain->devices);
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	return 0;

out_restore:
	if (!zdev->s390_domain) {
		zpci_dma_init_device(zdev);
	} else {
		zdev->dma_table = zdev->s390_domain->dma_table;
		zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
				   virt_to_phys(zdev->dma_table));
	}
out_free:
	kfree(domain_device);

	return rc;
}

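/*
 * Detach a zPCI device from the domain: drop it from the domain's device
 * list and, if it was actually attached to this domain, unregister the
 * domain's DMA table from the device and re-initialize the device for
 * use with the arch DMA API.
 */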
static void s390_iommu_detach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct s390_domain_device *domain_device, *tmp;
	unsigned long flags;
	int found = 0;

	if (!zdev)
		return;

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices,
				 list) {
		if (domain_device->zdev == zdev) {
			list_del(&domain_device->list);
			kfree(domain_device);
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	if (found && (zdev->s390_domain == s390_domain)) {
		zdev->s390_domain = NULL;
		zpci_unregister_ioat(zdev, 0);
		zpci_dma_init_device(zdev);
	}
}

static struct iommu_device *s390_iommu_probe_device(struct device *dev)
{
	struct zpci_dev *zdev;

	if (!dev_is_pci(dev))
		return ERR_PTR(-ENODEV);

	zdev = to_zpci_dev(dev);

	return &zdev->iommu_dev;
}

static void s390_iommu_release_device(struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct iommu_domain *domain;

	/*
	 * This is a workaround for a scenario where the IOMMU API common code
	 * "forgets" to call the detach_dev callback: After binding a device
	 * to vfio-pci and completing the VFIO_SET_IOMMU ioctl (which triggers
	 * the attach_dev), removing the device via
	 * "echo 1 > /sys/bus/pci/devices/.../remove" won't trigger detach_dev,
	 * only release_device will be called via the BUS_NOTIFY_REMOVED_DEVICE
	 * notifier.
	 *
	 * So let's call detach_dev from here if it hasn't been called before.
	 */
	if (zdev && zdev->s390_domain) {
		domain = iommu_get_domain_for_dev(dev);
		if (domain)
			s390_iommu_detach_device(domain, dev);
	}
}

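/*
 * Apply a mapping change to the domain's CPU-side translation table and
 * make it visible to all attached devices. Each 4 KiB page in the range
 * has its page-table entry updated under dma_table_lock, after which
 * zpci_refresh_trans() flushes the translation cache of every attached
 * device. If either step fails while establishing a valid mapping, the
 * entries written so far are rolled back to ZPCI_PTE_INVALID.
 */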
static int s390_iommu_update_trans(struct s390_domain *s390_domain,
				   phys_addr_t pa, dma_addr_t dma_addr,
				   size_t size, int flags)
{
	struct s390_domain_device *domain_device;
	phys_addr_t page_addr = pa & PAGE_MASK;
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags, nr_pages, i;
	unsigned long *entry;
	int rc = 0;

	if (dma_addr < s390_domain->domain.geometry.aperture_start ||
	    dma_addr + size > s390_domain->domain.geometry.aperture_end)
		return -EINVAL;

	nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	if (!nr_pages)
		return 0;

	spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	spin_lock(&s390_domain->list_lock);
	list_for_each_entry(domain_device, &s390_domain->devices, list) {
		rc = zpci_refresh_trans((u64) domain_device->zdev->fh << 32,
					start_dma_addr, nr_pages * PAGE_SIZE);
		if (rc)
			break;
	}
	spin_unlock(&s390_domain->list_lock);

undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(s390_domain->dma_table,
						   dma_addr);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}
	spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);

	return rc;
}

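/*
 * Map a physically contiguous range at the given IOVA. Read access is
 * mandatory since the translation entries carry only a write-protection
 * bit: write-only requests are rejected, and read-only mappings are
 * expressed via ZPCI_TABLE_PROTECTED.
 */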
static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	int flags = ZPCI_PTE_VALID, rc = 0;

	if (!(prot & IOMMU_READ))
		return -EINVAL;

	if (!(prot & IOMMU_WRITE))
		flags |= ZPCI_TABLE_PROTECTED;

	rc = s390_iommu_update_trans(s390_domain, paddr, iova,
				     size, flags);

	return rc;
}

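/*
 * Resolve an IOVA to a physical address by walking the three-level
 * translation table (region table -> segment table -> page table) under
 * dma_table_lock. Returns 0 if the IOVA lies outside the aperture or is
 * not currently mapped.
 */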
static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	unsigned long *sto, *pto, *rto, flags;
	unsigned int rtx, sx, px;
	phys_addr_t phys = 0;

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	rtx = calc_rtx(iova);
	sx = calc_sx(iova);
	px = calc_px(iova);
	rto = s390_domain->dma_table;

	spin_lock_irqsave(&s390_domain->dma_table_lock, flags);
	if (rto && reg_entry_isvalid(rto[rtx])) {
		sto = get_rt_sto(rto[rtx]);
		if (sto && reg_entry_isvalid(sto[sx])) {
			pto = get_st_pto(sto[sx]);
			if (pto && pt_entry_isvalid(pto[px]))
				phys = pto[px] & ZPCI_PTE_ADDR_MASK;
		}
	}
	spin_unlock_irqrestore(&s390_domain->dma_table_lock, flags);

	return phys;
}

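/*
 * Unmap a range by rewriting its entries as ZPCI_PTE_INVALID. The IOVA
 * must currently be mapped; s390_iommu_iova_to_phys() is used both to
 * verify this and to supply the physical address that
 * s390_iommu_update_trans() expects.
 */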
static size_t s390_iommu_unmap(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	int flags = ZPCI_PTE_INVALID;
	phys_addr_t paddr;
	int rc;

	paddr = s390_iommu_iova_to_phys(domain, iova);
	if (!paddr)
		return 0;

	rc = s390_iommu_update_trans(s390_domain, paddr, iova,
				     size, flags);
	if (rc)
		return 0;

	return size;
}

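/*
 * Register a zPCI device with the IOMMU core: create its sysfs
 * representation ("s390-iommu.<fid>") and register it as an IOMMU device
 * instance. Called from the s390 PCI code when a device is added.
 */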
int zpci_init_iommu(struct zpci_dev *zdev)
{
	int rc = 0;

	rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
				    "s390-iommu.%08x", zdev->fid);
	if (rc)
		goto out_err;

	rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops, NULL);
	if (rc)
		goto out_sysfs;

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&zdev->iommu_dev);

out_err:
	return rc;
}

void zpci_destroy_iommu(struct zpci_dev *zdev)
{
	iommu_device_unregister(&zdev->iommu_dev);
	iommu_device_sysfs_remove(&zdev->iommu_dev);
}

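/*
 * The ops table wired into the IOMMU core. No default (DMA API) domain
 * type is offered here: devices start out on the arch DMA API and are
 * only moved to an IOMMU API domain when a driver such as vfio-pci
 * explicitly attaches them.
 */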
static const struct iommu_ops s390_iommu_ops = {
	.capable = s390_iommu_capable,
	.domain_alloc = s390_domain_alloc,
	.probe_device = s390_iommu_probe_device,
	.release_device = s390_iommu_release_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = S390_IOMMU_PGSIZES,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= s390_iommu_attach_device,
		.detach_dev	= s390_iommu_detach_device,
		.map		= s390_iommu_map,
		.unmap		= s390_iommu_unmap,
		.iova_to_phys	= s390_iommu_iova_to_phys,
		.free		= s390_domain_free,
	}
};