/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/printk.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/amd-iommu.h>
#include "kfd_priv.h"
#include "kfd_dbgmgr.h"
#include "kfd_topology.h"
#include "kfd_iommu.h"

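/*
 * A device is only usable with the IOMMUv2 functionality in this file
 * if it supports ATS (Address Translation Services), PRI (Page Request
 * Interface) and PASIDs (Process Address Space IDs).
 */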
static const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
					AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
					AMD_IOMMU_DEVICE_FLAG_PASID_SUP;

/** kfd_iommu_check_device - Check whether IOMMU is available for device
 */
int kfd_iommu_check_device(struct kfd_dev *kfd)
{
	struct amd_iommu_device_info iommu_info;
	int err;

	if (!kfd->device_info->needs_iommu_device)
		return -ENODEV;

	iommu_info.flags = 0;
	err = amd_iommu_device_info(kfd->pdev, &iommu_info);
	if (err)
		return err;

	if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags)
		return -ENODEV;

	return 0;
}

/** kfd_iommu_device_init - Initialize IOMMU for device
 */
int kfd_iommu_device_init(struct kfd_dev *kfd)
{
	struct amd_iommu_device_info iommu_info;
	unsigned int pasid_limit;
	int err;
	struct kfd_topology_device *top_dev;

	top_dev = kfd_topology_device_by_id(kfd->id);

	if (!kfd->device_info->needs_iommu_device)
		return 0;

	iommu_info.flags = 0;
	err = amd_iommu_device_info(kfd->pdev, &iommu_info);
	if (err < 0) {
		dev_err(kfd_device,
			"error getting iommu info. is the iommu enabled?\n");
		return -ENODEV;
	}

	if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
		dev_err(kfd_device,
			"error required iommu flags ats %i, pri %i, pasid %i\n",
		       (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
		       (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
		       (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
									!= 0);
		return -ENODEV;
	}

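	/*
	 * The PASID limit is the smaller of what the GPU can address
	 * (max_pasid_bits) and what the IOMMU supports (max_pasids).
	 */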
	pasid_limit = min_t(unsigned int,
			(unsigned int)(1 << kfd->device_info->max_pasid_bits),
			iommu_info.max_pasids);

	if (!kfd_set_pasid_limit(pasid_limit)) {
		dev_err(kfd_device, "error setting pasid limit\n");
		return -EBUSY;
	}

	return 0;
}

/** kfd_iommu_bind_process_to_device - Have the IOMMU bind a process
 *
 * Binds the given process to the given device using its PASID. This
 * enables IOMMUv2 address translation for the process on the device.
 *
 * This function assumes that the process mutex is held.
 */
int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct kfd_process *p = pdd->process;
	int err;

	if (!dev->device_info->needs_iommu_device || pdd->bound == PDD_BOUND)
		return 0;

	if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
		pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
		return -EINVAL;
	}

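	/*
	 * amd_iommu_bind_pasid associates the lead thread's address space
	 * with the process's PASID on this device, so that device accesses
	 * are translated through the process page tables.
	 */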
	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
	if (!err)
		pdd->bound = PDD_BOUND;

	return err;
}

/** kfd_iommu_unbind_process - Unbind process from all devices
 *
 * This removes all IOMMU device bindings of the process. To be used
 * before process termination.
 */
void kfd_iommu_unbind_process(struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->bound == PDD_BOUND)
			amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
}

/* Callback for process shutdown invoked by the IOMMU driver */
static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
{
	struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
	struct kfd_process *p;
	struct kfd_process_device *pdd;

	if (!dev)
		return;

	/*
	 * Look for the process that matches the pasid. If there is no such
	 * process, we either released it in amdkfd's own notifier, or there
	 * is a bug. Unfortunately, there is no way to tell...
	 */
	p = kfd_lookup_process_by_pasid(pasid);
	if (!p)
		return;

	pr_debug("Unbinding process %d from IOMMU\n", pasid);

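	/*
	 * If a debugger is attached to this process on this device, tear
	 * it down before the PASID goes away.
	 */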
	mutex_lock(kfd_get_dbgmgr_mutex());

	if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
		if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
			kfd_dbgmgr_destroy(dev->dbgmgr);
			dev->dbgmgr = NULL;
		}
	}

	mutex_unlock(kfd_get_dbgmgr_mutex());

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p);
	if (pdd)
		/* For GPUs relying on the IOMMU, we need to dequeue here
		 * while the PASID is still bound.
		 */
		kfd_process_dequeue_from_device(pdd);

	mutex_unlock(&p->mutex);

	kfd_unref_process(p);
}

/* This function is called by the IOMMU driver on PPR failure */
static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
		unsigned long address, u16 flags)
{
	struct kfd_dev *dev;

	dev_warn_ratelimited(kfd_device,
			"Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
			PCI_BUS_NUM(pdev->devfn),
			PCI_SLOT(pdev->devfn),
			PCI_FUNC(pdev->devfn),
			pasid,
			address,
			flags);

	dev = kfd_device_by_pci_dev(pdev);
	if (!WARN_ON(!dev))
		kfd_signal_iommu_event(dev, pasid, address,
			flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);

	return AMD_IOMMU_INV_PRI_RSP_INVALID;
}

/*
 * Bind processes to the device that have been temporarily unbound
 * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
 */
static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;
	int err = 0;

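	/*
	 * kfd_processes_table is protected by SRCU; hold the read lock
	 * while iterating over it.
	 */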
	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(kfd, p);

		if (WARN_ON(!pdd) || pdd->bound != PDD_BOUND_SUSPENDED) {
			mutex_unlock(&p->mutex);
			continue;
		}

		err = amd_iommu_bind_pasid(kfd->pdev, p->pasid,
				p->lead_thread);
		if (err < 0) {
			pr_err("Unexpected pasid %d binding failure\n",
					p->pasid);
			mutex_unlock(&p->mutex);
			break;
		}

		pdd->bound = PDD_BOUND;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return err;
}

/*
 * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
 * processes will be restored to PDD_BOUND state in
 * kfd_bind_processes_to_device.
 */
static void kfd_unbind_processes_from_device(struct kfd_dev *kfd)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(kfd, p);

		if (WARN_ON(!pdd)) {
			mutex_unlock(&p->mutex);
			continue;
		}

		if (pdd->bound == PDD_BOUND)
			pdd->bound = PDD_BOUND_SUSPENDED;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);
}

/** kfd_iommu_suspend - Prepare IOMMU for suspend
 *
 * This unbinds processes from the device and disables the IOMMU for
 * the device.
 */
void kfd_iommu_suspend(struct kfd_dev *kfd)
{
	if (!kfd->device_info->needs_iommu_device)
		return;

	kfd_unbind_processes_from_device(kfd);

	amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
	amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
	amd_iommu_free_device(kfd->pdev);
}

/** kfd_iommu_resume - Restore IOMMU after resume
 *
 * This reinitializes the IOMMU for the device and re-binds previously
 * suspended processes to the device.
 */
int kfd_iommu_resume(struct kfd_dev *kfd)
{
	unsigned int pasid_limit;
	int err;

	if (!kfd->device_info->needs_iommu_device)
		return 0;

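	/*
	 * Re-enable the device with the PASID limit that was established
	 * in kfd_iommu_device_init.
	 */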
	pasid_limit = kfd_get_pasid_limit();

	err = amd_iommu_init_device(kfd->pdev, pasid_limit);
	if (err)
		return -ENXIO;

	amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
					iommu_pasid_shutdown_callback);
	amd_iommu_set_invalid_ppr_cb(kfd->pdev,
				     iommu_invalid_ppr_cb);

	err = kfd_bind_processes_to_device(kfd);
	if (err) {
		amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
		amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
		amd_iommu_free_device(kfd->pdev);
		return err;
	}

	return 0;
}

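/*
 * Performance-counter queries implemented by the AMD IOMMU driver,
 * declared here presumably because they are not exposed in a public
 * header.
 */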
extern bool amd_iommu_pc_supported(void);
extern u8 amd_iommu_pc_get_max_banks(u16 devid);
extern u8 amd_iommu_pc_get_max_counters(u16 devid);

/** kfd_iommu_add_perf_counters - Add IOMMU performance counters to topology
 */
int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
{
	struct kfd_perf_properties *props;

	if (!(kdev->node_props.capability & HSA_CAP_ATS_PRESENT))
		return 0;

	if (!amd_iommu_pc_supported())
		return 0;

	props = kfd_alloc_struct(props);
	if (!props)
		return -ENOMEM;
	strcpy(props->block_name, "iommu");
	props->max_concurrent = amd_iommu_pc_get_max_banks(0) *
		amd_iommu_pc_get_max_counters(0); /* assume one iommu */
	list_add_tail(&props->list, &kdev->perf_props);

	return 0;
}