// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <linux/iova.h>
#include <uapi/linux/vdpa.h>

#include "vdpa_sim.h"

#define DRV_VERSION "0.1"
#define DRV_AUTHOR "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC "vDPA Device Simulator core"
#define DRV_LICENSE "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}

static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}

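/*
 * Bind the ring addresses programmed through vdpasim_set_vq_address()
 * to the vringh instance, so the simulator can access the virtqueue
 * through the IOTLB.
 */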
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)
			  (uintptr_t)vq->driver_addr,
			  (struct vring_used *)
			  (uintptr_t)vq->device_addr);

	vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}

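/*
 * Bring every virtqueue back to its initial state, point them at
 * address space 0 again, drop all IOTLB mappings and bump the config
 * generation so that drivers can detect the change.
 */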
static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
	int i;

	spin_lock(&vdpasim->iommu_lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);
	}

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_reset(&vdpasim->iommu[i]);

	vdpasim->running = true;
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

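/*
 * Translate a DMA direction into a vhost IOTLB access permission;
 * returns -EFAULT for DMA_NONE.
 */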
static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}

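/*
 * Allocate an IOVA range covering @size bytes and install a
 * translation to @paddr in the IOTLB of address space 0, which backs
 * the simulator's own DMA.
 */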
static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
				    size_t size, unsigned int perm)
{
	struct iova *iova;
	dma_addr_t dma_addr;
	int ret;

	/* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
	iova = alloc_iova(&vdpasim->iova, size >> iova_shift(&vdpasim->iova),
			  ULONG_MAX - 1, true);
	if (!iova)
		return DMA_MAPPING_ERROR;

	dma_addr = iova_dma_addr(&vdpasim->iova, iova);

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(&vdpasim->iommu[0], (u64)dma_addr,
				    (u64)dma_addr + size - 1, (u64)paddr, perm);
	spin_unlock(&vdpasim->iommu_lock);

	if (ret) {
		__free_iova(&vdpasim->iova, iova);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
				size_t size)
{
	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(&vdpasim->iommu[0], (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
}

static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	phys_addr_t paddr = page_to_phys(page) + offset;
	int perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	return vdpasim_map_range(vdpasim, paddr, size, perm);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);

	vdpasim_unmap_range(vdpasim, dma_addr, size);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	phys_addr_t paddr;
	void *addr;

	addr = kmalloc(size, flag);
	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
		return NULL;
	}

	paddr = virt_to_phys(addr);

	*dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
	if (*dma_addr == DMA_MAPPING_ERROR) {
		kfree(addr);
		return NULL;
	}

	return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);

	vdpasim_unmap_range(vdpasim, dma_addr, size);

	kfree(vaddr);
}

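/*
 * DMA ops for the simulator itself: instead of a real IOMMU, DMA of
 * the device's own buffers is translated through the simulator's
 * IOTLB.
 */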
static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

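/**
 * vdpasim_create - create a new vDPA simulator instance
 * @dev_attr: device attributes supplied by the device simulator
 *            (e.g. vdpa_sim_net or vdpa_sim_blk)
 * @config: initial attributes requested through the vDPA netlink API
 *
 * Return: the new instance on success, an ERR_PTR() on failure.
 */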
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
			       const struct vdpa_dev_set_config *config)
{
	const struct vdpa_config_ops *ops;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features &
		    ~dev_attr->supported_features)
			return ERR_PTR(-EINVAL);
		dev_attr->supported_features =
			config->device_features;
	}

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
				    dev_attr->ngroups, dev_attr->nas,
				    dev_attr->name, false);
	if (IS_ERR(vdpasim)) {
		ret = PTR_ERR(vdpasim);
		goto err_alloc;
	}

	vdpasim->dev_attr = *dev_attr;
	INIT_WORK(&vdpasim->work, dev_attr->work_fn);
	spin_lock_init(&vdpasim->lock);
	spin_lock_init(&vdpasim->iommu_lock);

	dev = &vdpasim->vdpa.dev;
	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	set_dma_ops(dev, &vdpasim_dma_ops);
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
				       sizeof(*vdpasim->iommu), GFP_KERNEL);
	if (!vdpasim->iommu)
		goto err_iommu;

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);

	vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);

	ret = iova_cache_get();
	if (ret)
		goto err_iommu;

	/* For simplicity we use an IOVA allocator with byte granularity */
	init_iova_domain(&vdpasim->iova, 1, 0);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

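/*
 * A kick only schedules the per-device work: the simulator processes
 * the virtqueues from process context in dev_attr->work_fn.
 */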
static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	bool old_ready;

	spin_lock(&vdpasim->lock);
	old_ready = vq->ready;
	vq->ready = ready;
	if (vq->ready && !old_ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state->split.avail_index;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->split.avail_index = vrh->last_avail_idx;
	return 0;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	/* RX and TX belong to group 0, CVQ belongs to group 1 */
	if (idx == 2)
		return 1;
	else
		return 0;
}

static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	spin_unlock(&vdpasim->lock);
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = 0;
	vdpasim_do_reset(vdpasim);
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_suspend(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->running = false;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

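/*
 * Attach a virtqueue group to an address space: every virtqueue in
 * @group starts translating through the IOTLB of @asid.
 */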
static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
				  unsigned int asid)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb *iommu;
	int i;

	if (group > vdpasim->dev_attr.ngroups)
		return -EINVAL;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	iommu = &vdpasim->iommu[asid];

	spin_lock(&vdpasim->lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		if (vdpasim_get_vq_group(vdpa, i) == group)
			vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
					 &vdpasim->iommu_lock);

	spin_unlock(&vdpasim->lock);

	return 0;
}

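/*
 * Batched mapping update: replace the whole IOTLB of @asid with the
 * translations described by @iotlb. On failure the IOTLB is left
 * empty.
 */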
static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iommu;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);

	iommu = &vdpasim->iommu[asid];
	vhost_iotlb_reset(iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

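/*
 * Incremental mapping updates, used when batch_mapping is disabled:
 * add or remove a single range in the IOTLB of @asid.
 */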
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
			   u64 iova, u64 size,
			   u64 pa, u32 perm, void *opaque)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
					iova + size - 1, pa, perm, opaque);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
			     u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

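/*
 * Undo vdpasim_create(); invoked by the vDPA core when the device is
 * released.
 */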
static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	cancel_work_sync(&vdpasim->work);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
		vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
	}

	if (vdpa_get_dma_dev(vdpa)) {
		put_iova_domain(&vdpasim->iova);
		iova_cache_put();
	}

	kvfree(vdpasim->buffer);
	vhost_iotlb_free(vdpasim->iommu);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}

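/*
 * The two ops tables below differ only in how mappings are updated:
 * vdpasim_config_ops exposes incremental dma_map()/dma_unmap(), while
 * vdpasim_batch_config_ops exposes set_map() for batched updates.
 */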
static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_vq_group = vdpasim_get_vq_group,
	.get_device_features = vdpasim_get_device_features,
	.set_driver_features = vdpasim_set_driver_features,
	.get_driver_features = vdpasim_get_driver_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.reset = vdpasim_reset,
	.suspend = vdpasim_suspend,
	.get_config_size = vdpasim_get_config_size,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.set_group_asid = vdpasim_set_group_asid,
	.dma_map = vdpasim_dma_map,
	.dma_unmap = vdpasim_dma_unmap,
	.free = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_vq_group = vdpasim_get_vq_group,
	.get_device_features = vdpasim_get_device_features,
	.set_driver_features = vdpasim_set_driver_features,
	.get_driver_features = vdpasim_get_driver_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.reset = vdpasim_reset,
	.suspend = vdpasim_suspend,
	.get_config_size = vdpasim_get_config_size,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.set_group_asid = vdpasim_set_group_asid,
	.set_map = vdpasim_set_map,
	.free = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);