1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * vDPA bus.
4 *
5 * Copyright (c) 2020, Red Hat. All rights reserved.
6 * Author: Jason Wang <jasowang@redhat.com>
7 *
8 */
9
10 #include <linux/module.h>
11 #include <linux/idr.h>
12 #include <linux/slab.h>
13 #include <linux/vdpa.h>
14 #include <uapi/linux/vdpa.h>
15 #include <net/genetlink.h>
16 #include <linux/mod_devicetable.h>
17
18 static LIST_HEAD(mdev_head);
19 /* A global mutex that protects vdpa management device and device level operations. */
20 static DEFINE_MUTEX(vdpa_dev_mutex);
21 static DEFINE_IDA(vdpa_index_ida);
22
23 static struct genl_family vdpa_nl_family;
24
vdpa_dev_probe(struct device * d)25 static int vdpa_dev_probe(struct device *d)
26 {
27 struct vdpa_device *vdev = dev_to_vdpa(d);
28 struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
29 int ret = 0;
30
31 if (drv && drv->probe)
32 ret = drv->probe(vdev);
33
34 return ret;
35 }
36
vdpa_dev_remove(struct device * d)37 static void vdpa_dev_remove(struct device *d)
38 {
39 struct vdpa_device *vdev = dev_to_vdpa(d);
40 struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
41
42 if (drv && drv->remove)
43 drv->remove(vdev);
44 }
45
/* The vDPA bus type: binds vdpa devices to vdpa drivers. */
static struct bus_type vdpa_bus = {
	.name  = "vdpa",
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
};
51
vdpa_release_dev(struct device * d)52 static void vdpa_release_dev(struct device *d)
53 {
54 struct vdpa_device *vdev = dev_to_vdpa(d);
55 const struct vdpa_config_ops *ops = vdev->config;
56
57 if (ops->free)
58 ops->free(vdev);
59
60 ida_simple_remove(&vdpa_index_ida, vdev->index);
61 kfree(vdev);
62 }
63
/**
 * __vdpa_alloc_device - allocate and initialize a vDPA device
 * This allows the driver to do some preparation after the device is
 * initialized but before it is registered.
 * @parent: the parent device
 * @config: the bus operations that is supported by this device
 * @size: size of the parent structure that contains private data
 * @name: name of the vdpa device; optional.
 * @use_va: indicate whether virtual address must be used by this device
 *
 * Driver should use vdpa_alloc_device() wrapper macro instead of
 * using this directly.
 *
 * Return: Returns an error when parent/config/dma_dev is not set or when it
 * fails to get an ida.
 */
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					size_t size, const char *name,
					bool use_va)
{
	struct vdpa_device *vdev;
	int err = -EINVAL;

	if (!config)
		goto err;

	/* dma_map and dma_unmap must be provided (or omitted) together. */
	if (!!config->dma_map != !!config->dma_unmap)
		goto err;

	/* It should only work for the device that use on-chip IOMMU */
	if (use_va && !(config->dma_map || config->set_map))
		goto err;

	err = -ENOMEM;
	vdev = kzalloc(size, GFP_KERNEL);
	if (!vdev)
		goto err;

	err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
	if (err < 0)
		goto err_ida;

	vdev->dev.bus = &vdpa_bus;
	vdev->dev.parent = parent;
	vdev->dev.release = vdpa_release_dev;
	vdev->index = err;		/* ida_alloc() returned the new id */
	vdev->config = config;
	vdev->features_valid = false;
	vdev->use_va = use_va;

	if (name)
		err = dev_set_name(&vdev->dev, "%s", name);
	else
		err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
	if (err)
		goto err_name;

	device_initialize(&vdev->dev);

	return vdev;

err_name:
	/* Manual unwind is safe here: device_initialize() has not run yet,
	 * so the release callback will not fire. ida_free() pairs with
	 * ida_alloc() above (ida_simple_remove() is the deprecated spelling).
	 */
	ida_free(&vdpa_index_ida, vdev->index);
err_ida:
	kfree(vdev);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
134
vdpa_name_match(struct device * dev,const void * data)135 static int vdpa_name_match(struct device *dev, const void *data)
136 {
137 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
138
139 return (strcmp(dev_name(&vdev->dev), data) == 0);
140 }
141
__vdpa_register_device(struct vdpa_device * vdev,int nvqs)142 static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
143 {
144 struct device *dev;
145
146 vdev->nvqs = nvqs;
147
148 lockdep_assert_held(&vdpa_dev_mutex);
149 dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
150 if (dev) {
151 put_device(dev);
152 return -EEXIST;
153 }
154 return device_add(&vdev->dev);
155 }
156
/**
 * _vdpa_register_device - register a vDPA device with vdpa lock held
 * Caller must have made a successful call to vdpa_alloc_device() before.
 * Caller must invoke this routine in the management device dev_add()
 * callback after setting up valid mgmtdev for this vdpa device.
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when it fails to add the device to the vDPA bus
 */
_vdpa_register_device(struct vdpa_device * vdev,int nvqs)167 int _vdpa_register_device(struct vdpa_device *vdev, int nvqs)
168 {
169 if (!vdev->mdev)
170 return -EINVAL;
171
172 return __vdpa_register_device(vdev, nvqs);
173 }
174 EXPORT_SYMBOL_GPL(_vdpa_register_device);
175
/**
 * vdpa_register_device - register a vDPA device
 * Callers must have made a successful call to vdpa_alloc_device() before.
 * @vdev: the vdpa device to be registered to vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when it fails to add the device to the vDPA bus
 */
vdpa_register_device(struct vdpa_device * vdev,int nvqs)184 int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
185 {
186 int err;
187
188 mutex_lock(&vdpa_dev_mutex);
189 err = __vdpa_register_device(vdev, nvqs);
190 mutex_unlock(&vdpa_dev_mutex);
191 return err;
192 }
193 EXPORT_SYMBOL_GPL(vdpa_register_device);
194
/**
 * _vdpa_unregister_device - unregister a vDPA device
 * Caller must invoke this routine as part of management device dev_del()
 * callback.
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	lockdep_assert_held(&vdpa_dev_mutex);
	/* This path is reserved for devices owned by a management device. */
	WARN_ON(!vdev->mdev);
	device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
208
/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	/* Take the global lock so unregistration is serialized with
	 * management-device operations that walk the bus.
	 */
	mutex_lock(&vdpa_dev_mutex);
	device_unregister(&vdev->dev);
	mutex_unlock(&vdpa_dev_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);
220
/**
 * __vdpa_register_driver - register a vDPA device driver
 * @drv: the vdpa device driver to be registered
 * @owner: module owner of the driver
 *
 * Return: Returns an error when the registration fails
 */
__vdpa_register_driver(struct vdpa_driver * drv,struct module * owner)228 int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
229 {
230 drv->driver.bus = &vdpa_bus;
231 drv->driver.owner = owner;
232
233 return driver_register(&drv->driver);
234 }
235 EXPORT_SYMBOL_GPL(__vdpa_register_driver);
236
237 /**
238 * vdpa_unregister_driver - unregister a vDPA device driver
239 * @drv: the vdpa device driver to be unregistered
240 */
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
	/* Detach the driver from the vdpa bus. */
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
246
/**
 * vdpa_mgmtdev_register - register a vdpa management device
 *
 * @mdev: Pointer to vdpa management device
 * vdpa_mgmtdev_register() registers a vdpa management device which supports
 * vdpa device management.
 * Return: Returns 0 on success, or an error when required callback ops are
 * not initialized.
 */
vdpa_mgmtdev_register(struct vdpa_mgmt_dev * mdev)256 int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
257 {
258 if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
259 return -EINVAL;
260
261 INIT_LIST_HEAD(&mdev->list);
262 mutex_lock(&vdpa_dev_mutex);
263 list_add_tail(&mdev->list, &mdev_head);
264 mutex_unlock(&vdpa_dev_mutex);
265 return 0;
266 }
267 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
268
vdpa_match_remove(struct device * dev,void * data)269 static int vdpa_match_remove(struct device *dev, void *data)
270 {
271 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
272 struct vdpa_mgmt_dev *mdev = vdev->mdev;
273
274 if (mdev == data)
275 mdev->ops->dev_del(mdev, vdev);
276 return 0;
277 }
278
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
	mutex_lock(&vdpa_dev_mutex);

	list_del(&mdev->list);

	/* Tear down every vdpa device that belongs to this management device. */
	bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);

	mutex_unlock(&vdpa_dev_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
291
mgmtdev_handle_match(const struct vdpa_mgmt_dev * mdev,const char * busname,const char * devname)292 static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
293 const char *busname, const char *devname)
294 {
295 /* Bus name is optional for simulated management device, so ignore the
296 * device with bus if bus attribute is provided.
297 */
298 if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
299 return false;
300
301 if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
302 return true;
303
304 if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
305 (strcmp(dev_name(mdev->device), devname) == 0))
306 return true;
307
308 return false;
309 }
310
vdpa_mgmtdev_get_from_attr(struct nlattr ** attrs)311 static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
312 {
313 struct vdpa_mgmt_dev *mdev;
314 const char *busname = NULL;
315 const char *devname;
316
317 if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
318 return ERR_PTR(-EINVAL);
319 devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
320 if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
321 busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);
322
323 list_for_each_entry(mdev, &mdev_head, list) {
324 if (mgmtdev_handle_match(mdev, busname, devname))
325 return mdev;
326 }
327 return ERR_PTR(-ENODEV);
328 }
329
vdpa_nl_mgmtdev_handle_fill(struct sk_buff * msg,const struct vdpa_mgmt_dev * mdev)330 static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
331 {
332 if (mdev->device->bus &&
333 nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
334 return -EMSGSIZE;
335 if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
336 return -EMSGSIZE;
337 return 0;
338 }
339
/* Fill one VDPA_CMD_MGMTDEV_NEW message describing @mdev: its handle plus
 * the bitmap of virtio device classes it can create.
 */
static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
			     u32 portid, u32 seq, int flags)
{
	u64 supported_classes = 0;
	void *hdr;
	int i = 0;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
	if (!hdr)
		return -EMSGSIZE;
	err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
	if (err)
		goto msg_err;

	while (mdev->id_table[i].device) {
		/* supported_classes is a u64, so BIT_ULL() must be used;
		 * plain BIT() is undefined for ids >= BITS_PER_LONG on
		 * 32-bit architectures. Ids beyond 63 cannot be represented
		 * in the attribute and are skipped.
		 */
		if (mdev->id_table[i].device <= 63)
			supported_classes |= BIT_ULL(mdev->id_table[i].device);
		i++;
	}

	if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
			      supported_classes, VDPA_ATTR_UNSPEC)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	/* Roll back the partially written message. */
	genlmsg_cancel(msg, hdr);
	return err;
}
373
/* VDPA_CMD_MGMTDEV_GET doit handler: reply with the attributes of the one
 * management device identified by the request's handle attributes.
 */
static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	mutex_lock(&vdpa_dev_mutex);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		mutex_unlock(&vdpa_dev_mutex);
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
		err = PTR_ERR(mdev);
		goto out;
	}

	err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
	mutex_unlock(&vdpa_dev_mutex);
	if (err)
		goto out;
	/* genlmsg_reply() consumes msg, so it must not be freed below. */
	err = genlmsg_reply(msg, info);
	return err;

out:
	nlmsg_free(msg);
	return err;
}
404
/* VDPA_CMD_MGMTDEV_GET dumpit handler: emit one message per registered
 * management device, resuming from the index saved in cb->args[0].
 */
static int
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_mgmt_dev *mdev;
	int start = cb->args[0];
	int idx = 0;
	int err;

	mutex_lock(&vdpa_dev_mutex);
	list_for_each_entry(mdev, &mdev_head, list) {
		/* Skip entries already sent in a previous dump batch. */
		if (idx < start) {
			idx++;
			continue;
		}
		err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			goto out;
		idx++;
	}
out:
	mutex_unlock(&vdpa_dev_mutex);
	/* Save the cursor so the next invocation resumes after this batch. */
	cb->args[0] = idx;
	return msg->len;
}
430
/* VDPA_CMD_DEV_NEW handler: create a vdpa device with the requested name on
 * the management device identified by the request's handle attributes.
 */
static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	mutex_lock(&vdpa_dev_mutex);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
		err = PTR_ERR(mdev);
		goto err;
	}

	/* dev_add() creates and registers the device under vdpa_dev_mutex. */
	err = mdev->ops->dev_add(mdev, name);
err:
	mutex_unlock(&vdpa_dev_mutex);
	return err;
}
455
/* VDPA_CMD_DEV_DEL handler: delete a user-created vdpa device by name.
 * Devices without an owning management device are refused.
 */
static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct vdpa_device *vdev;
	struct device *dev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	mutex_lock(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");
		err = -EINVAL;
		goto mdev_err;
	}
	mdev = vdev->mdev;
	mdev->ops->dev_del(mdev, vdev);
mdev_err:
	/* Drop the reference taken by bus_find_device(). */
	put_device(dev);
dev_err:
	mutex_unlock(&vdpa_dev_mutex);
	return err;
}
489
/* Fill one VDPA_CMD_DEV_NEW message describing @vdev: owning mgmtdev handle,
 * name, device/vendor ids and virtqueue limits.
 */
static int
vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
	      int flags, struct netlink_ext_ack *extack)
{
	u16 max_vq_size;
	u32 device_id;
	u32 vendor_id;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
	if (!hdr)
		return -EMSGSIZE;

	/* Identify the owning management device first. */
	err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
	if (err)
		goto msg_err;

	device_id = vdev->config->get_device_id(vdev);
	vendor_id = vdev->config->get_vendor_id(vdev);
	max_vq_size = vdev->config->get_vq_num_max(vdev);

	err = -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
		goto msg_err;

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	/* Roll back the partially written message. */
	genlmsg_cancel(msg, hdr);
	return err;
}
531
vdpa_nl_cmd_dev_get_doit(struct sk_buff * skb,struct genl_info * info)532 static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
533 {
534 struct vdpa_device *vdev;
535 struct sk_buff *msg;
536 const char *devname;
537 struct device *dev;
538 int err;
539
540 if (!info->attrs[VDPA_ATTR_DEV_NAME])
541 return -EINVAL;
542 devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
543 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
544 if (!msg)
545 return -ENOMEM;
546
547 mutex_lock(&vdpa_dev_mutex);
548 dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
549 if (!dev) {
550 NL_SET_ERR_MSG_MOD(info->extack, "device not found");
551 err = -ENODEV;
552 goto err;
553 }
554 vdev = container_of(dev, struct vdpa_device, dev);
555 if (!vdev->mdev) {
556 err = -EINVAL;
557 goto mdev_err;
558 }
559 err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
560 if (!err)
561 err = genlmsg_reply(msg, info);
562 mdev_err:
563 put_device(dev);
564 err:
565 mutex_unlock(&vdpa_dev_mutex);
566 if (err)
567 nlmsg_free(msg);
568 return err;
569 }
570
/* Cursor state shared between vdpa_nl_cmd_dev_get_dumpit() and the
 * per-device callback vdpa_dev_dump().
 */
struct vdpa_dev_dump_info {
	struct sk_buff *msg;		/* dump message being filled */
	struct netlink_callback *cb;	/* netlink dump callback (seq/portid) */
	int start_idx;			/* resume point taken from cb->args[0] */
	int idx;			/* index of the current device */
};
577
vdpa_dev_dump(struct device * dev,void * data)578 static int vdpa_dev_dump(struct device *dev, void *data)
579 {
580 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
581 struct vdpa_dev_dump_info *info = data;
582 int err;
583
584 if (!vdev->mdev)
585 return 0;
586 if (info->idx < info->start_idx) {
587 info->idx++;
588 return 0;
589 }
590 err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
591 info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
592 if (err)
593 return err;
594
595 info->idx++;
596 return 0;
597 }
598
vdpa_nl_cmd_dev_get_dumpit(struct sk_buff * msg,struct netlink_callback * cb)599 static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
600 {
601 struct vdpa_dev_dump_info info;
602
603 info.msg = msg;
604 info.cb = cb;
605 info.start_idx = cb->args[0];
606 info.idx = 0;
607
608 mutex_lock(&vdpa_dev_mutex);
609 bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
610 mutex_unlock(&vdpa_dev_mutex);
611 cb->args[0] = info.idx;
612 return msg->len;
613 }
614
/* Attribute parsing policy shared by all vdpa netlink commands. */
static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
	[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
	[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
	[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
};
620
/* Generic netlink operations; DEV_NEW/DEV_DEL require GENL_ADMIN_PERM. */
static const struct genl_ops vdpa_nl_ops[] = {
	{
		.cmd = VDPA_CMD_MGMTDEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_mgmtdev_get_doit,
		.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_NEW,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_add_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_del_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_get_doit,
		.dumpit = vdpa_nl_cmd_dev_get_dumpit,
	},
};
647
/* The "vdpa" generic netlink family; registered in vdpa_init(). */
static struct genl_family vdpa_nl_family __ro_after_init = {
	.name = VDPA_GENL_NAME,
	.version = VDPA_GENL_VERSION,
	.maxattr = VDPA_ATTR_MAX,
	.policy = vdpa_nl_policy,
	.netnsok = false,
	.module = THIS_MODULE,
	.ops = vdpa_nl_ops,
	.n_ops = ARRAY_SIZE(vdpa_nl_ops),
};
658
vdpa_init(void)659 static int vdpa_init(void)
660 {
661 int err;
662
663 err = bus_register(&vdpa_bus);
664 if (err)
665 return err;
666 err = genl_register_family(&vdpa_nl_family);
667 if (err)
668 goto err;
669 return 0;
670
671 err:
672 bus_unregister(&vdpa_bus);
673 return err;
674 }
675
static void __exit vdpa_exit(void)
{
	/* Tear down in reverse order of vdpa_init(). */
	genl_unregister_family(&vdpa_nl_family);
	bus_unregister(&vdpa_bus);
	ida_destroy(&vdpa_index_ida);
}
/* core_initcall so the bus exists before vdpa drivers initialize. */
core_initcall(vdpa_init);
module_exit(vdpa_exit);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL v2");