// SPDX-License-Identifier: GPL-2.0+
/*
 * VMEbus User access driver
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by:
 *   Tom Armistead and Ajit Prem
 *   Copyright 2004 Motorola Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/refcount.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/types.h>

#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/vme.h>

#include "vme_user.h"

static const char driver_name[] = "vme_user";

static int bus[VME_USER_BUS_MAX];
static unsigned int bus_num;
/* Currently Documentation/admin-guide/devices.rst defines the
 * following for VME:
 *
 * 221 char	VME bus
 *		  0 = /dev/bus/vme/m0		First master image
 *		  1 = /dev/bus/vme/m1		Second master image
 *		  2 = /dev/bus/vme/m2		Third master image
 *		  3 = /dev/bus/vme/m3		Fourth master image
 *		  4 = /dev/bus/vme/s0		First slave image
 *		  5 = /dev/bus/vme/s1		Second slave image
 *		  6 = /dev/bus/vme/s2		Third slave image
 *		  7 = /dev/bus/vme/s3		Fourth slave image
 *		  8 = /dev/bus/vme/ctl		Control
 *
 * It is expected that all VME bus drivers will use the same interface.
 * For interface documentation see http://www.vmelinux.org/.
 *
 * However, the VME driver at http://www.vmelinux.org/ is rather old and
 * doesn't even support the tsi148 chipset (which has 8 master and 8 slave
 * windows). We'll run with the fixed mapping above as far as possible for
 * now, although it probably makes sense to eventually drop the old
 * mappings and do everything dynamically.
 *
 * So for now this driver is restricted to the 4 masters and 4 slaves
 * defined above and tries to support at least part of the vmelinux.org
 * interface; a driver providing a saner interface can be written as an
 * alternative later.
 *
 * Note that the vmelinux.org driver never supported slave images; the
 * device nodes reserved for slaves were repurposed to support all 8 master
 * images on the UniverseII. This driver supports 4 masters and 4 slaves.
 */
#define VME_MAJOR	221	/* VME Major Device Number */
#define VME_DEVS	9	/* Number of dev entries */

#define MASTER_MINOR	0
#define MASTER_MAX	3
#define SLAVE_MINOR	4
#define SLAVE_MAX	7
#define CONTROL_MINOR	8

#define PCI_BUF_SIZE	0x20000	/* Size of one slave image buffer */

/*
 * Structure to handle image related parameters.
 */
struct image_desc {
	void *kern_buf;			/* Buffer address in kernel space */
	dma_addr_t pci_buf;		/* Buffer address in PCI address space */
	unsigned long long size_buf;	/* Buffer size */
	struct mutex mutex;		/* Mutex for locking image */
	struct device *device;		/* Sysfs device */
	struct vme_resource *resource;	/* VME resource */
	int mmap_count;			/* Number of current mmap's */
};

static struct image_desc image[VME_DEVS];

static struct cdev *vme_user_cdev;		/* Character device */
static struct class *vme_user_sysfs_class;	/* Sysfs class */
static struct vme_dev *vme_user_bridge;		/* Pointer to user device */

static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
					MASTER_MINOR,	MASTER_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					CONTROL_MINOR
				};

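/*
 * Per-mapping private data: records which image a VMA maps and how many
 * VMAs currently share it (mappings can be duplicated, e.g. by fork()),
 * so the image's mmap_count is only dropped when the last user closes.
 */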
struct vme_user_vma_priv {
	unsigned int minor;
	refcount_t refcnt;
};

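/*
 * resource_to_user() and resource_from_user() bounce master-window accesses
 * through the per-image kernel buffer: data moves between the VME window and
 * kern_buf via vme_master_read()/vme_master_write(), and between kern_buf
 * and userspace via copy_to_user()/copy_from_user().
 */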
static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
				loff_t *ppos)
{
	ssize_t copied = 0;

	if (count > image[minor].size_buf)
		count = image[minor].size_buf;

	copied = vme_master_read(image[minor].resource, image[minor].kern_buf,
				 count, *ppos);
	if (copied < 0)
		return copied;

	if (copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
		return -EFAULT;

	return copied;
}

static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	if (count > image[minor].size_buf)
		count = image[minor].size_buf;

	if (copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
		return -EFAULT;

	return vme_master_write(image[minor].resource, image[minor].kern_buf,
				count, *ppos);
}

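/*
 * Slave images are backed by a coherent kernel buffer shared with the VME
 * bridge, so reads and writes are plain copies at the given offset; no
 * bounce through the bridge's master read/write paths is needed.
 */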
static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
			      size_t count, loff_t *ppos)
{
	void *image_ptr;

	image_ptr = image[minor].kern_buf + *ppos;
	if (copy_to_user(buf, image_ptr, (unsigned long)count))
		return -EFAULT;

	return count;
}

static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
				size_t count, loff_t *ppos)
{
	void *image_ptr;

	image_ptr = image[minor].kern_buf + *ppos;
	if (copy_from_user(image_ptr, buf, (unsigned long)count))
		return -EFAULT;

	return count;
}

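/*
 * read()/write() clamp the request to the configured image size, dispatch
 * on the minor's type (master windows bounce through kern_buf, slave
 * windows copy directly) and advance the file offset on success. The
 * control minor has no backing image, so it transfers nothing.
 */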
static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
			     loff_t *ppos)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	ssize_t retval;
	size_t image_size;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	/* XXX Do we *really* want this helper - we can use vme_*_get ? */
	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos >= image_size)) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not reading past end of the image */
	if (*ppos + count > image_size)
		count = image_size - *ppos;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_to_user(minor, buf, count, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_to_user(minor, buf, count, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);
	if (retval > 0)
		*ppos += retval;

	return retval;
}

static ssize_t vme_user_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	ssize_t retval;
	size_t image_size;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos >= image_size)) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not writing past end of the image */
	if (*ppos + count > image_size)
		count = image_size - *ppos;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_from_user(minor, buf, count, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_from_user(minor, buf, count, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

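/*
 * Seeking is only meaningful on master and slave image minors, which have
 * a fixed size reported by vme_get_size(); the control minor is not
 * seekable.
 */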
static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	size_t image_size;
	loff_t res;

	switch (type[minor]) {
	case MASTER_MINOR:
	case SLAVE_MINOR:
		mutex_lock(&image[minor].mutex);
		image_size = vme_get_size(image[minor].resource);
		res = fixed_size_llseek(file, off, whence, image_size);
		mutex_unlock(&image[minor].mutex);
		return res;
	}

	return -EINVAL;
}

/*
 * The ioctls provided by the old VME access method (the one at vmelinux.org)
 * are most certainly wrong, as they effectively push the register layout
 * through to user space. Given that the VME core can handle multiple bridges
 * with different register layouts, this is most certainly not the way to go.
 *
 * We aren't using the structures defined in the Motorola driver either - these
 * are also quite low level. Instead we use the definitions that the VME core
 * already provides.
 */
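/*
 * Illustrative userspace usage (a sketch only - see vme_user.h for the
 * actual ioctl numbers and structure definitions):
 *
 *	struct vme_master m = {
 *		.enable = 1, .vme_addr = 0, .size = 0x10000,
 *		.aspace = VME_A32, .cycle = VME_SCT, .dwidth = VME_D32,
 *	};
 *	ioctl(fd, VME_SET_MASTER, &m);	// fd refers to /dev/bus/vme/m0
 */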
static int vme_user_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct vme_master master;
	struct vme_slave slave;
	struct vme_irq_id irq_req;
	unsigned long copied;
	unsigned int minor = MINOR(inode->i_rdev);
	int retval;
	dma_addr_t pci_addr;
	void __user *argp = (void __user *)arg;

	switch (type[minor]) {
	case CONTROL_MINOR:
		switch (cmd) {
		case VME_IRQ_GEN:
			copied = copy_from_user(&irq_req, argp,
						sizeof(irq_req));
			if (copied) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			return vme_irq_generate(vme_user_bridge,
						irq_req.level,
						irq_req.statid);
		}
		break;
	case MASTER_MINOR:
		switch (cmd) {
		case VME_GET_MASTER:
			memset(&master, 0, sizeof(master));

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			retval = vme_master_get(image[minor].resource,
						&master.enable,
						&master.vme_addr,
						&master.size, &master.aspace,
						&master.cycle, &master.dwidth);

			copied = copy_to_user(argp, &master,
					      sizeof(master));
			if (copied) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_MASTER:

			if (image[minor].mmap_count != 0) {
				pr_warn("Can't adjust mapped window\n");
				return -EPERM;
			}

			copied = copy_from_user(&master, argp, sizeof(master));
			if (copied) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			return vme_master_set(image[minor].resource,
					      master.enable, master.vme_addr,
					      master.size, master.aspace,
					      master.cycle, master.dwidth);
		}
		break;
	case SLAVE_MINOR:
		switch (cmd) {
		case VME_GET_SLAVE:
			memset(&slave, 0, sizeof(slave));

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			retval = vme_slave_get(image[minor].resource,
					       &slave.enable, &slave.vme_addr,
					       &slave.size, &pci_addr,
					       &slave.aspace, &slave.cycle);

			copied = copy_to_user(argp, &slave,
					      sizeof(slave));
			if (copied) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_SLAVE:

			copied = copy_from_user(&slave, argp, sizeof(slave));
			if (copied) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			return vme_slave_set(image[minor].resource,
					     slave.enable, slave.vme_addr,
					     slave.size, image[minor].pci_buf,
					     slave.aspace, slave.cycle);
		}
		break;
	}

	return -EINVAL;
}

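/*
 * vme_user_ioctl() keeps the old inode/file ioctl signature; this wrapper
 * adapts it to the modern unlocked_ioctl interface and serializes it with
 * the per-image mutex.
 */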
static long
vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct inode *inode = file_inode(file);
	unsigned int minor = MINOR(inode->i_rdev);

	mutex_lock(&image[minor].mutex);
	ret = vme_user_ioctl(inode, file, cmd, arg);
	mutex_unlock(&image[minor].mutex);

	return ret;
}

static void vme_user_vm_open(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;

	refcount_inc(&vma_priv->refcnt);
}

static void vme_user_vm_close(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
	unsigned int minor = vma_priv->minor;

	if (!refcount_dec_and_test(&vma_priv->refcnt))
		return;

	mutex_lock(&image[minor].mutex);
	image[minor].mmap_count--;
	mutex_unlock(&image[minor].mutex);

	kfree(vma_priv);
}

static const struct vm_operations_struct vme_user_vm_ops = {
	.open = vme_user_vm_open,
	.close = vme_user_vm_close,
};

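/*
 * Map a master window into userspace. mmap_count is raised under the image
 * mutex so that VME_SET_MASTER can refuse to reconfigure a window while it
 * is mapped; it is dropped again in vme_user_vm_close().
 */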
static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
{
	int err;
	struct vme_user_vma_priv *vma_priv;

	mutex_lock(&image[minor].mutex);

	err = vme_master_mmap(image[minor].resource, vma);
	if (err) {
		mutex_unlock(&image[minor].mutex);
		return err;
	}

	vma_priv = kmalloc(sizeof(*vma_priv), GFP_KERNEL);
	if (!vma_priv) {
		mutex_unlock(&image[minor].mutex);
		return -ENOMEM;
	}

	vma_priv->minor = minor;
	refcount_set(&vma_priv->refcnt, 1);
	vma->vm_ops = &vme_user_vm_ops;
	vma->vm_private_data = vma_priv;

	image[minor].mmap_count++;

	mutex_unlock(&image[minor].mutex);

	return 0;
}

static int vme_user_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);

	if (type[minor] == MASTER_MINOR)
		return vme_user_master_mmap(minor, vma);

	return -ENODEV;
}

static const struct file_operations vme_user_fops = {
	.read = vme_user_read,
	.write = vme_user_write,
	.llseek = vme_user_llseek,
	.unlocked_ioctl = vme_user_unlocked_ioctl,
	.compat_ioctl = vme_user_unlocked_ioctl,
	.mmap = vme_user_mmap,
};

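/*
 * Bind only to the slots named by the "bus" module parameter: a device
 * matches when its bus number appears in bus[] and its slot number equals
 * the device's instance number.
 */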
static int vme_user_match(struct vme_dev *vdev)
{
	int i;

	int cur_bus = vme_bus_num(vdev);
	int cur_slot = vme_slot_num(vdev);

	for (i = 0; i < bus_num; i++)
		if ((cur_bus == bus[i]) && (cur_slot == vdev->num))
			return 1;

	return 0;
}

/*
 * In this simple access driver, the old behaviour is being preserved as much
 * as practical. We will therefore reserve the buffers and request the images
 * here so that we don't have to do it later.
 */
static int vme_user_probe(struct vme_dev *vdev)
{
	int i, err;
	char *name;

	/* Save pointer to the bridge device */
	if (vme_user_bridge) {
		dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
		err = -EINVAL;
		goto err_dev;
	}
	vme_user_bridge = vdev;

	/* Initialise descriptors */
	for (i = 0; i < VME_DEVS; i++) {
		image[i].kern_buf = NULL;
		image[i].pci_buf = 0;
		mutex_init(&image[i].mutex);
		image[i].device = NULL;
		image[i].resource = NULL;
	}

	/* Assign major and minor numbers for the driver */
	err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
				     driver_name);
	if (err) {
		dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
			 VME_MAJOR);
		goto err_region;
	}

	/* Register the driver as a char device */
	vme_user_cdev = cdev_alloc();
	if (!vme_user_cdev) {
		err = -ENOMEM;
		goto err_char;
	}
	vme_user_cdev->ops = &vme_user_fops;
	vme_user_cdev->owner = THIS_MODULE;
	err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
	if (err)
		goto err_class;

	/* Request slave resources and allocate buffers (128 kB each) */
	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		/* On the ca91cx42 bridge only two slave windows support A16
		 * addressing, so we request A24, which is supported by all
		 * windows.
		 */
		image[i].resource = vme_slave_request(vme_user_bridge,
						      VME_A24, VME_SCT);
		if (!image[i].resource) {
			dev_warn(&vdev->dev,
				 "Unable to allocate slave resource\n");
			err = -ENOMEM;
			goto err_slave;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
							 image[i].size_buf,
							 &image[i].pci_buf);
		if (!image[i].kern_buf) {
			dev_warn(&vdev->dev,
				 "Unable to allocate memory for buffer\n");
			image[i].pci_buf = 0;
			vme_slave_free(image[i].resource);
			err = -ENOMEM;
			goto err_slave;
		}
	}

	/*
	 * Request master resources and allocate bounce buffers for reads
	 * and writes
	 */
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		image[i].resource = vme_master_request(vme_user_bridge,
						       VME_A32, VME_SCT,
						       VME_D32);
		if (!image[i].resource) {
			dev_warn(&vdev->dev,
				 "Unable to allocate master resource\n");
			err = -ENOMEM;
			goto err_master;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
		if (!image[i].kern_buf) {
			err = -ENOMEM;
			vme_master_free(image[i].resource);
			goto err_master;
		}
	}

	/* Create sysfs entries - on udev systems this creates the dev files */
	vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
	if (IS_ERR(vme_user_sysfs_class)) {
		dev_err(&vdev->dev, "Error creating vme_user class.\n");
		err = PTR_ERR(vme_user_sysfs_class);
		goto err_master;
	}

	/* Add sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		int num;

		switch (type[i]) {
		case MASTER_MINOR:
			name = "bus/vme/m%d";
			break;
		case CONTROL_MINOR:
			name = "bus/vme/ctl";
			break;
		case SLAVE_MINOR:
			name = "bus/vme/s%d";
			break;
		default:
			err = -EINVAL;
			goto err_sysfs;
		}

		num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
		image[i].device = device_create(vme_user_sysfs_class, NULL,
						MKDEV(VME_MAJOR, i), NULL,
						name, num);
		if (IS_ERR(image[i].device)) {
			dev_info(&vdev->dev, "Error creating sysfs device\n");
			err = PTR_ERR(image[i].device);
			goto err_sysfs;
		}
	}

	return 0;

err_sysfs:
	while (i > 0) {
		i--;
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	/* Ensure counter set correctly to unalloc all master windows */
	i = MASTER_MAX + 1;
err_master:
	while (i > MASTER_MINOR) {
		i--;
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	/*
	 * Ensure counter set correctly to unalloc all slave windows and buffers
	 */
	i = SLAVE_MAX + 1;
err_slave:
	while (i > SLAVE_MINOR) {
		i--;
		vme_free_consistent(image[i].resource, image[i].size_buf,
				    image[i].kern_buf, image[i].pci_buf);
		vme_slave_free(image[i].resource);
	}
err_class:
	cdev_del(vme_user_cdev);
err_char:
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
err_region:
err_dev:
	return err;
}

static int vme_user_remove(struct vme_dev *dev)
{
	int i;

	/* Remove sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		mutex_destroy(&image[i].mutex);
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
		vme_free_consistent(image[i].resource, image[i].size_buf,
				    image[i].kern_buf, image[i].pci_buf);
		vme_slave_free(image[i].resource);
	}

	/* Unregister device driver */
	cdev_del(vme_user_cdev);

	/* Unregister the major and minor device numbers */
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);

	return 0;
}

static struct vme_driver vme_user_driver = {
	.name = driver_name,
	.match = vme_user_match,
	.probe = vme_user_probe,
	.remove = vme_user_remove,
};

static int __init vme_user_init(void)
{
	int retval = 0;

	pr_info("VME User Space Access Driver\n");

	if (bus_num == 0) {
		pr_err("No cards, skipping registration\n");
		retval = -ENODEV;
		goto err_nocard;
	}

	/* Clamp to the maximum number of buses the driver can support;
	 * more can be handled in future revisions if that ever becomes
	 * necessary.
	 */
	if (bus_num > VME_USER_BUS_MAX) {
		pr_err("Driver only able to handle %d buses\n",
		       VME_USER_BUS_MAX);
		bus_num = VME_USER_BUS_MAX;
	}

	/*
	 * Here we just register the maximum number of devices we can and
	 * leave vme_user_match() to allow only 1 to go through to probe().
	 * This way, if we later want to allow multiple user access devices,
	 * we just change the code in vme_user_match().
	 */
	retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
	if (retval)
		goto err_reg;

	return retval;

err_reg:
err_nocard:
	return retval;
}

static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);
}

MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0000);
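/* For example, "modprobe vme_user bus=0" binds the driver to VME bus 0. */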

MODULE_DESCRIPTION("VME User Space Access Driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");

module_init(vme_user_init);
module_exit(vme_user_exit);