// SPDX-License-Identifier: GPL-2.0
/*
 * USB Raw Gadget driver.
 * See Documentation/usb/raw-gadget.rst for more details.
 *
 * Copyright (c) 2020 Google, Inc.
 * Author: Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/ch11.h>
#include <linux/usb/gadget.h>

#include <uapi/linux/usb/raw_gadget.h>

#define DRIVER_DESC "USB Raw Gadget"
#define DRIVER_NAME "raw-gadget"

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Andrey Konovalov");
MODULE_LICENSE("GPL");

/*----------------------------------------------------------------------*/

static DEFINE_IDA(driver_id_numbers);
#define DRIVER_DRIVER_NAME_LENGTH_MAX 32

#define RAW_EVENT_QUEUE_SIZE 16

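/*
 * Events reported by the gadget callbacks (e.g. connect and control requests)
 * are buffered in a fixed-size FIFO; the semaphore counts the queued events
 * and lets raw_ioctl_event_fetch() block until one is available.
 */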
struct raw_event_queue {
	/* See the comment in raw_event_queue_fetch() for locking details. */
	spinlock_t lock;
	struct semaphore sema;
	struct usb_raw_event *events[RAW_EVENT_QUEUE_SIZE];
	int size;
};

static void raw_event_queue_init(struct raw_event_queue *queue)
{
	spin_lock_init(&queue->lock);
	sema_init(&queue->sema, 0);
	queue->size = 0;
}

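/*
 * Note: may be called from the UDC's interrupt context (via gadget_setup()),
 * hence GFP_ATOMIC and the irqsave locking.
 */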
static int raw_event_queue_add(struct raw_event_queue *queue,
	enum usb_raw_event_type type, size_t length, const void *data)
{
	unsigned long flags;
	struct usb_raw_event *event;

	spin_lock_irqsave(&queue->lock, flags);
	if (WARN_ON(queue->size >= RAW_EVENT_QUEUE_SIZE)) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return -ENOMEM;
	}
	event = kmalloc(sizeof(*event) + length, GFP_ATOMIC);
	if (!event) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return -ENOMEM;
	}
	event->type = type;
	event->length = length;
	if (event->length)
		memcpy(&event->data[0], data, length);
	queue->events[queue->size] = event;
	queue->size++;
	up(&queue->sema);
	spin_unlock_irqrestore(&queue->lock, flags);
	return 0;
}

static struct usb_raw_event *raw_event_queue_fetch(
				struct raw_event_queue *queue)
{
	int ret;
	unsigned long flags;
	struct usb_raw_event *event;

	/*
	 * This function can be called concurrently. We first check that
	 * there's at least one event queued by decrementing the semaphore,
	 * and then take the lock to protect queue struct fields.
	 */
	ret = down_interruptible(&queue->sema);
	if (ret)
		return ERR_PTR(ret);
	spin_lock_irqsave(&queue->lock, flags);
	/*
	 * queue->size must have the same value as queue->sema counter (before
	 * the down_interruptible() call above), so this check is a fail-safe.
	 */
	if (WARN_ON(!queue->size)) {
		spin_unlock_irqrestore(&queue->lock, flags);
		return ERR_PTR(-ENODEV);
	}
	event = queue->events[0];
	queue->size--;
	memmove(&queue->events[0], &queue->events[1],
			queue->size * sizeof(queue->events[0]));
	spin_unlock_irqrestore(&queue->lock, flags);
	return event;
}

static void raw_event_queue_destroy(struct raw_event_queue *queue)
{
	int i;

	for (i = 0; i < queue->size; i++)
		kfree(queue->events[i]);
	queue->size = 0;
}

/*----------------------------------------------------------------------*/

struct raw_dev;

enum ep_state {
	STATE_EP_DISABLED,
	STATE_EP_ENABLED,
};

struct raw_ep {
	struct raw_dev *dev;
	enum ep_state state;
	struct usb_ep *ep;
	u8 addr;
	struct usb_request *req;
	bool urb_queued;
	bool disabling;
	ssize_t status;
};

enum dev_state {
	STATE_DEV_INVALID = 0,
	STATE_DEV_OPENED,
	STATE_DEV_INITIALIZED,
	STATE_DEV_REGISTERING,
	STATE_DEV_RUNNING,
	STATE_DEV_CLOSED,
	STATE_DEV_FAILED
};

struct raw_dev {
	struct kref count;
	spinlock_t lock;

	const char *udc_name;
	struct usb_gadget_driver driver;

	/* Reference to misc device: */
	struct device *dev;

	/* Make driver names unique */
	int driver_id_number;

	/* Protected by lock: */
	enum dev_state state;
	bool gadget_registered;
	struct usb_gadget *gadget;
	struct usb_request *req;
	bool ep0_in_pending;
	bool ep0_out_pending;
	bool ep0_urb_queued;
	ssize_t ep0_status;
	struct raw_ep eps[USB_RAW_EPS_NUM_MAX];
	int eps_num;

	struct completion ep0_done;
	struct raw_event_queue queue;
};

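/*
 * raw_dev is refcounted: the initial reference from dev_new() is dropped in
 * raw_release(), and additional references are taken in gadget_bind() and
 * raw_ioctl_run() (see the matching kref_put() calls).
 */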
static struct raw_dev *dev_new(void)
{
	struct raw_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	/* Matches kref_put() in raw_release(). */
	kref_init(&dev->count);
	spin_lock_init(&dev->lock);
	init_completion(&dev->ep0_done);
	raw_event_queue_init(&dev->queue);
	dev->driver_id_number = -1;
	return dev;
}

static void dev_free(struct kref *kref)
{
	struct raw_dev *dev = container_of(kref, struct raw_dev, count);
	int i;

	kfree(dev->udc_name);
	kfree(dev->driver.udc_name);
	kfree(dev->driver.driver.name);
	if (dev->driver_id_number >= 0)
		ida_free(&driver_id_numbers, dev->driver_id_number);
	if (dev->req) {
		if (dev->ep0_urb_queued)
			usb_ep_dequeue(dev->gadget->ep0, dev->req);
		usb_ep_free_request(dev->gadget->ep0, dev->req);
	}
	raw_event_queue_destroy(&dev->queue);
	for (i = 0; i < dev->eps_num; i++) {
		if (dev->eps[i].state == STATE_EP_DISABLED)
			continue;
		usb_ep_disable(dev->eps[i].ep);
		usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
		kfree(dev->eps[i].ep->desc);
		dev->eps[i].state = STATE_EP_DISABLED;
	}
	kfree(dev);
}

/*----------------------------------------------------------------------*/

static int raw_queue_event(struct raw_dev *dev,
	enum usb_raw_event_type type, size_t length, const void *data)
{
	int ret = 0;
	unsigned long flags;

	ret = raw_event_queue_add(&dev->queue, type, length, data);
	if (ret < 0) {
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		spin_unlock_irqrestore(&dev->lock, flags);
	}
	return ret;
}

static void gadget_ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct raw_dev *dev = req->context;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (req->status)
		dev->ep0_status = req->status;
	else
		dev->ep0_status = req->actual;
	if (dev->ep0_in_pending)
		dev->ep0_in_pending = false;
	else
		dev->ep0_out_pending = false;
	spin_unlock_irqrestore(&dev->lock, flags);

	complete(&dev->ep0_done);
}

static u8 get_ep_addr(const char *name)
{
	/* If the endpoint has a fixed function (named as e.g. "ep12out-bulk"),
	 * parse the endpoint address from its name. We deliberately use the
	 * deprecated simple_strtoul() function here, as the number isn't
	 * followed by '\0' nor '\n'.
	 */
	if (isdigit(name[2]))
		return simple_strtoul(&name[2], NULL, 10);
	/* Otherwise the endpoint is configurable (named as e.g. "ep-a"). */
	return USB_RAW_EP_ADDR_ANY;
}

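/*
 * Bind callback: invoked by the UDC core when the gadget driver registered in
 * raw_ioctl_run() is attached to the requested UDC. Records the gadget's
 * endpoints and queues a USB_RAW_EVENT_CONNECT event for userspace.
 */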
static int gadget_bind(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	int ret = 0, i = 0;
	struct raw_dev *dev = container_of(driver, struct raw_dev, driver);
	struct usb_request *req;
	struct usb_ep *ep;
	unsigned long flags;

	if (strcmp(gadget->name, dev->udc_name) != 0)
		return -ENODEV;

	set_gadget_data(gadget, dev);
	req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
	if (!req) {
		dev_err(&gadget->dev, "usb_ep_alloc_request failed\n");
		set_gadget_data(gadget, NULL);
		return -ENOMEM;
	}

	spin_lock_irqsave(&dev->lock, flags);
	dev->req = req;
	dev->req->context = dev;
	dev->req->complete = gadget_ep0_complete;
	dev->gadget = gadget;
	gadget_for_each_ep(ep, dev->gadget) {
		dev->eps[i].ep = ep;
		dev->eps[i].addr = get_ep_addr(ep->name);
		dev->eps[i].state = STATE_EP_DISABLED;
		i++;
	}
	dev->eps_num = i;
	spin_unlock_irqrestore(&dev->lock, flags);

	/* Matches kref_put() in gadget_unbind(). */
	kref_get(&dev->count);

	ret = raw_queue_event(dev, USB_RAW_EVENT_CONNECT, 0, NULL);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue event\n");

	return ret;
}

static void gadget_unbind(struct usb_gadget *gadget)
{
	struct raw_dev *dev = get_gadget_data(gadget);

	set_gadget_data(gadget, NULL);
	/* Matches kref_get() in gadget_bind(). */
	kref_put(&dev->count, dev_free);
}

static int gadget_setup(struct usb_gadget *gadget,
			const struct usb_ctrlrequest *ctrl)
{
	int ret = 0;
	struct raw_dev *dev = get_gadget_data(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_err(&gadget->dev, "ignoring, device is not running\n");
		ret = -ENODEV;
		goto out_unlock;
	}
	if (dev->ep0_in_pending || dev->ep0_out_pending) {
		dev_dbg(&gadget->dev, "stalling, request already pending\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength)
		dev->ep0_in_pending = true;
	else
		dev->ep0_out_pending = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = raw_queue_event(dev, USB_RAW_EVENT_CONTROL, sizeof(*ctrl), ctrl);
	if (ret < 0)
		dev_err(&gadget->dev, "failed to queue event\n");
	goto out;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
out:
	return ret;
}

/* These are currently unused but present in case a UDC driver requires them. */
static void gadget_disconnect(struct usb_gadget *gadget) { }
static void gadget_suspend(struct usb_gadget *gadget) { }
static void gadget_resume(struct usb_gadget *gadget) { }
static void gadget_reset(struct usb_gadget *gadget) { }

/*----------------------------------------------------------------------*/

static struct miscdevice raw_misc_device;

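/*
 * Each open() of the misc device creates an independent raw_dev instance with
 * its own state machine and event queue.
 */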
static int raw_open(struct inode *inode, struct file *fd)
{
	struct raw_dev *dev;

	/* Nonblocking I/O is not supported yet. */
	if (fd->f_flags & O_NONBLOCK)
		return -EINVAL;

	dev = dev_new();
	if (!dev)
		return -ENOMEM;
	fd->private_data = dev;
	dev->state = STATE_DEV_OPENED;
	dev->dev = raw_misc_device.this_device;
	return 0;
}

static int raw_release(struct inode *inode, struct file *fd)
{
	int ret = 0;
	struct raw_dev *dev = fd->private_data;
	unsigned long flags;
	bool unregister = false;

	spin_lock_irqsave(&dev->lock, flags);
	dev->state = STATE_DEV_CLOSED;
	if (!dev->gadget) {
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_put;
	}
	if (dev->gadget_registered)
		unregister = true;
	dev->gadget_registered = false;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (unregister) {
		ret = usb_gadget_unregister_driver(&dev->driver);
		if (ret != 0)
			dev_err(dev->dev,
				"usb_gadget_unregister_driver() failed with %d\n",
				ret);
		/* Matches kref_get() in raw_ioctl_run(). */
		kref_put(&dev->count, dev_free);
	}

out_put:
	/* Matches dev_new() in raw_open(). */
	kref_put(&dev->count, dev_free);
	return ret;
}

/*----------------------------------------------------------------------*/

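/*
 * USB_RAW_IOCTL_INIT: records the requested UDC driver/device names and the
 * maximum speed, and fills in the gadget driver structure. Only valid once,
 * right after open(), before USB_RAW_IOCTL_RUN.
 */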
static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	int driver_id_number;
	struct usb_raw_init arg;
	char *udc_driver_name;
	char *udc_device_name;
	char *driver_driver_name;
	unsigned long flags;

	if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
		return -EFAULT;

	switch (arg.speed) {
	case USB_SPEED_UNKNOWN:
		arg.speed = USB_SPEED_HIGH;
		break;
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
		break;
	default:
		return -EINVAL;
	}

	driver_id_number = ida_alloc(&driver_id_numbers, GFP_KERNEL);
	if (driver_id_number < 0)
		return driver_id_number;

	driver_driver_name = kmalloc(DRIVER_DRIVER_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!driver_driver_name) {
		ret = -ENOMEM;
		goto out_free_driver_id_number;
	}
	snprintf(driver_driver_name, DRIVER_DRIVER_NAME_LENGTH_MAX,
			DRIVER_NAME ".%d", driver_id_number);

	udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!udc_driver_name) {
		ret = -ENOMEM;
		goto out_free_driver_driver_name;
	}
	ret = strscpy(udc_driver_name, &arg.driver_name[0],
			UDC_NAME_LENGTH_MAX);
	if (ret < 0)
		goto out_free_udc_driver_name;
	ret = 0;

	udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
	if (!udc_device_name) {
		ret = -ENOMEM;
		goto out_free_udc_driver_name;
	}
	ret = strscpy(udc_device_name, &arg.device_name[0],
			UDC_NAME_LENGTH_MAX);
	if (ret < 0)
		goto out_free_udc_device_name;
	ret = 0;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_OPENED) {
		dev_dbg(dev->dev, "fail, device is not opened\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->udc_name = udc_driver_name;

	dev->driver.function = DRIVER_DESC;
	dev->driver.max_speed = arg.speed;
	dev->driver.setup = gadget_setup;
	dev->driver.disconnect = gadget_disconnect;
	dev->driver.bind = gadget_bind;
	dev->driver.unbind = gadget_unbind;
	dev->driver.suspend = gadget_suspend;
	dev->driver.resume = gadget_resume;
	dev->driver.reset = gadget_reset;
	dev->driver.driver.name = driver_driver_name;
	dev->driver.udc_name = udc_device_name;
	dev->driver.match_existing_only = 1;
	dev->driver_id_number = driver_id_number;

	dev->state = STATE_DEV_INITIALIZED;
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
out_free_udc_device_name:
	kfree(udc_device_name);
out_free_udc_driver_name:
	kfree(udc_driver_name);
out_free_driver_driver_name:
	kfree(driver_driver_name);
out_free_driver_id_number:
	ida_free(&driver_id_numbers, driver_id_number);
	return ret;
}

static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	if (value)
		return -EINVAL;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_INITIALIZED) {
		dev_dbg(dev->dev, "fail, device is not initialized\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->state = STATE_DEV_REGISTERING;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_gadget_register_driver(&dev->driver);

	spin_lock_irqsave(&dev->lock, flags);
	if (ret) {
		dev_err(dev->dev,
			"fail, usb_gadget_register_driver returned %d\n", ret);
		dev->state = STATE_DEV_FAILED;
		goto out_unlock;
	}
	dev->gadget_registered = true;
	dev->state = STATE_DEV_RUNNING;
	/* Matches kref_put() in raw_release(). */
	kref_get(&dev->count);

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

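/*
 * USB_RAW_IOCTL_EVENT_FETCH: blocks until an event is queued (or the wait is
 * interrupted) and copies at most arg.length bytes of event data back to
 * userspace.
 */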
static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value)
{
	struct usb_raw_event arg;
	unsigned long flags;
	struct usb_raw_event *event;
	uint32_t length;

	if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
		return -EFAULT;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EINVAL;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	event = raw_event_queue_fetch(&dev->queue);
	if (PTR_ERR(event) == -EINTR) {
		dev_dbg(&dev->gadget->dev, "event fetching interrupted\n");
		return -EINTR;
	}
	if (IS_ERR(event)) {
		dev_err(&dev->gadget->dev, "failed to fetch event\n");
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENODEV;
	}
	length = min(arg.length, event->length);
	if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) {
		kfree(event);
		return -EFAULT;
	}

	kfree(event);
	return 0;
}

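/*
 * Copies and validates the usb_raw_ep_io header from userspace and allocates
 * a kernel buffer for the data stage; transfers are capped at PAGE_SIZE.
 */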
static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr,
				bool get_from_user)
{
	void *data;

	if (copy_from_user(io, ptr, sizeof(*io)))
		return ERR_PTR(-EFAULT);
	if (io->ep >= USB_RAW_EPS_NUM_MAX)
		return ERR_PTR(-EINVAL);
	if (!usb_raw_io_flags_valid(io->flags))
		return ERR_PTR(-EINVAL);
	if (io->length > PAGE_SIZE)
		return ERR_PTR(-EINVAL);
	if (get_from_user)
		data = memdup_user(ptr + sizeof(*io), io->length);
	else {
		data = kmalloc(io->length, GFP_KERNEL);
		if (!data)
			data = ERR_PTR(-ENOMEM);
	}
	return data;
}

static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
				void *data, bool in)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->ep0_urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if ((in && !dev->ep0_in_pending) ||
			(!in && !dev->ep0_out_pending)) {
		dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (WARN_ON(in && dev->ep0_out_pending)) {
		ret = -ENODEV;
		dev->state = STATE_DEV_FAILED;
		goto out_done;
	}
	if (WARN_ON(!in && dev->ep0_in_pending)) {
		ret = -ENODEV;
		dev->state = STATE_DEV_FAILED;
		goto out_done;
	}

	dev->req->buf = data;
	dev->req->length = io->length;
	dev->req->zero = usb_raw_io_flags_zero(io->flags);
	dev->ep0_urb_queued = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->gadget->ep0, dev->req, GFP_KERNEL);
	if (ret) {
		dev_err(&dev->gadget->dev,
				"fail, usb_ep_queue returned %d\n", ret);
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		goto out_done;
	}

	ret = wait_for_completion_interruptible(&dev->ep0_done);
	if (ret) {
		dev_dbg(&dev->gadget->dev, "wait interrupted\n");
		usb_ep_dequeue(dev->gadget->ep0, dev->req);
		wait_for_completion(&dev->ep0_done);
		spin_lock_irqsave(&dev->lock, flags);
		goto out_done;
	}

	spin_lock_irqsave(&dev->lock, flags);
	ret = dev->ep0_status;

out_done:
	dev->ep0_urb_queued = false;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep0_write(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	void *data;
	struct usb_raw_ep_io io;

	data = raw_alloc_io_data(&io, (void __user *)value, true);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep0_io(dev, &io, data, true);
	kfree(data);
	return ret;
}

static int raw_ioctl_ep0_read(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	void *data;
	struct usb_raw_ep_io io;
	unsigned int length;

	data = raw_alloc_io_data(&io, (void __user *)value, false);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep0_io(dev, &io, data, false);
	if (ret < 0)
		goto free;

	length = min(io.length, (unsigned int)ret);
	if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
		ret = -EFAULT;
	else
		ret = length;
free:
	kfree(data);
	return ret;
}

static int raw_ioctl_ep0_stall(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	if (value)
		return -EINVAL;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->ep0_urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (!dev->ep0_in_pending && !dev->ep0_out_pending) {
		dev_dbg(&dev->gadget->dev, "fail, no request pending\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = usb_ep_set_halt(dev->gadget->ep0);
	if (ret < 0)
		dev_err(&dev->gadget->dev,
			"fail, usb_ep_set_halt returned %d\n", ret);

	if (dev->ep0_in_pending)
		dev->ep0_in_pending = false;
	else
		dev->ep0_out_pending = false;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

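/*
 * USB_RAW_IOCTL_EP_ENABLE: matches the user-provided endpoint descriptor
 * against the UDC's endpoints and enables the first suitable one. On success
 * the endpoint index to use with USB_RAW_IOCTL_EP_READ/EP_WRITE is returned.
 */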
static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i;
	unsigned long flags;
	struct usb_endpoint_descriptor *desc;
	struct raw_ep *ep;
	bool ep_props_matched = false;

	desc = memdup_user((void __user *)value, sizeof(*desc));
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/*
	 * Endpoints with a maxpacket length of 0 can cause crashes in UDC
	 * drivers.
	 */
	if (usb_endpoint_maxp(desc) == 0) {
		dev_dbg(dev->dev, "fail, bad endpoint maxpacket\n");
		kfree(desc);
		return -EINVAL;
	}

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_free;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_free;
	}

	for (i = 0; i < dev->eps_num; i++) {
		ep = &dev->eps[i];
		if (ep->addr != usb_endpoint_num(desc) &&
				ep->addr != USB_RAW_EP_ADDR_ANY)
			continue;
		if (!usb_gadget_ep_match_desc(dev->gadget, ep->ep, desc, NULL))
			continue;
		ep_props_matched = true;
		if (ep->state != STATE_EP_DISABLED)
			continue;
		ep->ep->desc = desc;
		ret = usb_ep_enable(ep->ep);
		if (ret < 0) {
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_enable returned %d\n", ret);
			goto out_free;
		}
		ep->req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
		if (!ep->req) {
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_alloc_request failed\n");
			usb_ep_disable(ep->ep);
			ret = -ENOMEM;
			goto out_free;
		}
		ep->state = STATE_EP_ENABLED;
		ep->ep->driver_data = ep;
		ret = i;
		goto out_unlock;
	}

	if (!ep_props_matched) {
		dev_dbg(&dev->gadget->dev, "fail, bad endpoint descriptor\n");
		ret = -EINVAL;
	} else {
		dev_dbg(&dev->gadget->dev, "fail, no endpoints available\n");
		ret = -EBUSY;
	}

out_free:
	kfree(desc);
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i = value;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (i < 0 || i >= dev->eps_num) {
		dev_dbg(dev->dev, "fail, invalid endpoint\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->eps[i].state == STATE_EP_DISABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].disabling) {
		dev_dbg(&dev->gadget->dev,
				"fail, disable already in progress\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].urb_queued) {
		dev_dbg(&dev->gadget->dev,
				"fail, waiting for urb completion\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	dev->eps[i].disabling = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	usb_ep_disable(dev->eps[i].ep);

	spin_lock_irqsave(&dev->lock, flags);
	usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
	kfree(dev->eps[i].ep->desc);
	dev->eps[i].state = STATE_EP_DISABLED;
	dev->eps[i].disabling = false;

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep_set_clear_halt_wedge(struct raw_dev *dev,
		unsigned long value, bool set, bool halt)
{
	int ret = 0, i = value;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (i < 0 || i >= dev->eps_num) {
		dev_dbg(dev->dev, "fail, invalid endpoint\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (dev->eps[i].state == STATE_EP_DISABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].disabling) {
		dev_dbg(&dev->gadget->dev,
				"fail, disable is in progress\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (dev->eps[i].urb_queued) {
		dev_dbg(&dev->gadget->dev,
				"fail, waiting for urb completion\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (usb_endpoint_xfer_isoc(dev->eps[i].ep->desc)) {
		dev_dbg(&dev->gadget->dev,
				"fail, can't halt/wedge ISO endpoint\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (set && halt) {
		ret = usb_ep_set_halt(dev->eps[i].ep);
		if (ret < 0)
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_set_halt returned %d\n", ret);
	} else if (!set && halt) {
		ret = usb_ep_clear_halt(dev->eps[i].ep);
		if (ret < 0)
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_clear_halt returned %d\n", ret);
	} else if (set && !halt) {
		ret = usb_ep_set_wedge(dev->eps[i].ep);
		if (ret < 0)
			dev_err(&dev->gadget->dev,
				"fail, usb_ep_set_wedge returned %d\n", ret);
	}

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static void gadget_ep_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct raw_ep *r_ep = (struct raw_ep *)ep->driver_data;
	struct raw_dev *dev = r_ep->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (req->status)
		r_ep->status = req->status;
	else
		r_ep->status = req->actual;
	spin_unlock_irqrestore(&dev->lock, flags);

	complete((struct completion *)req->context);
}

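/*
 * Endpoint I/O is synchronous: the request is queued on the endpoint and the
 * caller sleeps until gadget_ep_complete() fires. If the wait is interrupted,
 * the request is dequeued and the completion is still waited for before
 * returning.
 */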
static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
				void *data, bool in)
{
	int ret = 0;
	unsigned long flags;
	struct raw_ep *ep;
	DECLARE_COMPLETION_ONSTACK(done);

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (io->ep >= dev->eps_num) {
		dev_dbg(&dev->gadget->dev, "fail, invalid endpoint\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	ep = &dev->eps[io->ep];
	if (ep->state != STATE_EP_ENABLED) {
		dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (ep->disabling) {
		dev_dbg(&dev->gadget->dev,
				"fail, endpoint is already being disabled\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (ep->urb_queued) {
		dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	if (in != usb_endpoint_dir_in(ep->ep->desc)) {
		dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	ep->dev = dev;
	ep->req->context = &done;
	ep->req->complete = gadget_ep_complete;
	ep->req->buf = data;
	ep->req->length = io->length;
	ep->req->zero = usb_raw_io_flags_zero(io->flags);
	ep->urb_queued = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(ep->ep, ep->req, GFP_KERNEL);
	if (ret) {
		dev_err(&dev->gadget->dev,
				"fail, usb_ep_queue returned %d\n", ret);
		spin_lock_irqsave(&dev->lock, flags);
		dev->state = STATE_DEV_FAILED;
		goto out_done;
	}

	ret = wait_for_completion_interruptible(&done);
	if (ret) {
		dev_dbg(&dev->gadget->dev, "wait interrupted\n");
		usb_ep_dequeue(ep->ep, ep->req);
		wait_for_completion(&done);
		spin_lock_irqsave(&dev->lock, flags);
		goto out_done;
	}

	spin_lock_irqsave(&dev->lock, flags);
	ret = ep->status;

out_done:
	ep->urb_queued = false;
out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_ep_write(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	char *data;
	struct usb_raw_ep_io io;

	data = raw_alloc_io_data(&io, (void __user *)value, true);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep_io(dev, &io, data, true);
	kfree(data);
	return ret;
}

static int raw_ioctl_ep_read(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	char *data;
	struct usb_raw_ep_io io;
	unsigned int length;

	data = raw_alloc_io_data(&io, (void __user *)value, false);
	if (IS_ERR(data))
		return PTR_ERR(data);
	ret = raw_process_ep_io(dev, &io, data, false);
	if (ret < 0)
		goto free;

	length = min(io.length, (unsigned int)ret);
	if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
		ret = -EFAULT;
	else
		ret = length;
free:
	kfree(data);
	return ret;
}

static int raw_ioctl_configure(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	if (value)
		return -EINVAL;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	usb_gadget_set_state(dev->gadget, USB_STATE_CONFIGURED);

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int raw_ioctl_vbus_draw(struct raw_dev *dev, unsigned long value)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		goto out_unlock;
	}
	usb_gadget_vbus_draw(dev->gadget, 2 * value);

out_unlock:
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static void fill_ep_caps(struct usb_ep_caps *caps,
				struct usb_raw_ep_caps *raw_caps)
{
	raw_caps->type_control = caps->type_control;
	raw_caps->type_iso = caps->type_iso;
	raw_caps->type_bulk = caps->type_bulk;
	raw_caps->type_int = caps->type_int;
	raw_caps->dir_in = caps->dir_in;
	raw_caps->dir_out = caps->dir_out;
}

static void fill_ep_limits(struct usb_ep *ep, struct usb_raw_ep_limits *limits)
{
	limits->maxpacket_limit = ep->maxpacket_limit;
	limits->max_streams = ep->max_streams;
}

static int raw_ioctl_eps_info(struct raw_dev *dev, unsigned long value)
{
	int ret = 0, i;
	unsigned long flags;
	struct usb_raw_eps_info *info;
	struct raw_ep *ep;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->state != STATE_DEV_RUNNING) {
		dev_dbg(dev->dev, "fail, device is not running\n");
		ret = -EINVAL;
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_free;
	}
	if (!dev->gadget) {
		dev_dbg(dev->dev, "fail, gadget is not bound\n");
		ret = -EBUSY;
		spin_unlock_irqrestore(&dev->lock, flags);
		goto out_free;
	}

	for (i = 0; i < dev->eps_num; i++) {
		ep = &dev->eps[i];
		strscpy(&info->eps[i].name[0], ep->ep->name,
				USB_RAW_EP_NAME_MAX);
		info->eps[i].addr = ep->addr;
		fill_ep_caps(&ep->ep->caps, &info->eps[i].caps);
		fill_ep_limits(ep->ep, &info->eps[i].limits);
	}
	ret = dev->eps_num;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (copy_to_user((void __user *)value, info, sizeof(*info)))
		ret = -EFAULT;

out_free:
	kfree(info);
out:
	return ret;
}

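/*
 * Typical userspace usage is roughly the following (see
 * Documentation/usb/raw-gadget.rst; the event handling details depend on the
 * emulated device):
 *
 *	fd = open("/dev/raw-gadget", O_RDWR);
 *	ioctl(fd, USB_RAW_IOCTL_INIT, &init);
 *	ioctl(fd, USB_RAW_IOCTL_RUN, 0);
 *	for (;;) {
 *		ioctl(fd, USB_RAW_IOCTL_EVENT_FETCH, &event);
 *		handle USB_RAW_EVENT_CONNECT / USB_RAW_EVENT_CONTROL using
 *		EP0_READ/EP0_WRITE, EP_ENABLE, EP_READ/EP_WRITE, etc.
 *	}
 */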
static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value)
{
	struct raw_dev *dev = fd->private_data;
	int ret = 0;

	if (!dev)
		return -EBUSY;

	switch (cmd) {
	case USB_RAW_IOCTL_INIT:
		ret = raw_ioctl_init(dev, value);
		break;
	case USB_RAW_IOCTL_RUN:
		ret = raw_ioctl_run(dev, value);
		break;
	case USB_RAW_IOCTL_EVENT_FETCH:
		ret = raw_ioctl_event_fetch(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_WRITE:
		ret = raw_ioctl_ep0_write(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_READ:
		ret = raw_ioctl_ep0_read(dev, value);
		break;
	case USB_RAW_IOCTL_EP_ENABLE:
		ret = raw_ioctl_ep_enable(dev, value);
		break;
	case USB_RAW_IOCTL_EP_DISABLE:
		ret = raw_ioctl_ep_disable(dev, value);
		break;
	case USB_RAW_IOCTL_EP_WRITE:
		ret = raw_ioctl_ep_write(dev, value);
		break;
	case USB_RAW_IOCTL_EP_READ:
		ret = raw_ioctl_ep_read(dev, value);
		break;
	case USB_RAW_IOCTL_CONFIGURE:
		ret = raw_ioctl_configure(dev, value);
		break;
	case USB_RAW_IOCTL_VBUS_DRAW:
		ret = raw_ioctl_vbus_draw(dev, value);
		break;
	case USB_RAW_IOCTL_EPS_INFO:
		ret = raw_ioctl_eps_info(dev, value);
		break;
	case USB_RAW_IOCTL_EP0_STALL:
		ret = raw_ioctl_ep0_stall(dev, value);
		break;
	case USB_RAW_IOCTL_EP_SET_HALT:
		ret = raw_ioctl_ep_set_clear_halt_wedge(
					dev, value, true, true);
		break;
	case USB_RAW_IOCTL_EP_CLEAR_HALT:
		ret = raw_ioctl_ep_set_clear_halt_wedge(
					dev, value, false, true);
		break;
	case USB_RAW_IOCTL_EP_SET_WEDGE:
		ret = raw_ioctl_ep_set_clear_halt_wedge(
					dev, value, true, false);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*----------------------------------------------------------------------*/

static const struct file_operations raw_fops = {
	.open = raw_open,
	.unlocked_ioctl = raw_ioctl,
	.compat_ioctl = raw_ioctl,
	.release = raw_release,
	.llseek = no_llseek,
};

static struct miscdevice raw_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = DRIVER_NAME,
	.fops = &raw_fops,
};

module_misc_device(raw_misc_device);