1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * USB Raw Gadget driver.
4 * See Documentation/usb/raw-gadget.rst for more details.
5 *
6 * Copyright (c) 2020 Google, Inc.
7 * Author: Andrey Konovalov <andreyknvl@gmail.com>
8 */
9
10 #include <linux/compiler.h>
11 #include <linux/ctype.h>
12 #include <linux/debugfs.h>
13 #include <linux/delay.h>
14 #include <linux/idr.h>
15 #include <linux/kref.h>
16 #include <linux/miscdevice.h>
17 #include <linux/module.h>
18 #include <linux/semaphore.h>
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/uaccess.h>
22 #include <linux/wait.h>
23
24 #include <linux/usb.h>
25 #include <linux/usb/ch9.h>
26 #include <linux/usb/ch11.h>
27 #include <linux/usb/gadget.h>
28
29 #include <uapi/linux/usb/raw_gadget.h>
30
31 #define DRIVER_DESC "USB Raw Gadget"
32 #define DRIVER_NAME "raw-gadget"
33
34 MODULE_DESCRIPTION(DRIVER_DESC);
35 MODULE_AUTHOR("Andrey Konovalov");
36 MODULE_LICENSE("GPL");
37
38 /*----------------------------------------------------------------------*/
39
40 static DEFINE_IDA(driver_id_numbers);
41 #define DRIVER_DRIVER_NAME_LENGTH_MAX 32
42
43 #define RAW_EVENT_QUEUE_SIZE 16
44
45 struct raw_event_queue {
46 /* See the comment in raw_event_queue_fetch() for locking details. */
47 spinlock_t lock;
48 struct semaphore sema;
49 struct usb_raw_event *events[RAW_EVENT_QUEUE_SIZE];
50 int size;
51 };
52
53 static void raw_event_queue_init(struct raw_event_queue *queue)
54 {
55 spin_lock_init(&queue->lock);
56 sema_init(&queue->sema, 0);
57 queue->size = 0;
58 }
59
60 static int raw_event_queue_add(struct raw_event_queue *queue,
61 enum usb_raw_event_type type, size_t length, const void *data)
62 {
63 unsigned long flags;
64 struct usb_raw_event *event;
65
66 spin_lock_irqsave(&queue->lock, flags);
67 if (WARN_ON(queue->size >= RAW_EVENT_QUEUE_SIZE)) {
68 spin_unlock_irqrestore(&queue->lock, flags);
69 return -ENOMEM;
70 }
71 event = kmalloc(sizeof(*event) + length, GFP_ATOMIC);
72 if (!event) {
73 spin_unlock_irqrestore(&queue->lock, flags);
74 return -ENOMEM;
75 }
76 event->type = type;
77 event->length = length;
78 if (event->length)
79 memcpy(&event->data[0], data, length);
80 queue->events[queue->size] = event;
81 queue->size++;
82 up(&queue->sema);
83 spin_unlock_irqrestore(&queue->lock, flags);
84 return 0;
85 }
86
87 static struct usb_raw_event *raw_event_queue_fetch(
88 struct raw_event_queue *queue)
89 {
90 int ret;
91 unsigned long flags;
92 struct usb_raw_event *event;
93
94 /*
95 * This function can be called concurrently. We first check that
96 * there's at least one event queued by decrementing the semaphore,
97 * and then take the lock to protect queue struct fields.
98 */
99 ret = down_interruptible(&queue->sema);
100 if (ret)
101 return ERR_PTR(ret);
102 spin_lock_irqsave(&queue->lock, flags);
103 /*
104 * queue->size must have the same value as queue->sema counter (before
105 * the down_interruptible() call above), so this check is a fail-safe.
106 */
107 if (WARN_ON(!queue->size)) {
108 spin_unlock_irqrestore(&queue->lock, flags);
109 return ERR_PTR(-ENODEV);
110 }
111 event = queue->events[0];
112 queue->size--;
113 memmove(&queue->events[0], &queue->events[1],
114 queue->size * sizeof(queue->events[0]));
115 spin_unlock_irqrestore(&queue->lock, flags);
116 return event;
117 }
118
119 static void raw_event_queue_destroy(struct raw_event_queue *queue)
120 {
121 int i;
122
123 for (i = 0; i < queue->size; i++)
124 kfree(queue->events[i]);
125 queue->size = 0;
126 }
127
128 /*----------------------------------------------------------------------*/
129
130 struct raw_dev;
131
132 enum ep_state {
133 STATE_EP_DISABLED,
134 STATE_EP_ENABLED,
135 };
136
137 struct raw_ep {
138 struct raw_dev *dev;
139 enum ep_state state;
140 struct usb_ep *ep;
141 u8 addr;
142 struct usb_request *req;
143 bool urb_queued;
144 bool disabling;
145 ssize_t status;
146 };
147
148 enum dev_state {
149 STATE_DEV_INVALID = 0,
150 STATE_DEV_OPENED,
151 STATE_DEV_INITIALIZED,
152 STATE_DEV_REGISTERING,
153 STATE_DEV_RUNNING,
154 STATE_DEV_CLOSED,
155 STATE_DEV_FAILED
156 };
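
/*
 * Device state lifecycle, as driven by the file operations and ioctls below:
 * raw_open() puts a fresh device into STATE_DEV_OPENED, USB_RAW_IOCTL_INIT
 * moves it to STATE_DEV_INITIALIZED, and USB_RAW_IOCTL_RUN passes through
 * STATE_DEV_REGISTERING before settling in STATE_DEV_RUNNING once the gadget
 * driver is registered. raw_release() sets STATE_DEV_CLOSED; unrecoverable
 * errors (an overflowing event queue, a failed driver registration or a
 * failed usb_ep_queue()) set STATE_DEV_FAILED.
 */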
157
158 struct raw_dev {
159 struct kref count;
160 spinlock_t lock;
161
162 const char *udc_name;
163 struct usb_gadget_driver driver;
164
165 /* Reference to misc device: */
166 struct device *dev;
167
168 /* Make driver names unique */
169 int driver_id_number;
170
171 /* Protected by lock: */
172 enum dev_state state;
173 bool gadget_registered;
174 struct usb_gadget *gadget;
175 struct usb_request *req;
176 bool ep0_in_pending;
177 bool ep0_out_pending;
178 bool ep0_urb_queued;
179 ssize_t ep0_status;
180 struct raw_ep eps[USB_RAW_EPS_NUM_MAX];
181 int eps_num;
182
183 struct completion ep0_done;
184 struct raw_event_queue queue;
185 };
186
187 static struct raw_dev *dev_new(void)
188 {
189 struct raw_dev *dev;
190
191 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
192 if (!dev)
193 return NULL;
194 /* Matches kref_put() in raw_release(). */
195 kref_init(&dev->count);
196 spin_lock_init(&dev->lock);
197 init_completion(&dev->ep0_done);
198 raw_event_queue_init(&dev->queue);
199 dev->driver_id_number = -1;
200 return dev;
201 }
202
203 static void dev_free(struct kref *kref)
204 {
205 struct raw_dev *dev = container_of(kref, struct raw_dev, count);
206 int i;
207
208 kfree(dev->udc_name);
209 kfree(dev->driver.udc_name);
210 kfree(dev->driver.driver.name);
211 if (dev->driver_id_number >= 0)
212 ida_free(&driver_id_numbers, dev->driver_id_number);
213 if (dev->req) {
214 if (dev->ep0_urb_queued)
215 usb_ep_dequeue(dev->gadget->ep0, dev->req);
216 usb_ep_free_request(dev->gadget->ep0, dev->req);
217 }
218 raw_event_queue_destroy(&dev->queue);
219 for (i = 0; i < dev->eps_num; i++) {
220 if (dev->eps[i].state == STATE_EP_DISABLED)
221 continue;
222 usb_ep_disable(dev->eps[i].ep);
223 usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
224 kfree(dev->eps[i].ep->desc);
225 dev->eps[i].state = STATE_EP_DISABLED;
226 }
227 kfree(dev);
228 }
229
230 /*----------------------------------------------------------------------*/
231
232 static int raw_queue_event(struct raw_dev *dev,
233 enum usb_raw_event_type type, size_t length, const void *data)
234 {
235 int ret = 0;
236 unsigned long flags;
237
238 ret = raw_event_queue_add(&dev->queue, type, length, data);
239 if (ret < 0) {
240 spin_lock_irqsave(&dev->lock, flags);
241 dev->state = STATE_DEV_FAILED;
242 spin_unlock_irqrestore(&dev->lock, flags);
243 }
244 return ret;
245 }
246
247 static void gadget_ep0_complete(struct usb_ep *ep, struct usb_request *req)
248 {
249 struct raw_dev *dev = req->context;
250 unsigned long flags;
251
252 spin_lock_irqsave(&dev->lock, flags);
253 if (req->status)
254 dev->ep0_status = req->status;
255 else
256 dev->ep0_status = req->actual;
257 if (dev->ep0_in_pending)
258 dev->ep0_in_pending = false;
259 else
260 dev->ep0_out_pending = false;
261 spin_unlock_irqrestore(&dev->lock, flags);
262
263 complete(&dev->ep0_done);
264 }
265
266 static u8 get_ep_addr(const char *name)
267 {
268 /* If the endpoint has fixed function (named as e.g. "ep12out-bulk"),
269 * parse the endpoint address from its name. We deliberately use
270 * deprecated simple_strtoul() function here, as the number isn't
271 * followed by '\0' nor '\n'.
272 */
273 if (isdigit(name[2]))
274 return simple_strtoul(&name[2], NULL, 10);
275 /* Otherwise the endpoint is configurable (named as e.g. "ep-a"). */
276 return USB_RAW_EP_ADDR_ANY;
277 }
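
/*
 * For example (a non-normative sketch of the naming convention above):
 * get_ep_addr("ep1in") and get_ep_addr("ep12out-bulk") return 1 and 12
 * respectively, while get_ep_addr("ep-a") returns USB_RAW_EP_ADDR_ANY,
 * which raw_ioctl_ep_enable() treats as matching any requested address.
 */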
278
279 static int gadget_bind(struct usb_gadget *gadget,
280 struct usb_gadget_driver *driver)
281 {
282 int ret = 0, i = 0;
283 struct raw_dev *dev = container_of(driver, struct raw_dev, driver);
284 struct usb_request *req;
285 struct usb_ep *ep;
286 unsigned long flags;
287
288 if (strcmp(gadget->name, dev->udc_name) != 0)
289 return -ENODEV;
290
291 set_gadget_data(gadget, dev);
292 req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
293 if (!req) {
294 dev_err(&gadget->dev, "usb_ep_alloc_request failed\n");
295 set_gadget_data(gadget, NULL);
296 return -ENOMEM;
297 }
298
299 spin_lock_irqsave(&dev->lock, flags);
300 dev->req = req;
301 dev->req->context = dev;
302 dev->req->complete = gadget_ep0_complete;
303 dev->gadget = gadget;
304 gadget_for_each_ep(ep, dev->gadget) {
305 dev->eps[i].ep = ep;
306 dev->eps[i].addr = get_ep_addr(ep->name);
307 dev->eps[i].state = STATE_EP_DISABLED;
308 i++;
309 }
310 dev->eps_num = i;
311 spin_unlock_irqrestore(&dev->lock, flags);
312
313 ret = raw_queue_event(dev, USB_RAW_EVENT_CONNECT, 0, NULL);
314 if (ret < 0) {
315 dev_err(&gadget->dev, "failed to queue event\n");
316 set_gadget_data(gadget, NULL);
317 return ret;
318 }
319
320 /* Matches kref_put() in gadget_unbind(). */
321 kref_get(&dev->count);
322 return ret;
323 }
324
325 static void gadget_unbind(struct usb_gadget *gadget)
326 {
327 struct raw_dev *dev = get_gadget_data(gadget);
328
329 set_gadget_data(gadget, NULL);
330 /* Matches kref_get() in gadget_bind(). */
331 kref_put(&dev->count, dev_free);
332 }
333
334 static int gadget_setup(struct usb_gadget *gadget,
335 const struct usb_ctrlrequest *ctrl)
336 {
337 int ret = 0;
338 struct raw_dev *dev = get_gadget_data(gadget);
339 unsigned long flags;
340
341 spin_lock_irqsave(&dev->lock, flags);
342 if (dev->state != STATE_DEV_RUNNING) {
343 dev_err(&gadget->dev, "ignoring, device is not running\n");
344 ret = -ENODEV;
345 goto out_unlock;
346 }
347 if (dev->ep0_in_pending || dev->ep0_out_pending) {
348 dev_dbg(&gadget->dev, "stalling, request already pending\n");
349 ret = -EBUSY;
350 goto out_unlock;
351 }
352 if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength)
353 dev->ep0_in_pending = true;
354 else
355 dev->ep0_out_pending = true;
356 spin_unlock_irqrestore(&dev->lock, flags);
357
358 ret = raw_queue_event(dev, USB_RAW_EVENT_CONTROL, sizeof(*ctrl), ctrl);
359 if (ret < 0)
360 dev_err(&gadget->dev, "failed to queue event\n");
361 goto out;
362
363 out_unlock:
364 spin_unlock_irqrestore(&dev->lock, flags);
365 out:
366 return ret;
367 }
368
369 /* These are currently unused but present in case UDC driver requires them. */
370 static void gadget_disconnect(struct usb_gadget *gadget) { }
371 static void gadget_suspend(struct usb_gadget *gadget) { }
372 static void gadget_resume(struct usb_gadget *gadget) { }
373 static void gadget_reset(struct usb_gadget *gadget) { }
374
375 /*----------------------------------------------------------------------*/
376
377 static struct miscdevice raw_misc_device;
378
379 static int raw_open(struct inode *inode, struct file *fd)
380 {
381 struct raw_dev *dev;
382
383 /* Nonblocking I/O is not supported yet. */
384 if (fd->f_flags & O_NONBLOCK)
385 return -EINVAL;
386
387 dev = dev_new();
388 if (!dev)
389 return -ENOMEM;
390 fd->private_data = dev;
391 dev->state = STATE_DEV_OPENED;
392 dev->dev = raw_misc_device.this_device;
393 return 0;
394 }
395
396 static int raw_release(struct inode *inode, struct file *fd)
397 {
398 int ret = 0;
399 struct raw_dev *dev = fd->private_data;
400 unsigned long flags;
401 bool unregister = false;
402
403 spin_lock_irqsave(&dev->lock, flags);
404 dev->state = STATE_DEV_CLOSED;
405 if (!dev->gadget) {
406 spin_unlock_irqrestore(&dev->lock, flags);
407 goto out_put;
408 }
409 if (dev->gadget_registered)
410 unregister = true;
411 dev->gadget_registered = false;
412 spin_unlock_irqrestore(&dev->lock, flags);
413
414 if (unregister) {
415 ret = usb_gadget_unregister_driver(&dev->driver);
416 if (ret != 0)
417 dev_err(dev->dev,
418 "usb_gadget_unregister_driver() failed with %d\n",
419 ret);
420 /* Matches kref_get() in raw_ioctl_run(). */
421 kref_put(&dev->count, dev_free);
422 }
423
424 out_put:
425 /* Matches dev_new() in raw_open(). */
426 kref_put(&dev->count, dev_free);
427 return ret;
428 }
429
430 /*----------------------------------------------------------------------*/
431
432 static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
433 {
434 int ret = 0;
435 int driver_id_number;
436 struct usb_raw_init arg;
437 char *udc_driver_name;
438 char *udc_device_name;
439 char *driver_driver_name;
440 unsigned long flags;
441
442 if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
443 return -EFAULT;
444
445 switch (arg.speed) {
446 case USB_SPEED_UNKNOWN:
447 arg.speed = USB_SPEED_HIGH;
448 break;
449 case USB_SPEED_LOW:
450 case USB_SPEED_FULL:
451 case USB_SPEED_HIGH:
452 case USB_SPEED_SUPER:
453 break;
454 default:
455 return -EINVAL;
456 }
457
458 driver_id_number = ida_alloc(&driver_id_numbers, GFP_KERNEL);
459 if (driver_id_number < 0)
460 return driver_id_number;
461
462 driver_driver_name = kmalloc(DRIVER_DRIVER_NAME_LENGTH_MAX, GFP_KERNEL);
463 if (!driver_driver_name) {
464 ret = -ENOMEM;
465 goto out_free_driver_id_number;
466 }
467 snprintf(driver_driver_name, DRIVER_DRIVER_NAME_LENGTH_MAX,
468 DRIVER_NAME ".%d", driver_id_number);
469
470 udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
471 if (!udc_driver_name) {
472 ret = -ENOMEM;
473 goto out_free_driver_driver_name;
474 }
475 ret = strscpy(udc_driver_name, &arg.driver_name[0],
476 UDC_NAME_LENGTH_MAX);
477 if (ret < 0)
478 goto out_free_udc_driver_name;
479 ret = 0;
480
481 udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
482 if (!udc_device_name) {
483 ret = -ENOMEM;
484 goto out_free_udc_driver_name;
485 }
486 ret = strscpy(udc_device_name, &arg.device_name[0],
487 UDC_NAME_LENGTH_MAX);
488 if (ret < 0)
489 goto out_free_udc_device_name;
490 ret = 0;
491
492 spin_lock_irqsave(&dev->lock, flags);
493 if (dev->state != STATE_DEV_OPENED) {
494 dev_dbg(dev->dev, "fail, device is not opened\n");
495 ret = -EINVAL;
496 goto out_unlock;
497 }
498 dev->udc_name = udc_driver_name;
499
500 dev->driver.function = DRIVER_DESC;
501 dev->driver.max_speed = arg.speed;
502 dev->driver.setup = gadget_setup;
503 dev->driver.disconnect = gadget_disconnect;
504 dev->driver.bind = gadget_bind;
505 dev->driver.unbind = gadget_unbind;
506 dev->driver.suspend = gadget_suspend;
507 dev->driver.resume = gadget_resume;
508 dev->driver.reset = gadget_reset;
509 dev->driver.driver.name = driver_driver_name;
510 dev->driver.udc_name = udc_device_name;
511 dev->driver.match_existing_only = 1;
512 dev->driver_id_number = driver_id_number;
513
514 dev->state = STATE_DEV_INITIALIZED;
515 spin_unlock_irqrestore(&dev->lock, flags);
516 return ret;
517
518 out_unlock:
519 spin_unlock_irqrestore(&dev->lock, flags);
520 out_free_udc_device_name:
521 kfree(udc_device_name);
522 out_free_udc_driver_name:
523 kfree(udc_driver_name);
524 out_free_driver_driver_name:
525 kfree(driver_driver_name);
526 out_free_driver_id_number:
527 ida_free(&driver_id_numbers, driver_id_number);
528 return ret;
529 }
530
531 static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
532 {
533 int ret = 0;
534 unsigned long flags;
535
536 if (value)
537 return -EINVAL;
538
539 spin_lock_irqsave(&dev->lock, flags);
540 if (dev->state != STATE_DEV_INITIALIZED) {
541 dev_dbg(dev->dev, "fail, device is not initialized\n");
542 ret = -EINVAL;
543 goto out_unlock;
544 }
545 dev->state = STATE_DEV_REGISTERING;
546 spin_unlock_irqrestore(&dev->lock, flags);
547
548 ret = usb_gadget_register_driver(&dev->driver);
549
550 spin_lock_irqsave(&dev->lock, flags);
551 if (ret) {
552 dev_err(dev->dev,
553 "fail, usb_gadget_register_driver returned %d\n", ret);
554 dev->state = STATE_DEV_FAILED;
555 goto out_unlock;
556 }
557 dev->gadget_registered = true;
558 dev->state = STATE_DEV_RUNNING;
559 /* Matches kref_put() in raw_release(). */
560 kref_get(&dev->count);
561
562 out_unlock:
563 spin_unlock_irqrestore(&dev->lock, flags);
564 return ret;
565 }
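
/*
 * A minimal userspace sketch of the two ioctls above, assuming the dummy_hcd
 * UDC with its default "dummy_udc"/"dummy_udc.0" names (see
 * Documentation/usb/raw-gadget.rst; error handling omitted):
 *
 *	struct usb_raw_init arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	strcpy((char *)arg.driver_name, "dummy_udc");
 *	strcpy((char *)arg.device_name, "dummy_udc.0");
 *	arg.speed = USB_SPEED_HIGH;
 *
 *	int fd = open("/dev/raw-gadget", O_RDWR);
 *	ioctl(fd, USB_RAW_IOCTL_INIT, &arg);
 *	ioctl(fd, USB_RAW_IOCTL_RUN, 0);
 */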
566
567 static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value)
568 {
569 struct usb_raw_event arg;
570 unsigned long flags;
571 struct usb_raw_event *event;
572 uint32_t length;
573
574 if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
575 return -EFAULT;
576
577 spin_lock_irqsave(&dev->lock, flags);
578 if (dev->state != STATE_DEV_RUNNING) {
579 dev_dbg(dev->dev, "fail, device is not running\n");
580 spin_unlock_irqrestore(&dev->lock, flags);
581 return -EINVAL;
582 }
583 if (!dev->gadget) {
584 dev_dbg(dev->dev, "fail, gadget is not bound\n");
585 spin_unlock_irqrestore(&dev->lock, flags);
586 return -EBUSY;
587 }
588 spin_unlock_irqrestore(&dev->lock, flags);
589
590 event = raw_event_queue_fetch(&dev->queue);
591 if (PTR_ERR(event) == -EINTR) {
592 dev_dbg(&dev->gadget->dev, "event fetching interrupted\n");
593 return -EINTR;
594 }
595 if (IS_ERR(event)) {
596 dev_err(&dev->gadget->dev, "failed to fetch event\n");
597 spin_lock_irqsave(&dev->lock, flags);
598 dev->state = STATE_DEV_FAILED;
599 spin_unlock_irqrestore(&dev->lock, flags);
600 return -ENODEV;
601 }
602 length = min(arg.length, event->length);
603 if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) {
604 kfree(event);
605 return -EFAULT;
606 }
607
608 kfree(event);
609 return 0;
610 }
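
/*
 * Userspace passes a buffer large enough for the event header plus the
 * expected payload; with this version of the driver the largest queued event
 * is a control request. A hedged sketch (fd as in the sketch above):
 *
 *	struct { struct usb_raw_event ev; struct usb_ctrlrequest ctrl; } e;
 *
 *	e.ev.length = sizeof(e.ctrl);
 *	ioctl(fd, USB_RAW_IOCTL_EVENT_FETCH, &e);
 *	// e.ev.type is now USB_RAW_EVENT_CONNECT or USB_RAW_EVENT_CONTROL;
 *	// for a control event, e.ctrl holds the SETUP packet.
 */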
611
612 static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr,
613 bool get_from_user)
614 {
615 void *data;
616
617 if (copy_from_user(io, ptr, sizeof(*io)))
618 return ERR_PTR(-EFAULT);
619 if (io->ep >= USB_RAW_EPS_NUM_MAX)
620 return ERR_PTR(-EINVAL);
621 if (!usb_raw_io_flags_valid(io->flags))
622 return ERR_PTR(-EINVAL);
623 if (io->length > PAGE_SIZE)
624 return ERR_PTR(-EINVAL);
625 if (get_from_user)
626 data = memdup_user(ptr + sizeof(*io), io->length);
627 else {
628 data = kmalloc(io->length, GFP_KERNEL);
629 if (!data)
630 data = ERR_PTR(-ENOMEM);
631 }
632 return data;
633 }
634
635 static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
636 void *data, bool in)
637 {
638 int ret = 0;
639 unsigned long flags;
640
641 spin_lock_irqsave(&dev->lock, flags);
642 if (dev->state != STATE_DEV_RUNNING) {
643 dev_dbg(dev->dev, "fail, device is not running\n");
644 ret = -EINVAL;
645 goto out_unlock;
646 }
647 if (!dev->gadget) {
648 dev_dbg(dev->dev, "fail, gadget is not bound\n");
649 ret = -EBUSY;
650 goto out_unlock;
651 }
652 if (dev->ep0_urb_queued) {
653 dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
654 ret = -EBUSY;
655 goto out_unlock;
656 }
657 if ((in && !dev->ep0_in_pending) ||
658 (!in && !dev->ep0_out_pending)) {
659 dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
660 ret = -EBUSY;
661 goto out_unlock;
662 }
663 if (WARN_ON(in && dev->ep0_out_pending)) {
664 ret = -ENODEV;
665 dev->state = STATE_DEV_FAILED;
666 goto out_done;
667 }
668 if (WARN_ON(!in && dev->ep0_in_pending)) {
669 ret = -ENODEV;
670 dev->state = STATE_DEV_FAILED;
671 goto out_done;
672 }
673
674 dev->req->buf = data;
675 dev->req->length = io->length;
676 dev->req->zero = usb_raw_io_flags_zero(io->flags);
677 dev->ep0_urb_queued = true;
678 spin_unlock_irqrestore(&dev->lock, flags);
679
680 ret = usb_ep_queue(dev->gadget->ep0, dev->req, GFP_KERNEL);
681 if (ret) {
682 dev_err(&dev->gadget->dev,
683 "fail, usb_ep_queue returned %d\n", ret);
684 spin_lock_irqsave(&dev->lock, flags);
685 dev->state = STATE_DEV_FAILED;
686 goto out_done;
687 }
688
689 ret = wait_for_completion_interruptible(&dev->ep0_done);
690 if (ret) {
691 dev_dbg(&dev->gadget->dev, "wait interrupted\n");
692 usb_ep_dequeue(dev->gadget->ep0, dev->req);
693 wait_for_completion(&dev->ep0_done);
694 spin_lock_irqsave(&dev->lock, flags);
695 goto out_done;
696 }
697
698 spin_lock_irqsave(&dev->lock, flags);
699 ret = dev->ep0_status;
700
701 out_done:
702 dev->ep0_urb_queued = false;
703 out_unlock:
704 spin_unlock_irqrestore(&dev->lock, flags);
705 return ret;
706 }
707
708 static int raw_ioctl_ep0_write(struct raw_dev *dev, unsigned long value)
709 {
710 int ret = 0;
711 void *data;
712 struct usb_raw_ep_io io;
713
714 data = raw_alloc_io_data(&io, (void __user *)value, true);
715 if (IS_ERR(data))
716 return PTR_ERR(data);
717 ret = raw_process_ep0_io(dev, &io, data, true);
718 kfree(data);
719 return ret;
720 }
721
722 static int raw_ioctl_ep0_read(struct raw_dev *dev, unsigned long value)
723 {
724 int ret = 0;
725 void *data;
726 struct usb_raw_ep_io io;
727 unsigned int length;
728
729 data = raw_alloc_io_data(&io, (void __user *)value, false);
730 if (IS_ERR(data))
731 return PTR_ERR(data);
732 ret = raw_process_ep0_io(dev, &io, data, false);
733 if (ret < 0)
734 goto free;
735
736 length = min(io.length, (unsigned int)ret);
737 if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
738 ret = -EFAULT;
739 else
740 ret = length;
741 free:
742 kfree(data);
743 return ret;
744 }
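
/*
 * Combined with USB_RAW_IOCTL_EVENT_FETCH, a typical endpoint zero exchange
 * from userspace looks roughly as follows (hedged sketch; ctrl is the
 * struct usb_ctrlrequest delivered in a USB_RAW_EVENT_CONTROL event, error
 * handling omitted):
 *
 *	struct { struct usb_raw_ep_io io; char data[256]; } r;
 *
 *	r.io.ep = 0;
 *	r.io.flags = 0;
 *	r.io.length = ctrl->wLength;
 *	if (ctrl->bRequestType & USB_DIR_IN) {
 *		// fill r.data with the reply, then send it to the host
 *		ioctl(fd, USB_RAW_IOCTL_EP0_WRITE, &r);
 *	} else {
 *		// receive the host's data (or a zero-length status stage)
 *		ioctl(fd, USB_RAW_IOCTL_EP0_READ, &r);
 *	}
 */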
745
746 static int raw_ioctl_ep0_stall(struct raw_dev *dev, unsigned long value)
747 {
748 int ret = 0;
749 unsigned long flags;
750
751 if (value)
752 return -EINVAL;
753 spin_lock_irqsave(&dev->lock, flags);
754 if (dev->state != STATE_DEV_RUNNING) {
755 dev_dbg(dev->dev, "fail, device is not running\n");
756 ret = -EINVAL;
757 goto out_unlock;
758 }
759 if (!dev->gadget) {
760 dev_dbg(dev->dev, "fail, gadget is not bound\n");
761 ret = -EBUSY;
762 goto out_unlock;
763 }
764 if (dev->ep0_urb_queued) {
765 dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
766 ret = -EBUSY;
767 goto out_unlock;
768 }
769 if (!dev->ep0_in_pending && !dev->ep0_out_pending) {
770 dev_dbg(&dev->gadget->dev, "fail, no request pending\n");
771 ret = -EBUSY;
772 goto out_unlock;
773 }
774
775 ret = usb_ep_set_halt(dev->gadget->ep0);
776 if (ret < 0)
777 dev_err(&dev->gadget->dev,
778 "fail, usb_ep_set_halt returned %d\n", ret);
779
780 if (dev->ep0_in_pending)
781 dev->ep0_in_pending = false;
782 else
783 dev->ep0_out_pending = false;
784
785 out_unlock:
786 spin_unlock_irqrestore(&dev->lock, flags);
787 return ret;
788 }
789
790 static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value)
791 {
792 int ret = 0, i;
793 unsigned long flags;
794 struct usb_endpoint_descriptor *desc;
795 struct raw_ep *ep;
796 bool ep_props_matched = false;
797
798 desc = memdup_user((void __user *)value, sizeof(*desc));
799 if (IS_ERR(desc))
800 return PTR_ERR(desc);
801
802 /*
803 * Endpoints with a maxpacket length of 0 can cause crashes in UDC
804 * drivers.
805 */
806 if (usb_endpoint_maxp(desc) == 0) {
807 dev_dbg(dev->dev, "fail, bad endpoint maxpacket\n");
808 kfree(desc);
809 return -EINVAL;
810 }
811
812 spin_lock_irqsave(&dev->lock, flags);
813 if (dev->state != STATE_DEV_RUNNING) {
814 dev_dbg(dev->dev, "fail, device is not running\n");
815 ret = -EINVAL;
816 goto out_free;
817 }
818 if (!dev->gadget) {
819 dev_dbg(dev->dev, "fail, gadget is not bound\n");
820 ret = -EBUSY;
821 goto out_free;
822 }
823
824 for (i = 0; i < dev->eps_num; i++) {
825 ep = &dev->eps[i];
826 if (ep->addr != usb_endpoint_num(desc) &&
827 ep->addr != USB_RAW_EP_ADDR_ANY)
828 continue;
829 if (!usb_gadget_ep_match_desc(dev->gadget, ep->ep, desc, NULL))
830 continue;
831 ep_props_matched = true;
832 if (ep->state != STATE_EP_DISABLED)
833 continue;
834 ep->ep->desc = desc;
835 ret = usb_ep_enable(ep->ep);
836 if (ret < 0) {
837 dev_err(&dev->gadget->dev,
838 "fail, usb_ep_enable returned %d\n", ret);
839 goto out_free;
840 }
841 ep->req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
842 if (!ep->req) {
843 dev_err(&dev->gadget->dev,
844 "fail, usb_ep_alloc_request failed\n");
845 usb_ep_disable(ep->ep);
846 ret = -ENOMEM;
847 goto out_free;
848 }
849 ep->state = STATE_EP_ENABLED;
850 ep->ep->driver_data = ep;
851 ret = i;
852 goto out_unlock;
853 }
854
855 if (!ep_props_matched) {
856 dev_dbg(&dev->gadget->dev, "fail, bad endpoint descriptor\n");
857 ret = -EINVAL;
858 } else {
859 dev_dbg(&dev->gadget->dev, "fail, no endpoints available\n");
860 ret = -EBUSY;
861 }
862
863 out_free:
864 kfree(desc);
865 out_unlock:
866 spin_unlock_irqrestore(&dev->lock, flags);
867 return ret;
868 }
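
/*
 * From userspace, an endpoint is enabled by passing the same endpoint
 * descriptor that is advertised to the host; the return value is the handle
 * used with USB_RAW_IOCTL_EP_WRITE/EP_READ/EP_DISABLE. A hedged sketch for a
 * high-speed bulk IN endpoint:
 *
 *	struct usb_endpoint_descriptor desc = {
 *		.bLength		= USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType	= USB_DT_ENDPOINT,
 *		.bEndpointAddress	= USB_DIR_IN | 1,
 *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize		= __cpu_to_le16(512),
 *	};
 *	int ep_handle = ioctl(fd, USB_RAW_IOCTL_EP_ENABLE, &desc);
 */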
869
870 static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
871 {
872 int ret = 0, i = value;
873 unsigned long flags;
874
875 spin_lock_irqsave(&dev->lock, flags);
876 if (dev->state != STATE_DEV_RUNNING) {
877 dev_dbg(dev->dev, "fail, device is not running\n");
878 ret = -EINVAL;
879 goto out_unlock;
880 }
881 if (!dev->gadget) {
882 dev_dbg(dev->dev, "fail, gadget is not bound\n");
883 ret = -EBUSY;
884 goto out_unlock;
885 }
886 if (i < 0 || i >= dev->eps_num) {
887 dev_dbg(dev->dev, "fail, invalid endpoint\n");
888 ret = -EBUSY;
889 goto out_unlock;
890 }
891 if (dev->eps[i].state == STATE_EP_DISABLED) {
892 dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
893 ret = -EINVAL;
894 goto out_unlock;
895 }
896 if (dev->eps[i].disabling) {
897 dev_dbg(&dev->gadget->dev,
898 "fail, disable already in progress\n");
899 ret = -EINVAL;
900 goto out_unlock;
901 }
902 if (dev->eps[i].urb_queued) {
903 dev_dbg(&dev->gadget->dev,
904 "fail, waiting for urb completion\n");
905 ret = -EINVAL;
906 goto out_unlock;
907 }
908 dev->eps[i].disabling = true;
909 spin_unlock_irqrestore(&dev->lock, flags);
910
911 usb_ep_disable(dev->eps[i].ep);
912
913 spin_lock_irqsave(&dev->lock, flags);
914 usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
915 kfree(dev->eps[i].ep->desc);
916 dev->eps[i].state = STATE_EP_DISABLED;
917 dev->eps[i].disabling = false;
918
919 out_unlock:
920 spin_unlock_irqrestore(&dev->lock, flags);
921 return ret;
922 }
923
924 static int raw_ioctl_ep_set_clear_halt_wedge(struct raw_dev *dev,
925 unsigned long value, bool set, bool halt)
926 {
927 int ret = 0, i = value;
928 unsigned long flags;
929
930 spin_lock_irqsave(&dev->lock, flags);
931 if (dev->state != STATE_DEV_RUNNING) {
932 dev_dbg(dev->dev, "fail, device is not running\n");
933 ret = -EINVAL;
934 goto out_unlock;
935 }
936 if (!dev->gadget) {
937 dev_dbg(dev->dev, "fail, gadget is not bound\n");
938 ret = -EBUSY;
939 goto out_unlock;
940 }
941 if (i < 0 || i >= dev->eps_num) {
942 dev_dbg(dev->dev, "fail, invalid endpoint\n");
943 ret = -EBUSY;
944 goto out_unlock;
945 }
946 if (dev->eps[i].state == STATE_EP_DISABLED) {
947 dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
948 ret = -EINVAL;
949 goto out_unlock;
950 }
951 if (dev->eps[i].disabling) {
952 dev_dbg(&dev->gadget->dev,
953 "fail, disable is in progress\n");
954 ret = -EINVAL;
955 goto out_unlock;
956 }
957 if (dev->eps[i].urb_queued) {
958 dev_dbg(&dev->gadget->dev,
959 "fail, waiting for urb completion\n");
960 ret = -EINVAL;
961 goto out_unlock;
962 }
963 if (usb_endpoint_xfer_isoc(dev->eps[i].ep->desc)) {
964 dev_dbg(&dev->gadget->dev,
965 "fail, can't halt/wedge ISO endpoint\n");
966 ret = -EINVAL;
967 goto out_unlock;
968 }
969
970 if (set && halt) {
971 ret = usb_ep_set_halt(dev->eps[i].ep);
972 if (ret < 0)
973 dev_err(&dev->gadget->dev,
974 "fail, usb_ep_set_halt returned %d\n", ret);
975 } else if (!set && halt) {
976 ret = usb_ep_clear_halt(dev->eps[i].ep);
977 if (ret < 0)
978 dev_err(&dev->gadget->dev,
979 "fail, usb_ep_clear_halt returned %d\n", ret);
980 } else if (set && !halt) {
981 ret = usb_ep_set_wedge(dev->eps[i].ep);
982 if (ret < 0)
983 dev_err(&dev->gadget->dev,
984 "fail, usb_ep_set_wedge returned %d\n", ret);
985 }
986
987 out_unlock:
988 spin_unlock_irqrestore(&dev->lock, flags);
989 return ret;
990 }
991
992 static void gadget_ep_complete(struct usb_ep *ep, struct usb_request *req)
993 {
994 struct raw_ep *r_ep = (struct raw_ep *)ep->driver_data;
995 struct raw_dev *dev = r_ep->dev;
996 unsigned long flags;
997
998 spin_lock_irqsave(&dev->lock, flags);
999 if (req->status)
1000 r_ep->status = req->status;
1001 else
1002 r_ep->status = req->actual;
1003 spin_unlock_irqrestore(&dev->lock, flags);
1004
1005 complete((struct completion *)req->context);
1006 }
1007
1008 static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
1009 void *data, bool in)
1010 {
1011 int ret = 0;
1012 unsigned long flags;
1013 struct raw_ep *ep;
1014 DECLARE_COMPLETION_ONSTACK(done);
1015
1016 spin_lock_irqsave(&dev->lock, flags);
1017 if (dev->state != STATE_DEV_RUNNING) {
1018 dev_dbg(dev->dev, "fail, device is not running\n");
1019 ret = -EINVAL;
1020 goto out_unlock;
1021 }
1022 if (!dev->gadget) {
1023 dev_dbg(dev->dev, "fail, gadget is not bound\n");
1024 ret = -EBUSY;
1025 goto out_unlock;
1026 }
1027 if (io->ep >= dev->eps_num) {
1028 dev_dbg(&dev->gadget->dev, "fail, invalid endpoint\n");
1029 ret = -EINVAL;
1030 goto out_unlock;
1031 }
1032 ep = &dev->eps[io->ep];
1033 if (ep->state != STATE_EP_ENABLED) {
1034 dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
1035 ret = -EBUSY;
1036 goto out_unlock;
1037 }
1038 if (ep->disabling) {
1039 dev_dbg(&dev->gadget->dev,
1040 "fail, endpoint is already being disabled\n");
1041 ret = -EBUSY;
1042 goto out_unlock;
1043 }
1044 if (ep->urb_queued) {
1045 dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
1046 ret = -EBUSY;
1047 goto out_unlock;
1048 }
1049 if (in != usb_endpoint_dir_in(ep->ep->desc)) {
1050 dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
1051 ret = -EINVAL;
1052 goto out_unlock;
1053 }
1054
1055 ep->dev = dev;
1056 ep->req->context = &done;
1057 ep->req->complete = gadget_ep_complete;
1058 ep->req->buf = data;
1059 ep->req->length = io->length;
1060 ep->req->zero = usb_raw_io_flags_zero(io->flags);
1061 ep->urb_queued = true;
1062 spin_unlock_irqrestore(&dev->lock, flags);
1063
1064 ret = usb_ep_queue(ep->ep, ep->req, GFP_KERNEL);
1065 if (ret) {
1066 dev_err(&dev->gadget->dev,
1067 "fail, usb_ep_queue returned %d\n", ret);
1068 spin_lock_irqsave(&dev->lock, flags);
1069 dev->state = STATE_DEV_FAILED;
1070 goto out_done;
1071 }
1072
1073 ret = wait_for_completion_interruptible(&done);
1074 if (ret) {
1075 dev_dbg(&dev->gadget->dev, "wait interrupted\n");
1076 usb_ep_dequeue(ep->ep, ep->req);
1077 wait_for_completion(&done);
1078 spin_lock_irqsave(&dev->lock, flags);
1079 goto out_done;
1080 }
1081
1082 spin_lock_irqsave(&dev->lock, flags);
1083 ret = ep->status;
1084
1085 out_done:
1086 ep->urb_queued = false;
1087 out_unlock:
1088 spin_unlock_irqrestore(&dev->lock, flags);
1089 return ret;
1090 }
1091
1092 static int raw_ioctl_ep_write(struct raw_dev *dev, unsigned long value)
1093 {
1094 int ret = 0;
1095 char *data;
1096 struct usb_raw_ep_io io;
1097
1098 data = raw_alloc_io_data(&io, (void __user *)value, true);
1099 if (IS_ERR(data))
1100 return PTR_ERR(data);
1101 ret = raw_process_ep_io(dev, &io, data, true);
1102 kfree(data);
1103 return ret;
1104 }
1105
1106 static int raw_ioctl_ep_read(struct raw_dev *dev, unsigned long value)
1107 {
1108 int ret = 0;
1109 char *data;
1110 struct usb_raw_ep_io io;
1111 unsigned int length;
1112
1113 data = raw_alloc_io_data(&io, (void __user *)value, false);
1114 if (IS_ERR(data))
1115 return PTR_ERR(data);
1116 ret = raw_process_ep_io(dev, &io, data, false);
1117 if (ret < 0)
1118 goto free;
1119
1120 length = min(io.length, (unsigned int)ret);
1121 if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
1122 ret = -EFAULT;
1123 else
1124 ret = length;
1125 free:
1126 kfree(data);
1127 return ret;
1128 }
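
/*
 * With the handle returned by USB_RAW_IOCTL_EP_ENABLE, non-control transfers
 * follow the same pattern as endpoint zero. A hedged sketch sending 512 bytes
 * on a bulk IN endpoint (fd and ep_handle as in the sketches above); the
 * ioctl returns the number of bytes actually transferred:
 *
 *	struct { struct usb_raw_ep_io io; char data[512]; } r;
 *
 *	r.io.ep = ep_handle;
 *	r.io.flags = 0;
 *	r.io.length = sizeof(r.data);
 *	memset(r.data, 0x42, sizeof(r.data));	// example payload
 *	int sent = ioctl(fd, USB_RAW_IOCTL_EP_WRITE, &r);
 */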
1129
1130 static int raw_ioctl_configure(struct raw_dev *dev, unsigned long value)
1131 {
1132 int ret = 0;
1133 unsigned long flags;
1134
1135 if (value)
1136 return -EINVAL;
1137 spin_lock_irqsave(&dev->lock, flags);
1138 if (dev->state != STATE_DEV_RUNNING) {
1139 dev_dbg(dev->dev, "fail, device is not running\n");
1140 ret = -EINVAL;
1141 goto out_unlock;
1142 }
1143 if (!dev->gadget) {
1144 dev_dbg(dev->dev, "fail, gadget is not bound\n");
1145 ret = -EBUSY;
1146 goto out_unlock;
1147 }
1148 usb_gadget_set_state(dev->gadget, USB_STATE_CONFIGURED);
1149
1150 out_unlock:
1151 spin_unlock_irqrestore(&dev->lock, flags);
1152 return ret;
1153 }
1154
1155 static int raw_ioctl_vbus_draw(struct raw_dev *dev, unsigned long value)
1156 {
1157 int ret = 0;
1158 unsigned long flags;
1159
1160 spin_lock_irqsave(&dev->lock, flags);
1161 if (dev->state != STATE_DEV_RUNNING) {
1162 dev_dbg(dev->dev, "fail, device is not running\n");
1163 ret = -EINVAL;
1164 goto out_unlock;
1165 }
1166 if (!dev->gadget) {
1167 dev_dbg(dev->dev, "fail, gadget is not bound\n");
1168 ret = -EBUSY;
1169 goto out_unlock;
1170 }
1171 usb_gadget_vbus_draw(dev->gadget, 2 * value);
1172
1173 out_unlock:
1174 spin_unlock_irqrestore(&dev->lock, flags);
1175 return ret;
1176 }
1177
1178 static void fill_ep_caps(struct usb_ep_caps *caps,
1179 struct usb_raw_ep_caps *raw_caps)
1180 {
1181 raw_caps->type_control = caps->type_control;
1182 raw_caps->type_iso = caps->type_iso;
1183 raw_caps->type_bulk = caps->type_bulk;
1184 raw_caps->type_int = caps->type_int;
1185 raw_caps->dir_in = caps->dir_in;
1186 raw_caps->dir_out = caps->dir_out;
1187 }
1188
1189 static void fill_ep_limits(struct usb_ep *ep, struct usb_raw_ep_limits *limits)
1190 {
1191 limits->maxpacket_limit = ep->maxpacket_limit;
1192 limits->max_streams = ep->max_streams;
1193 }
1194
1195 static int raw_ioctl_eps_info(struct raw_dev *dev, unsigned long value)
1196 {
1197 int ret = 0, i;
1198 unsigned long flags;
1199 struct usb_raw_eps_info *info;
1200 struct raw_ep *ep;
1201
1202 info = kzalloc(sizeof(*info), GFP_KERNEL);
1203 if (!info) {
1204 ret = -ENOMEM;
1205 goto out;
1206 }
1207
1208 spin_lock_irqsave(&dev->lock, flags);
1209 if (dev->state != STATE_DEV_RUNNING) {
1210 dev_dbg(dev->dev, "fail, device is not running\n");
1211 ret = -EINVAL;
1212 spin_unlock_irqrestore(&dev->lock, flags);
1213 goto out_free;
1214 }
1215 if (!dev->gadget) {
1216 dev_dbg(dev->dev, "fail, gadget is not bound\n");
1217 ret = -EBUSY;
1218 spin_unlock_irqrestore(&dev->lock, flags);
1219 goto out_free;
1220 }
1221
1222 for (i = 0; i < dev->eps_num; i++) {
1223 ep = &dev->eps[i];
1224 strscpy(&info->eps[i].name[0], ep->ep->name,
1225 USB_RAW_EP_NAME_MAX);
1226 info->eps[i].addr = ep->addr;
1227 fill_ep_caps(&ep->ep->caps, &info->eps[i].caps);
1228 fill_ep_limits(ep->ep, &info->eps[i].limits);
1229 }
1230 ret = dev->eps_num;
1231 spin_unlock_irqrestore(&dev->lock, flags);
1232
1233 if (copy_to_user((void __user *)value, info, sizeof(*info)))
1234 ret = -EFAULT;
1235
1236 out_free:
1237 kfree(info);
1238 out:
1239 return ret;
1240 }
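
/*
 * USB_RAW_IOCTL_EPS_INFO is typically called once the gadget is bound (after
 * the USB_RAW_EVENT_CONNECT event) to discover what the UDC offers before
 * enabling endpoints; a hedged sketch:
 *
 *	struct usb_raw_eps_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	int num = ioctl(fd, USB_RAW_IOCTL_EPS_INFO, &info);
 *	for (int i = 0; i < num; i++)
 *		printf("%s: addr %u, maxpacket %u\n", info.eps[i].name,
 *		       info.eps[i].addr, info.eps[i].limits.maxpacket_limit);
 */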
1241
1242 static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value)
1243 {
1244 struct raw_dev *dev = fd->private_data;
1245 int ret = 0;
1246
1247 if (!dev)
1248 return -EBUSY;
1249
1250 switch (cmd) {
1251 case USB_RAW_IOCTL_INIT:
1252 ret = raw_ioctl_init(dev, value);
1253 break;
1254 case USB_RAW_IOCTL_RUN:
1255 ret = raw_ioctl_run(dev, value);
1256 break;
1257 case USB_RAW_IOCTL_EVENT_FETCH:
1258 ret = raw_ioctl_event_fetch(dev, value);
1259 break;
1260 case USB_RAW_IOCTL_EP0_WRITE:
1261 ret = raw_ioctl_ep0_write(dev, value);
1262 break;
1263 case USB_RAW_IOCTL_EP0_READ:
1264 ret = raw_ioctl_ep0_read(dev, value);
1265 break;
1266 case USB_RAW_IOCTL_EP_ENABLE:
1267 ret = raw_ioctl_ep_enable(dev, value);
1268 break;
1269 case USB_RAW_IOCTL_EP_DISABLE:
1270 ret = raw_ioctl_ep_disable(dev, value);
1271 break;
1272 case USB_RAW_IOCTL_EP_WRITE:
1273 ret = raw_ioctl_ep_write(dev, value);
1274 break;
1275 case USB_RAW_IOCTL_EP_READ:
1276 ret = raw_ioctl_ep_read(dev, value);
1277 break;
1278 case USB_RAW_IOCTL_CONFIGURE:
1279 ret = raw_ioctl_configure(dev, value);
1280 break;
1281 case USB_RAW_IOCTL_VBUS_DRAW:
1282 ret = raw_ioctl_vbus_draw(dev, value);
1283 break;
1284 case USB_RAW_IOCTL_EPS_INFO:
1285 ret = raw_ioctl_eps_info(dev, value);
1286 break;
1287 case USB_RAW_IOCTL_EP0_STALL:
1288 ret = raw_ioctl_ep0_stall(dev, value);
1289 break;
1290 case USB_RAW_IOCTL_EP_SET_HALT:
1291 ret = raw_ioctl_ep_set_clear_halt_wedge(
1292 dev, value, true, true);
1293 break;
1294 case USB_RAW_IOCTL_EP_CLEAR_HALT:
1295 ret = raw_ioctl_ep_set_clear_halt_wedge(
1296 dev, value, false, true);
1297 break;
1298 case USB_RAW_IOCTL_EP_SET_WEDGE:
1299 ret = raw_ioctl_ep_set_clear_halt_wedge(
1300 dev, value, true, false);
1301 break;
1302 default:
1303 ret = -EINVAL;
1304 }
1305
1306 return ret;
1307 }
1308
1309 /*----------------------------------------------------------------------*/
1310
1311 static const struct file_operations raw_fops = {
1312 .open = raw_open,
1313 .unlocked_ioctl = raw_ioctl,
1314 .compat_ioctl = raw_ioctl,
1315 .release = raw_release,
1316 .llseek = no_llseek,
1317 };
1318
1319 static struct miscdevice raw_misc_device = {
1320 .minor = MISC_DYNAMIC_MINOR,
1321 .name = DRIVER_NAME,
1322 .fops = &raw_fops,
1323 };
1324
1325 module_misc_device(raw_misc_device);
1326