// SPDX-License-Identifier: GPL-2.0
/*
 * usb.c - Hardware dependent module for USB
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sysfs.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include "most/core.h"

#define USB_MTU			512
#define NO_ISOCHRONOUS_URB	0
#define AV_PACKETS_PER_XACT	2
#define BUF_CHAIN_SIZE		0xFFFF
#define MAX_NUM_ENDPOINTS	30
#define MAX_SUFFIX_LEN		10
#define MAX_STRING_LEN		80
#define MAX_BUF_SIZE		0xFFFF

#define USB_VENDOR_ID_SMSC	0x0424  /* VID: SMSC */
#define USB_DEV_ID_BRDG		0xC001  /* PID: USB Bridge */
#define USB_DEV_ID_OS81118	0xCF18  /* PID: USB OS81118 */
#define USB_DEV_ID_OS81119	0xCF19  /* PID: USB OS81119 */
#define USB_DEV_ID_OS81210	0xCF30  /* PID: USB OS81210 */
/* DRCI Addresses */
#define DRCI_REG_NI_STATE	0x0100
#define DRCI_REG_PACKET_BW	0x0101
#define DRCI_REG_NODE_ADDR	0x0102
#define DRCI_REG_NODE_POS	0x0103
#define DRCI_REG_MEP_FILTER	0x0140
#define DRCI_REG_HASH_TBL0	0x0141
#define DRCI_REG_HASH_TBL1	0x0142
#define DRCI_REG_HASH_TBL2	0x0143
#define DRCI_REG_HASH_TBL3	0x0144
#define DRCI_REG_HW_ADDR_HI	0x0145
#define DRCI_REG_HW_ADDR_MI	0x0146
#define DRCI_REG_HW_ADDR_LO	0x0147
#define DRCI_REG_BASE		0x1100
#define DRCI_COMMAND		0x02
#define DRCI_READ_REQ		0xA0
#define DRCI_WRITE_REQ		0xA1

/**
 * struct most_dci_obj - Direct Communication Interface
 * @dev: position in sysfs
 * @usb_device: pointer to the usb device
 * @reg_addr: register address for arbitrary DCI access
 */
struct most_dci_obj {
	struct device dev;
	struct usb_device *usb_device;
	u16 reg_addr;
};

#define to_dci_obj(p) container_of(p, struct most_dci_obj, dev)

struct most_dev;

struct clear_hold_work {
	struct work_struct ws;
	struct most_dev *mdev;
	unsigned int channel;
	int pipe;
};

#define to_clear_hold_work(w) container_of(w, struct clear_hold_work, ws)

/**
 * struct most_dev - holds all USB interface specific data
 * @usb_device: pointer to usb device
 * @iface: hardware interface
 * @cap: channel capabilities
 * @conf: channel configuration
 * @dci: direct communication interface of hardware
 * @ep_address: endpoint address table
 * @description: device description
 * @suffix: suffix for channel name
 * @channel_lock: synchronize channel access
 * @padding_active: indicates channel uses padding
 * @is_channel_healthy: health status table of each channel
 * @clear_work: per-channel work to clear a halted endpoint
 * @busy_urbs: list of anchored items
 * @io_mutex: synchronize I/O with disconnect
 * @link_stat_timer: timer for link status reports
 * @poll_work_obj: work for polling link status
 * @on_netinfo: callback that passes link state and hardware address to the core
 */
struct most_dev {
	struct usb_device *usb_device;
	struct most_interface iface;
	struct most_channel_capability *cap;
	struct most_channel_config *conf;
	struct most_dci_obj *dci;
	u8 *ep_address;
	char description[MAX_STRING_LEN];
	char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
	spinlock_t channel_lock[MAX_NUM_ENDPOINTS]; /* sync channel access */
	bool padding_active[MAX_NUM_ENDPOINTS];
	bool is_channel_healthy[MAX_NUM_ENDPOINTS];
	struct clear_hold_work clear_work[MAX_NUM_ENDPOINTS];
	struct usb_anchor *busy_urbs;
	struct mutex io_mutex;
	struct timer_list link_stat_timer;
	struct work_struct poll_work_obj;
	void (*on_netinfo)(struct most_interface *most_iface,
			   unsigned char link_state, unsigned char *addrs);
};

#define to_mdev(d) container_of(d, struct most_dev, iface)
#define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)

static void wq_clear_halt(struct work_struct *wq_obj);
static void wq_netinfo(struct work_struct *wq_obj);

/**
 * drci_rd_reg - read a DCI register
 * @dev: usb device
 * @reg: register address
 * @buf: buffer to store data
 *
 * This reads data from the INIC's direct register communication interface.
 */
static inline int drci_rd_reg(struct usb_device *dev, u16 reg, u16 *buf)
{
	int retval;
	__le16 *dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
	u8 req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;

	if (!dma_buf)
		return -ENOMEM;

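	/*
	 * usb_control_msg() needs a kmalloc'ed buffer it can DMA to;
	 * the register value arrives in little-endian byte order.
	 */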
	retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
				 DRCI_READ_REQ, req_type,
				 0x0000,
				 reg, dma_buf, sizeof(*dma_buf), 5 * HZ);
	*buf = le16_to_cpu(*dma_buf);
	kfree(dma_buf);

	return retval;
}

/**
 * drci_wr_reg - write a DCI register
 * @dev: usb device
 * @reg: register address
 * @data: data to write
 *
 * This writes data to the INIC's direct register communication interface.
 */
static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
{
	return usb_control_msg(dev,
			       usb_sndctrlpipe(dev, 0),
			       DRCI_WRITE_REQ,
			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       data,
			       reg,
			       NULL,
			       0,
			       5 * HZ);
}

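/*
 * start_sync_ep() writes 1 to the per-endpoint DCI command register
 * (DRCI_REG_BASE + DRCI_COMMAND + ep * 16, i.e. a 16-byte register bank
 * per endpoint) to start synchronization of that endpoint.
 */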
static inline int start_sync_ep(struct usb_device *usb_dev, u16 ep)
{
	return drci_wr_reg(usb_dev, DRCI_REG_BASE + DRCI_COMMAND + ep * 16, 1);
}

/**
 * get_stream_frame_size - calculate frame size of current configuration
 * @cfg: channel configuration
 */
static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
{
	unsigned int frame_size = 0;
	unsigned int sub_size = cfg->subbuffer_size;

	if (!sub_size) {
		pr_warn("Misconfig: Subbuffer size zero.\n");
		return frame_size;
	}
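	/*
	 * Isochronous channels use a fixed number of sub-buffers per USB
	 * transaction; synchronous channels take it from packets_per_xact,
	 * where 0xFF means "as many whole sub-buffers as fit into one
	 * 512-byte USB transaction".
	 */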
	switch (cfg->data_type) {
	case MOST_CH_ISOC:
		frame_size = AV_PACKETS_PER_XACT * sub_size;
		break;
	case MOST_CH_SYNC:
		if (cfg->packets_per_xact == 0) {
			pr_warn("Misconfig: Packets per XACT zero\n");
			frame_size = 0;
		} else if (cfg->packets_per_xact == 0xFF) {
			frame_size = (USB_MTU / sub_size) * sub_size;
		} else {
			frame_size = cfg->packets_per_xact * sub_size;
		}
		break;
	default:
		pr_warn("Query frame size of non-streaming channel\n");
		break;
	}
	return frame_size;
}

/**
 * hdm_poison_channel - mark buffers of this channel as invalid
 * @iface: pointer to the interface
 * @channel: channel ID
 *
 * This unlinks all URBs submitted to the HCD,
 * calls the associated completion function of the core and removes
 * them from the list.
 *
 * Returns 0 on success or error code otherwise.
 */
static int hdm_poison_channel(struct most_interface *iface, int channel)
{
	struct most_dev *mdev = to_mdev(iface);
	unsigned long flags;
	spinlock_t *lock; /* temp. lock */

	if (unlikely(!iface)) {
		dev_warn(&mdev->usb_device->dev, "Poison: Bad interface.\n");
		return -EIO;
	}
	if (unlikely(channel < 0 || channel >= iface->num_channels)) {
		dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
		return -ECHRNG;
	}

	lock = mdev->channel_lock + channel;
	spin_lock_irqsave(lock, flags);
	mdev->is_channel_healthy[channel] = false;
	spin_unlock_irqrestore(lock, flags);

	cancel_work_sync(&mdev->clear_work[channel].ws);

	mutex_lock(&mdev->io_mutex);
	usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
	mdev->padding_active[channel] = false;

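	/*
	 * Poisoning the asynchronous channel also stops the link-status
	 * polling that was set up by hdm_request_netinfo().
	 */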
	if (mdev->conf[channel].data_type == MOST_CH_ASYNC) {
		del_timer_sync(&mdev->link_stat_timer);
		cancel_work_sync(&mdev->poll_work_obj);
	}
	mutex_unlock(&mdev->io_mutex);
	return 0;
}

/**
 * hdm_add_padding - add padding bytes
 * @mdev: most device
 * @channel: channel ID
 * @mbo: buffer object
 *
 * This inserts the INIC hardware specific padding bytes into a streaming
 * channel's buffer.
 */
static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
{
	struct most_channel_config *conf = &mdev->conf[channel];
	unsigned int frame_size = get_stream_frame_size(conf);
	unsigned int j, num_frames;

	if (!frame_size)
		return -EIO;
	num_frames = mbo->buffer_length / frame_size;

	if (num_frames < 1) {
		dev_err(&mdev->usb_device->dev,
			"Missed minimal transfer unit.\n");
		return -EIO;
	}

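	/*
	 * Spread the frames out in place, back to front, so that each frame
	 * starts on a USB_MTU (512 byte) boundary; the gap behind each frame
	 * is the padding expected by the hardware.
	 */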
	for (j = num_frames - 1; j > 0; j--)
		memmove(mbo->virt_address + j * USB_MTU,
			mbo->virt_address + j * frame_size,
			frame_size);
	mbo->buffer_length = num_frames * USB_MTU;
	return 0;
}

/**
 * hdm_remove_padding - remove padding bytes
 * @mdev: most device
 * @channel: channel ID
 * @mbo: buffer object
 *
 * This takes the INIC hardware specific padding bytes off a streaming
 * channel's buffer.
 */
static int hdm_remove_padding(struct most_dev *mdev, int channel,
			      struct mbo *mbo)
{
	struct most_channel_config *const conf = &mdev->conf[channel];
	unsigned int frame_size = get_stream_frame_size(conf);
	unsigned int j, num_frames;

	if (!frame_size)
		return -EIO;
	num_frames = mbo->processed_length / USB_MTU;

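	/*
	 * Pack the received frames back together, dropping the padding
	 * behind each one.
	 */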
	for (j = 1; j < num_frames; j++)
		memmove(mbo->virt_address + frame_size * j,
			mbo->virt_address + USB_MTU * j,
			frame_size);

	mbo->processed_length = frame_size * num_frames;
	return 0;
}

/**
 * hdm_write_completion - completion function for submitted Tx URBs
 * @urb: the URB that has been completed
 *
 * This checks the status of the completed URB. In case the URB has been
 * unlinked before, it is immediately freed. On any other error the MBO
 * transfer flag is set. On success it frees allocated resources and calls
 * the completion function.
 *
 * Context: interrupt!
 */
static void hdm_write_completion(struct urb *urb)
{
	struct mbo *mbo = urb->context;
	struct most_dev *mdev = to_mdev(mbo->ifp);
	unsigned int channel = mbo->hdm_channel_id;
	spinlock_t *lock = mdev->channel_lock + channel;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	mbo->processed_length = 0;
	mbo->status = MBO_E_INVAL;
	if (likely(mdev->is_channel_healthy[channel])) {
		switch (urb->status) {
		case 0:
		case -ESHUTDOWN:
			mbo->processed_length = urb->actual_length;
			mbo->status = MBO_SUCCESS;
			break;
		case -EPIPE:
			dev_warn(&mdev->usb_device->dev,
				 "Broken pipe on ep%02x\n",
				 mdev->ep_address[channel]);
			mdev->is_channel_healthy[channel] = false;
			mdev->clear_work[channel].pipe = urb->pipe;
			schedule_work(&mdev->clear_work[channel].ws);
			break;
		case -ENODEV:
		case -EPROTO:
			mbo->status = MBO_E_CLOSE;
			break;
		}
	}

	spin_unlock_irqrestore(lock, flags);

	if (likely(mbo->complete))
		mbo->complete(mbo);
	usb_free_urb(urb);
}

/**
 * hdm_read_completion - completion function for submitted Rx URBs
 * @urb: the URB that has been completed
 *
 * This checks the status of the completed URB. In case the URB has been
 * unlinked before, it is immediately freed. On any other error the MBO transfer
 * flag is set. On success it frees allocated resources, removes
 * padding bytes -if necessary- and calls the completion function.
 *
 * Context: interrupt!
 *
 * **************************************************************************
 *                   Error codes returned in urb->status
 *                   or in iso_frame_desc[n].status (for ISO)
 * *************************************************************************
 *
 * USB device drivers may only test urb status values in completion handlers.
 * This is because otherwise there would be a race between HCDs updating
 * these values on one CPU, and device drivers testing them on another CPU.
 *
 * A transfer's actual_length may be positive even when an error has been
 * reported.  That's because transfers often involve several packets, so that
 * one or more packets could finish before an error stops further endpoint I/O.
 *
 * For isochronous URBs, the urb status value is non-zero only if the URB is
 * unlinked, the device is removed, the host controller is disabled or the total
 * transferred length is less than the requested length and the URB_SHORT_NOT_OK
 * flag is set.  Completion handlers for isochronous URBs should only see
 * urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
 * Individual frame descriptor status fields may report more status codes.
 *
 *
 * 0			Transfer completed successfully
 *
 * -ENOENT		URB was synchronously unlinked by usb_unlink_urb
 *
 * -EINPROGRESS		URB still pending, no results yet
 *			(That is, if drivers see this it's a bug.)
 *
 * -EPROTO (*, **)	a) bitstuff error
 *			b) no response packet received within the
 *			   prescribed bus turn-around time
 *			c) unknown USB error
 *
 * -EILSEQ (*, **)	a) CRC mismatch
 *			b) no response packet received within the
 *			   prescribed bus turn-around time
 *			c) unknown USB error
 *
 *			Note that often the controller hardware does not
 *			distinguish among cases a), b), and c), so a
 *			driver cannot tell whether there was a protocol
 *			error, a failure to respond (often caused by
 *			device disconnect), or some other fault.
 *
 * -ETIME (**)		No response packet received within the prescribed
 *			bus turn-around time.  This error may instead be
 *			reported as -EPROTO or -EILSEQ.
 *
 * -ETIMEDOUT		Synchronous USB message functions use this code
 *			to indicate timeout expired before the transfer
 *			completed, and no other error was reported by HC.
 *
 * -EPIPE (**)		Endpoint stalled.  For non-control endpoints,
 *			reset this status with usb_clear_halt().
 *
 * -ECOMM		During an IN transfer, the host controller
 *			received data from an endpoint faster than it
 *			could be written to system memory
 *
 * -ENOSR		During an OUT transfer, the host controller
 *			could not retrieve data from system memory fast
 *			enough to keep up with the USB data rate
 *
 * -EOVERFLOW (*)	The amount of data returned by the endpoint was
 *			greater than either the max packet size of the
 *			endpoint or the remaining buffer size.  "Babble".
 *
 * -EREMOTEIO		The data read from the endpoint did not fill the
 *			specified buffer, and URB_SHORT_NOT_OK was set in
 *			urb->transfer_flags.
 *
 * -ENODEV		Device was removed.  Often preceded by a burst of
 *			other errors, since the hub driver doesn't detect
 *			device removal events immediately.
 *
 * -EXDEV		ISO transfer only partially completed
 *			(only set in iso_frame_desc[n].status, not urb->status)
 *
 * -EINVAL		ISO madness, if this happens: Log off and go home
 *
 * -ECONNRESET		URB was asynchronously unlinked by usb_unlink_urb
 *
 * -ESHUTDOWN		The device or host controller has been disabled due
 *			to some problem that could not be worked around,
 *			such as a physical disconnect.
 *
 *
 * (*) Error codes like -EPROTO, -EILSEQ and -EOVERFLOW normally indicate
 * hardware problems such as bad devices (including firmware) or cables.
 *
 * (**) This is also one of several codes that different kinds of host
 * controller use to indicate a transfer has failed because of device
 * disconnect.  In the interval before the hub driver starts disconnect
 * processing, devices may receive such fault reports for every request.
 *
 * See <https://www.kernel.org/doc/Documentation/driver-api/usb/error-codes.rst>
 */
static void hdm_read_completion(struct urb *urb)
{
	struct mbo *mbo = urb->context;
	struct most_dev *mdev = to_mdev(mbo->ifp);
	unsigned int channel = mbo->hdm_channel_id;
	struct device *dev = &mdev->usb_device->dev;
	spinlock_t *lock = mdev->channel_lock + channel;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	mbo->processed_length = 0;
	mbo->status = MBO_E_INVAL;
	if (likely(mdev->is_channel_healthy[channel])) {
		switch (urb->status) {
		case 0:
		case -ESHUTDOWN:
			mbo->processed_length = urb->actual_length;
			mbo->status = MBO_SUCCESS;
			if (mdev->padding_active[channel] &&
			    hdm_remove_padding(mdev, channel, mbo)) {
				mbo->processed_length = 0;
				mbo->status = MBO_E_INVAL;
			}
			break;
		case -EPIPE:
			dev_warn(dev, "Broken pipe on ep%02x\n",
				 mdev->ep_address[channel]);
			mdev->is_channel_healthy[channel] = false;
			mdev->clear_work[channel].pipe = urb->pipe;
			schedule_work(&mdev->clear_work[channel].ws);
			break;
		case -ENODEV:
		case -EPROTO:
			mbo->status = MBO_E_CLOSE;
			break;
		case -EOVERFLOW:
			dev_warn(dev, "Babble on ep%02x\n",
				 mdev->ep_address[channel]);
			break;
		}
	}

	spin_unlock_irqrestore(lock, flags);

	if (likely(mbo->complete))
		mbo->complete(mbo);
	usb_free_urb(urb);
}

/**
 * hdm_enqueue - receive a buffer to be used for data transfer
 * @iface: interface to enqueue to
 * @channel: ID of the channel
 * @mbo: pointer to the buffer object
 *
 * This allocates a new URB and fills it according to the channel
 * that is being used for transmission of data. Before the URB is
 * submitted it is stored in the private anchor list.
 *
 * Returns 0 on success. On any error the URB is freed and an error code
 * is returned.
 *
 * Context: Could in _some_ cases be interrupt!
 */
static int hdm_enqueue(struct most_interface *iface, int channel,
		       struct mbo *mbo)
{
	struct most_dev *mdev;
	struct most_channel_config *conf;
	int retval = 0;
	struct urb *urb;
	unsigned long length;
	void *virt_address;

	if (unlikely(!iface || !mbo))
		return -EIO;
	if (unlikely(iface->num_channels <= channel || channel < 0))
		return -ECHRNG;

	mdev = to_mdev(iface);
	conf = &mdev->conf[channel];

	mutex_lock(&mdev->io_mutex);
	if (!mdev->usb_device) {
		retval = -ENODEV;
		goto unlock_io_mutex;
	}

	urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC);
	if (!urb) {
		retval = -ENOMEM;
		goto unlock_io_mutex;
	}

	if ((conf->direction & MOST_CH_TX) && mdev->padding_active[channel] &&
	    hdm_add_padding(mdev, channel, mbo)) {
		retval = -EIO;
		goto err_free_urb;
	}

	urb->transfer_dma = mbo->bus_address;
	virt_address = mbo->virt_address;
	length = mbo->buffer_length;

	if (conf->direction & MOST_CH_TX) {
		usb_fill_bulk_urb(urb, mdev->usb_device,
				  usb_sndbulkpipe(mdev->usb_device,
						  mdev->ep_address[channel]),
				  virt_address,
				  length,
				  hdm_write_completion,
				  mbo);
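		/*
		 * Control and asynchronous (packet) transfers are delimited
		 * by short or zero-length packets, so make sure a ZLP goes
		 * out when the buffer is a multiple of the endpoint size;
		 * streaming channels must not be padded this way.
		 */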
		if (conf->data_type != MOST_CH_ISOC &&
		    conf->data_type != MOST_CH_SYNC)
			urb->transfer_flags |= URB_ZERO_PACKET;
	} else {
		usb_fill_bulk_urb(urb, mdev->usb_device,
				  usb_rcvbulkpipe(mdev->usb_device,
						  mdev->ep_address[channel]),
				  virt_address,
				  length + conf->extra_len,
				  hdm_read_completion,
				  mbo);
	}
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	usb_anchor_urb(urb, &mdev->busy_urbs[channel]);

	retval = usb_submit_urb(urb, GFP_KERNEL);
	if (retval) {
		dev_err(&mdev->usb_device->dev,
			"URB submit failed with error %d.\n", retval);
		goto err_unanchor_urb;
	}
	goto unlock_io_mutex;

err_unanchor_urb:
	usb_unanchor_urb(urb);
err_free_urb:
	usb_free_urb(urb);
unlock_io_mutex:
	mutex_unlock(&mdev->io_mutex);
	return retval;
}

static void *hdm_dma_alloc(struct mbo *mbo, u32 size)
{
	struct most_dev *mdev = to_mdev(mbo->ifp);

	return usb_alloc_coherent(mdev->usb_device, size, GFP_KERNEL,
				  &mbo->bus_address);
}

static void hdm_dma_free(struct mbo *mbo, u32 size)
{
	struct most_dev *mdev = to_mdev(mbo->ifp);

	usb_free_coherent(mdev->usb_device, size, mbo->virt_address,
			  mbo->bus_address);
}

/**
 * hdm_configure_channel - receive channel configuration from core
 * @iface: interface
 * @channel: channel ID
 * @conf: structure that holds the configuration information
 *
 * The attached network interface controller (NIC) supports a padding mode
 * to avoid short packets on USB, hence increasing the performance due to a
 * lower interrupt load. This mode is the default for synchronous data and can
 * be switched on for isochronous data. In case padding is active the
 * driver needs to know the frame size of the payload in order to calculate
 * the number of bytes it needs to pad when transmitting or to cut off when
 * receiving data.
 */
static int hdm_configure_channel(struct most_interface *iface, int channel,
				 struct most_channel_config *conf)
{
	unsigned int num_frames;
	unsigned int frame_size;
	struct most_dev *mdev = to_mdev(iface);
	struct device *dev = &mdev->usb_device->dev;

	mdev->is_channel_healthy[channel] = true;
	mdev->clear_work[channel].channel = channel;
	mdev->clear_work[channel].mdev = mdev;
	INIT_WORK(&mdev->clear_work[channel].ws, wq_clear_halt);

	if (unlikely(!iface || !conf)) {
		dev_err(dev, "Bad interface or config pointer.\n");
		return -EINVAL;
	}
	if (unlikely(channel < 0 || channel >= iface->num_channels)) {
		dev_err(dev, "Channel ID out of range.\n");
		return -EINVAL;
	}
	if (!conf->num_buffers || !conf->buffer_size) {
		dev_err(dev, "Misconfig: buffer size or #buffers zero.\n");
		return -EINVAL;
	}

	if (conf->data_type != MOST_CH_SYNC &&
	    !(conf->data_type == MOST_CH_ISOC &&
	      conf->packets_per_xact != 0xFF)) {
		mdev->padding_active[channel] = false;
		/*
		 * Since the NIC's padding mode is not going to be
		 * used, we can skip the frame size calculations and
		 * move directly on to exit.
		 */
		goto exit;
	}

	mdev->padding_active[channel] = true;

	frame_size = get_stream_frame_size(conf);
	if (frame_size == 0 || frame_size > USB_MTU) {
		dev_warn(dev, "Misconfig: frame size wrong\n");
		return -EINVAL;
	}

	num_frames = conf->buffer_size / frame_size;

	if (conf->buffer_size % frame_size) {
		u16 old_size = conf->buffer_size;

		conf->buffer_size = num_frames * frame_size;
		dev_warn(dev, "%s: fixed buffer size (%d -> %d)\n",
			 mdev->suffix[channel], old_size, conf->buffer_size);
	}

	/* calculate extra length to comply w/ HW padding */
	conf->extra_len = num_frames * (USB_MTU - frame_size);

exit:
	mdev->conf[channel] = *conf;
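	/*
	 * Issue the DCI sync command for the asynchronous channel's
	 * endpoint once the new configuration has been stored.
	 */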
	if (conf->data_type == MOST_CH_ASYNC) {
		u16 ep = mdev->ep_address[channel];

		if (start_sync_ep(mdev->usb_device, ep) < 0)
			dev_warn(dev, "sync for ep%02x failed\n", ep);
	}
	return 0;
}

/**
 * hdm_request_netinfo - request network information
 * @iface: pointer to interface
 * @channel: channel ID
 * @on_netinfo: callback that receives the link state and hardware address
 *
 * This is used as trigger to set up the link status timer that
 * polls for the NI state of the INIC every 2 seconds.
 */
static void hdm_request_netinfo(struct most_interface *iface, int channel,
				void (*on_netinfo)(struct most_interface *,
						   unsigned char,
						   unsigned char *))
{
	struct most_dev *mdev;

	BUG_ON(!iface);
	mdev = to_mdev(iface);
	mdev->on_netinfo = on_netinfo;
	if (!on_netinfo)
		return;

	mdev->link_stat_timer.expires = jiffies + HZ;
	mod_timer(&mdev->link_stat_timer, mdev->link_stat_timer.expires);
}

/**
 * link_stat_timer_handler - schedule work obtaining mac address and link status
 * @t: pointer to the timer that fired
 *
 * The handler runs in interrupt context. That's why we need to defer the
 * tasks to a work queue.
 */
static void link_stat_timer_handler(struct timer_list *t)
{
	struct most_dev *mdev = from_timer(mdev, t, link_stat_timer);

	schedule_work(&mdev->poll_work_obj);
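	/* Re-arm the timer; the NI state is polled every two seconds. */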
	mdev->link_stat_timer.expires = jiffies + (2 * HZ);
	add_timer(&mdev->link_stat_timer);
}

/**
 * wq_netinfo - work queue function to deliver latest networking information
 * @wq_obj: object that holds data for our deferred work to do
 *
 * This retrieves the network interface status of the USB INIC
 */
static void wq_netinfo(struct work_struct *wq_obj)
{
	struct most_dev *mdev = to_mdev_from_work(wq_obj);
	struct usb_device *usb_device = mdev->usb_device;
	struct device *dev = &usb_device->dev;
	u16 hi, mi, lo, link;
	u8 hw_addr[6];

	if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
		dev_err(dev, "Vendor request 'hw_addr_hi' failed\n");
		return;
	}

	if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
		dev_err(dev, "Vendor request 'hw_addr_mid' failed\n");
		return;
	}

	if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
		dev_err(dev, "Vendor request 'hw_addr_low' failed\n");
		return;
	}

	if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
		dev_err(dev, "Vendor request 'link status' failed\n");
		return;
	}

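	/*
	 * Assemble the EUI-48 hardware address from the three 16-bit DCI
	 * registers, most significant byte first.
	 */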
	hw_addr[0] = hi >> 8;
	hw_addr[1] = hi;
	hw_addr[2] = mi >> 8;
	hw_addr[3] = mi;
	hw_addr[4] = lo >> 8;
	hw_addr[5] = lo;

	if (mdev->on_netinfo)
		mdev->on_netinfo(&mdev->iface, link, hw_addr);
}

/**
 * wq_clear_halt - work queue function
 * @wq_obj: work_struct object to execute
 *
 * This sends a clear_halt to the given USB pipe.
 */
static void wq_clear_halt(struct work_struct *wq_obj)
{
	struct clear_hold_work *clear_work = to_clear_hold_work(wq_obj);
	struct most_dev *mdev = clear_work->mdev;
	unsigned int channel = clear_work->channel;
	int pipe = clear_work->pipe;

	mutex_lock(&mdev->io_mutex);
	most_stop_enqueue(&mdev->iface, channel);
	usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
	if (usb_clear_halt(mdev->usb_device, pipe))
		dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");

	/* If the functional stall condition has been set on an
	 * asynchronous rx channel, we need to clear the tx channel
	 * too, since the hardware runs its clean-up sequence on both
	 * channels, as they are physically one on the network.
	 *
	 * The USB interface that exposes the asynchronous channels
	 * always contains exactly two endpoints.
	 */
	if (mdev->conf[channel].data_type == MOST_CH_ASYNC &&
	    mdev->conf[channel].direction == MOST_CH_RX) {
		int peer = 1 - channel;
		int snd_pipe = usb_sndbulkpipe(mdev->usb_device,
					       mdev->ep_address[peer]);

		usb_clear_halt(mdev->usb_device, snd_pipe);
	}
	mdev->is_channel_healthy[channel] = true;
	most_resume_enqueue(&mdev->iface, channel);
	mutex_unlock(&mdev->io_mutex);
}

/**
 * hdm_usb_fops - file operation table for USB driver
 */
static const struct file_operations hdm_usb_fops = {
	.owner = THIS_MODULE,
};

/**
 * usbid - ID table for HCD device probing
 */
static const struct usb_device_id usbid[] = {
	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_BRDG), },
	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81118), },
	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81119), },
	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81210), },
	{ } /* Terminating entry */
};

struct regs {
	const char *name;
	u16 reg;
};

static const struct regs ro_regs[] = {
	{ "ni_state", DRCI_REG_NI_STATE },
	{ "packet_bandwidth", DRCI_REG_PACKET_BW },
	{ "node_address", DRCI_REG_NODE_ADDR },
	{ "node_position", DRCI_REG_NODE_POS },
};

static const struct regs rw_regs[] = {
	{ "mep_filter", DRCI_REG_MEP_FILTER },
	{ "mep_hash0", DRCI_REG_HASH_TBL0 },
	{ "mep_hash1", DRCI_REG_HASH_TBL1 },
	{ "mep_hash2", DRCI_REG_HASH_TBL2 },
	{ "mep_hash3", DRCI_REG_HASH_TBL3 },
	{ "mep_eui48_hi", DRCI_REG_HW_ADDR_HI },
	{ "mep_eui48_mi", DRCI_REG_HW_ADDR_MI },
	{ "mep_eui48_lo", DRCI_REG_HW_ADDR_LO },
};

static int get_stat_reg_addr(const struct regs *regs, int size,
			     const char *name, u16 *reg_addr)
{
	int i;

	for (i = 0; i < size; i++) {
		if (!strcmp(name, regs[i].name)) {
			*reg_addr = regs[i].reg;
			return 0;
		}
	}
	return -EFAULT;
}

#define get_static_reg_addr(regs, name, reg_addr) \
	get_stat_reg_addr(regs, ARRAY_SIZE(regs), name, reg_addr)

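/*
 * The "arb_address"/"arb_value" attribute pair implements arbitrary DCI
 * access: write a register address to arb_address, then read or write
 * arb_value to access that register. The remaining attributes map to the
 * registers listed in ro_regs/rw_regs, except sync_ep, which issues the
 * per-endpoint sync command.
 */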
static ssize_t value_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	const char *name = attr->attr.name;
	struct most_dci_obj *dci_obj = to_dci_obj(dev);
	u16 val;
	u16 reg_addr;
	int err;

	if (!strcmp(name, "arb_address"))
		return snprintf(buf, PAGE_SIZE, "%04x\n", dci_obj->reg_addr);

	if (!strcmp(name, "arb_value"))
		reg_addr = dci_obj->reg_addr;
	else if (get_static_reg_addr(ro_regs, name, &reg_addr) &&
		 get_static_reg_addr(rw_regs, name, &reg_addr))
		return -EFAULT;

	err = drci_rd_reg(dci_obj->usb_device, reg_addr, &val);
	if (err < 0)
		return err;

	return snprintf(buf, PAGE_SIZE, "%04x\n", val);
}

static ssize_t value_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u16 val;
	u16 reg_addr;
	const char *name = attr->attr.name;
	struct most_dci_obj *dci_obj = to_dci_obj(dev);
	struct usb_device *usb_dev = dci_obj->usb_device;
	int err = kstrtou16(buf, 16, &val);

	if (err)
		return err;

	if (!strcmp(name, "arb_address")) {
		dci_obj->reg_addr = val;
		return count;
	}

	if (!strcmp(name, "arb_value"))
		err = drci_wr_reg(usb_dev, dci_obj->reg_addr, val);
	else if (!strcmp(name, "sync_ep"))
		err = start_sync_ep(usb_dev, val);
	else if (!get_static_reg_addr(rw_regs, name, &reg_addr))
		err = drci_wr_reg(usb_dev, reg_addr, val);
	else
		return -EFAULT;

	if (err < 0)
		return err;

	return count;
}

static DEVICE_ATTR(ni_state, 0444, value_show, NULL);
static DEVICE_ATTR(packet_bandwidth, 0444, value_show, NULL);
static DEVICE_ATTR(node_address, 0444, value_show, NULL);
static DEVICE_ATTR(node_position, 0444, value_show, NULL);
static DEVICE_ATTR(sync_ep, 0200, NULL, value_store);
static DEVICE_ATTR(mep_filter, 0644, value_show, value_store);
static DEVICE_ATTR(mep_hash0, 0644, value_show, value_store);
static DEVICE_ATTR(mep_hash1, 0644, value_show, value_store);
static DEVICE_ATTR(mep_hash2, 0644, value_show, value_store);
static DEVICE_ATTR(mep_hash3, 0644, value_show, value_store);
static DEVICE_ATTR(mep_eui48_hi, 0644, value_show, value_store);
static DEVICE_ATTR(mep_eui48_mi, 0644, value_show, value_store);
static DEVICE_ATTR(mep_eui48_lo, 0644, value_show, value_store);
static DEVICE_ATTR(arb_address, 0644, value_show, value_store);
static DEVICE_ATTR(arb_value, 0644, value_show, value_store);

static struct attribute *dci_attrs[] = {
	&dev_attr_ni_state.attr,
	&dev_attr_packet_bandwidth.attr,
	&dev_attr_node_address.attr,
	&dev_attr_node_position.attr,
	&dev_attr_sync_ep.attr,
	&dev_attr_mep_filter.attr,
	&dev_attr_mep_hash0.attr,
	&dev_attr_mep_hash1.attr,
	&dev_attr_mep_hash2.attr,
	&dev_attr_mep_hash3.attr,
	&dev_attr_mep_eui48_hi.attr,
	&dev_attr_mep_eui48_mi.attr,
	&dev_attr_mep_eui48_lo.attr,
	&dev_attr_arb_address.attr,
	&dev_attr_arb_value.attr,
	NULL,
};

static struct attribute_group dci_attr_group = {
	.attrs = dci_attrs,
};

static const struct attribute_group *dci_attr_groups[] = {
	&dci_attr_group,
	NULL,
};

static void release_dci(struct device *dev)
{
	struct most_dci_obj *dci = to_dci_obj(dev);

	kfree(dci);
}

/**
 * hdm_probe - probe function of USB device driver
 * @interface: Interface of the attached USB device
 * @id: Pointer to the USB ID table.
 *
 * This allocates and initializes the device instance, adds the new
 * entry to the internal list, scans the USB descriptors and registers
 * the interface with the core.
 * Additionally, the DCI objects are created and the hardware is sync'd.
 *
 * Returns 0 on success or a negative error code otherwise.
 */
static int
hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
	struct usb_host_interface *usb_iface_desc = interface->cur_altsetting;
	struct usb_device *usb_dev = interface_to_usbdev(interface);
	struct device *dev = &usb_dev->dev;
	struct most_dev *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	unsigned int i;
	unsigned int num_endpoints;
	struct most_channel_capability *tmp_cap;
	struct usb_endpoint_descriptor *ep_desc;
	int ret = 0;

	if (!mdev)
		goto err_out_of_memory;

	usb_set_intfdata(interface, mdev);
	num_endpoints = usb_iface_desc->desc.bNumEndpoints;
	mutex_init(&mdev->io_mutex);
	INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
	timer_setup(&mdev->link_stat_timer, link_stat_timer_handler, 0);

	mdev->usb_device = usb_dev;
	mdev->link_stat_timer.expires = jiffies + (2 * HZ);

	mdev->iface.mod = hdm_usb_fops.owner;
	mdev->iface.driver_dev = &interface->dev;
	mdev->iface.interface = ITYPE_USB;
	mdev->iface.configure = hdm_configure_channel;
	mdev->iface.request_netinfo = hdm_request_netinfo;
	mdev->iface.enqueue = hdm_enqueue;
	mdev->iface.poison_channel = hdm_poison_channel;
	mdev->iface.dma_alloc = hdm_dma_alloc;
	mdev->iface.dma_free = hdm_dma_free;
	mdev->iface.description = mdev->description;
	mdev->iface.num_channels = num_endpoints;

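	/*
	 * The description mirrors the interface's position in the USB
	 * topology, e.g. "1-1.3:1.0" for bus 1, device path 1.3,
	 * configuration 1, interface 0 (cf. the sysfs path printed below).
	 */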
	snprintf(mdev->description, sizeof(mdev->description),
		 "%d-%s:%d.%d",
		 usb_dev->bus->busnum,
		 usb_dev->devpath,
		 usb_dev->config->desc.bConfigurationValue,
		 usb_iface_desc->desc.bInterfaceNumber);

	mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
	if (!mdev->conf)
		goto err_free_mdev;

	mdev->cap = kcalloc(num_endpoints, sizeof(*mdev->cap), GFP_KERNEL);
	if (!mdev->cap)
		goto err_free_conf;

	mdev->iface.channel_vector = mdev->cap;
	mdev->ep_address =
		kcalloc(num_endpoints, sizeof(*mdev->ep_address), GFP_KERNEL);
	if (!mdev->ep_address)
		goto err_free_cap;

	mdev->busy_urbs =
		kcalloc(num_endpoints, sizeof(*mdev->busy_urbs), GFP_KERNEL);
	if (!mdev->busy_urbs)
		goto err_free_ep_address;

	tmp_cap = mdev->cap;
	for (i = 0; i < num_endpoints; i++) {
		ep_desc = &usb_iface_desc->endpoint[i].desc;
		mdev->ep_address[i] = ep_desc->bEndpointAddress;
		mdev->padding_active[i] = false;
		mdev->is_channel_healthy[i] = true;

		snprintf(&mdev->suffix[i][0], MAX_SUFFIX_LEN, "ep%02x",
			 mdev->ep_address[i]);

		tmp_cap->name_suffix = &mdev->suffix[i][0];
		tmp_cap->buffer_size_packet = MAX_BUF_SIZE;
		tmp_cap->buffer_size_streaming = MAX_BUF_SIZE;
		tmp_cap->num_buffers_packet = BUF_CHAIN_SIZE;
		tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
		tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
				     MOST_CH_ISOC | MOST_CH_SYNC;
		if (usb_endpoint_dir_in(ep_desc))
			tmp_cap->direction = MOST_CH_RX;
		else
			tmp_cap->direction = MOST_CH_TX;
		tmp_cap++;
		init_usb_anchor(&mdev->busy_urbs[i]);
		spin_lock_init(&mdev->channel_lock[i]);
	}
	dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
		   le16_to_cpu(usb_dev->descriptor.idVendor),
		   le16_to_cpu(usb_dev->descriptor.idProduct),
		   usb_dev->bus->busnum,
		   usb_dev->devnum);

	dev_notice(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
		   usb_dev->bus->busnum,
		   usb_dev->devpath,
		   usb_dev->config->desc.bConfigurationValue,
		   usb_iface_desc->desc.bInterfaceNumber);

	ret = most_register_interface(&mdev->iface);
	if (ret)
		goto err_free_busy_urbs;

	mutex_lock(&mdev->io_mutex);
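	/*
	 * Create the "dci" child device only for the OS81118/OS81119/OS81210
	 * INICs; the plain USB bridge (0xC001) is skipped.
	 */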
	if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81118 ||
	    le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81119 ||
	    le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81210) {
		mdev->dci = kzalloc(sizeof(*mdev->dci), GFP_KERNEL);
		if (!mdev->dci) {
			mutex_unlock(&mdev->io_mutex);
			most_deregister_interface(&mdev->iface);
			ret = -ENOMEM;
			goto err_free_busy_urbs;
		}

		mdev->dci->dev.init_name = "dci";
		mdev->dci->dev.parent = &mdev->iface.dev;
		mdev->dci->dev.groups = dci_attr_groups;
		mdev->dci->dev.release = release_dci;
		if (device_register(&mdev->dci->dev)) {
			mutex_unlock(&mdev->io_mutex);
			most_deregister_interface(&mdev->iface);
			ret = -ENOMEM;
			goto err_free_dci;
		}
		mdev->dci->usb_device = mdev->usb_device;
	}
	mutex_unlock(&mdev->io_mutex);
	return 0;
err_free_dci:
	kfree(mdev->dci);
err_free_busy_urbs:
	kfree(mdev->busy_urbs);
err_free_ep_address:
	kfree(mdev->ep_address);
err_free_cap:
	kfree(mdev->cap);
err_free_conf:
	kfree(mdev->conf);
err_free_mdev:
	kfree(mdev);
err_out_of_memory:
	if (ret == 0 || ret == -ENOMEM) {
		ret = -ENOMEM;
		dev_err(dev, "out of memory\n");
	}
	return ret;
}

/**
 * hdm_disconnect - disconnect function of USB device driver
 * @interface: Interface of the attached USB device
 *
 * This deregisters the interface with the core, removes the kernel timer
 * and frees resources.
 *
 * Context: hub kernel thread
 */
static void hdm_disconnect(struct usb_interface *interface)
{
	struct most_dev *mdev = usb_get_intfdata(interface);

	mutex_lock(&mdev->io_mutex);
	usb_set_intfdata(interface, NULL);
	mdev->usb_device = NULL;
	mutex_unlock(&mdev->io_mutex);

	del_timer_sync(&mdev->link_stat_timer);
	cancel_work_sync(&mdev->poll_work_obj);

	device_unregister(&mdev->dci->dev);
	most_deregister_interface(&mdev->iface);

	kfree(mdev->busy_urbs);
	kfree(mdev->cap);
	kfree(mdev->conf);
	kfree(mdev->ep_address);
	kfree(mdev);
}

static struct usb_driver hdm_usb = {
	.name = "hdm_usb",
	.id_table = usbid,
	.probe = hdm_probe,
	.disconnect = hdm_disconnect,
};

module_usb_driver(hdm_usb);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_DESCRIPTION("HDM_4_USB");