// SPDX-License-Identifier: GPL-2.0
/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * How transfers work: get a buffer, break it up into segments (segment
 * size is a multiple of the maxpacket size). For each segment issue a
 * segment request (struct wa_xfer_*), then send the data buffer if
 * out or nothing if in (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 *
 * Sounds simple, but it is a pain to implement.
 *
 *
 * ENTRY POINTS
 *
 *   FIXME
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 *   FIXME
 *
 * THIS CODE IS DISGUSTING
 *
 *   You have been warned; this is my second try and I'm still not
 *   happy with it.
 *
 * NOTES:
 *
 *   - No iso
 *
 *   - Supports DMA xfers, control, bulk and maybe interrupt
 *
 *   - Does not recycle unused rpipes
 *
 *     An rpipe is assigned to an endpoint the first time it is used,
 *     and then it stays assigned until the endpoint is disabled
 *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *     (should be a mutex).
 *
 *     Two ways it could be done:
 *
 *     (a) set up a timer every time an rpipe's use count drops to 1
 *         (which means unused) or when a transfer ends. Reset the
 *         timer when an xfer is queued. If the timer expires, release
 *         the rpipe [see rpipe_ep_disable()].
 *
 *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()]
 *         and none are found, go over the list, check each rpipe's
 *         endpoint and activity record, and take it if it has not
 *         completed a transfer in the last x seconds.
 *
 *     However, because we have a limited set of resources
 *     (max-segments-at-the-same-time per xfer, xfers-per-rpipe,
 *     blocks-per-rpipe, rpipes-per-host), in the end we are going to
 *     have to rebuild all this around a scheduler, where we keep a
 *     list of transactions to do and schedule them based on the
 *     availability of the different required components (blocks,
 *     rpipes, segment slots, etc). Painful.
 */
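
/*
 * Illustrative sketch (not part of the driver): how a bulk OUT buffer is
 * split into segments, using hypothetical sizes (a 6000 byte buffer and a
 * 2048 byte segment size).  The real values come from the RPIPE descriptor;
 * see __wa_xfer_setup_sizes() and __wa_xfer_setup_segs().
 *
 *	size_t buf_len = 6000, seg_size = 2048;
 *	unsigned segs = DIV_ROUND_UP(buf_len, seg_size);   => 3 segments
 *
 *	seg 0: bytes    0..2047  (wa_xfer_* request, then data over DTO)
 *	seg 1: bytes 2048..4095
 *	seg 2: bytes 4096..5999  (bTransferSegment gets bit 0x80 set)
 */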
68 #include <linux/spinlock.h>
69 #include <linux/slab.h>
70 #include <linux/hash.h>
71 #include <linux/ratelimit.h>
72 #include <linux/export.h>
73 #include <linux/scatterlist.h>
74 
75 #include "wa-hc.h"
76 #include "wusbhc.h"
77 
78 enum {
79 	/* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
80 	WA_SEGS_MAX = 128,
81 };
82 
83 enum wa_seg_status {
84 	WA_SEG_NOTREADY,
85 	WA_SEG_READY,
86 	WA_SEG_DELAYED,
87 	WA_SEG_SUBMITTED,
88 	WA_SEG_PENDING,
89 	WA_SEG_DTI_PENDING,
90 	WA_SEG_DONE,
91 	WA_SEG_ERROR,
92 	WA_SEG_ABORTED,
93 };
94 
95 static void wa_xfer_delayed_run(struct wa_rpipe *);
96 static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);
97 
98 /*
99  * Life cycle governed by 'struct urb' (the refcount of the struct is
100  * that of the 'struct urb' and usb_free_urb() would free the whole
101  * struct).
102  */
103 struct wa_seg {
104 	struct urb tr_urb;		/* transfer request urb. */
105 	struct urb *isoc_pack_desc_urb;	/* for isoc packet descriptor. */
106 	struct urb *dto_urb;		/* for data output. */
107 	struct list_head list_node;	/* for rpipe->req_list */
108 	struct wa_xfer *xfer;		/* out xfer */
109 	u8 index;			/* which segment we are */
110 	int isoc_frame_count;	/* number of isoc frames in this segment. */
111 	int isoc_frame_offset;	/* starting frame offset in the xfer URB. */
112 	/* Isoc frame that the current transfer buffer corresponds to. */
113 	int isoc_frame_index;
114 	int isoc_size;	/* size of all isoc frames sent by this seg. */
115 	enum wa_seg_status status;
116 	ssize_t result;			/* bytes xfered or error */
117 	struct wa_xfer_hdr xfer_hdr;
118 };
119 
static inline void wa_seg_init(struct wa_seg *seg)
121 {
122 	usb_init_urb(&seg->tr_urb);
123 
124 	/* set the remaining memory to 0. */
125 	memset(((void *)seg) + sizeof(seg->tr_urb), 0,
126 		sizeof(*seg) - sizeof(seg->tr_urb));
127 }
128 
/*
 * Protected by xfer->lock
 */
133 struct wa_xfer {
134 	struct kref refcnt;
135 	struct list_head list_node;
136 	spinlock_t lock;
137 	u32 id;
138 
139 	struct wahc *wa;		/* Wire adapter we are plugged to */
140 	struct usb_host_endpoint *ep;
141 	struct urb *urb;		/* URB we are transferring for */
142 	struct wa_seg **seg;		/* transfer segments */
143 	u8 segs, segs_submitted, segs_done;
144 	unsigned is_inbound:1;
145 	unsigned is_dma:1;
146 	size_t seg_size;
147 	int result;
148 
149 	gfp_t gfp;			/* allocation mask */
150 
151 	struct wusb_dev *wusb_dev;	/* for activity timestamps */
152 };
153 
154 static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
155 	struct wa_seg *seg, int curr_iso_frame);
156 static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
157 		int starting_index, enum wa_seg_status status);
158 
static inline void wa_xfer_init(struct wa_xfer *xfer)
160 {
161 	kref_init(&xfer->refcnt);
162 	INIT_LIST_HEAD(&xfer->list_node);
163 	spin_lock_init(&xfer->lock);
164 }
165 
166 /*
167  * Destroy a transfer structure
168  *
169  * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
170  * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
171  */
static void wa_xfer_destroy(struct kref *_xfer)
173 {
174 	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
175 	if (xfer->seg) {
176 		unsigned cnt;
177 		for (cnt = 0; cnt < xfer->segs; cnt++) {
178 			struct wa_seg *seg = xfer->seg[cnt];
179 			if (seg) {
180 				usb_free_urb(seg->isoc_pack_desc_urb);
181 				if (seg->dto_urb) {
182 					kfree(seg->dto_urb->sg);
183 					usb_free_urb(seg->dto_urb);
184 				}
185 				usb_free_urb(&seg->tr_urb);
186 			}
187 		}
188 		kfree(xfer->seg);
189 	}
190 	kfree(xfer);
191 }
192 
static void wa_xfer_get(struct wa_xfer *xfer)
194 {
195 	kref_get(&xfer->refcnt);
196 }
197 
static void wa_xfer_put(struct wa_xfer *xfer)
199 {
200 	kref_put(&xfer->refcnt, wa_xfer_destroy);
201 }
202 
203 /*
204  * Try to get exclusive access to the DTO endpoint resource.  Return true
205  * if successful.
206  */
static inline int __wa_dto_try_get(struct wahc *wa)
208 {
209 	return (test_and_set_bit(0, &wa->dto_in_use) == 0);
210 }
211 
212 /* Release the DTO endpoint resource. */
static inline void __wa_dto_put(struct wahc *wa)
214 {
215 	clear_bit_unlock(0, &wa->dto_in_use);
216 }
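
/*
 * Illustrative sketch (not part of the driver) of how the DTO bit lock
 * above is meant to be used; the real callers are __wa_seg_submit() (via
 * __wa_xfer_submit()/__wa_xfer_delayed_run()) and wa_seg_dto_cb():
 *
 *	if (__wa_dto_try_get(wa)) {
 *		... submit a segment over the DTO endpoint ...
 *		__wa_dto_put(wa);
 *		wa_check_for_delayed_rpipes(wa);  (wake up any waiters)
 *	} else {
 *		wa_add_delayed_rpipe(wa, rpipe);  (retry when DTO is free)
 *	}
 */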
217 
218 /* Service RPIPEs that are waiting on the DTO resource. */
static void wa_check_for_delayed_rpipes(struct wahc *wa)
220 {
221 	unsigned long flags;
222 	int dto_waiting = 0;
223 	struct wa_rpipe *rpipe;
224 
225 	spin_lock_irqsave(&wa->rpipe_lock, flags);
226 	while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
227 		rpipe = list_first_entry(&wa->rpipe_delayed_list,
228 				struct wa_rpipe, list_node);
229 		__wa_xfer_delayed_run(rpipe, &dto_waiting);
230 		/* remove this RPIPE from the list if it is not waiting. */
231 		if (!dto_waiting) {
232 			pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
233 				__func__,
234 				le16_to_cpu(rpipe->descr.wRPipeIndex));
235 			list_del_init(&rpipe->list_node);
236 		}
237 	}
238 	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
239 }
240 
241 /* add this RPIPE to the end of the delayed RPIPE list. */
static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
243 {
244 	unsigned long flags;
245 
246 	spin_lock_irqsave(&wa->rpipe_lock, flags);
247 	/* add rpipe to the list if it is not already on it. */
248 	if (list_empty(&rpipe->list_node)) {
249 		pr_debug("%s: adding RPIPE %d to the delayed list.\n",
250 			__func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
251 		list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
252 	}
253 	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
254 }
255 
256 /*
257  * xfer is referenced
258  *
259  * xfer->lock has to be unlocked
260  *
261  * We take xfer->lock for setting the result; this is a barrier
262  * against drivers/usb/core/hcd.c:unlink1() being called after we call
263  * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
264  * reference to the transfer.
265  */
static void wa_xfer_giveback(struct wa_xfer *xfer)
267 {
268 	unsigned long flags;
269 
270 	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
271 	list_del_init(&xfer->list_node);
272 	usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
273 	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
274 	/* FIXME: segmentation broken -- kills DWA */
275 	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
276 	wa_put(xfer->wa);
277 	wa_xfer_put(xfer);
278 }
279 
280 /*
281  * xfer is referenced
282  *
283  * xfer->lock has to be unlocked
284  */
static void wa_xfer_completion(struct wa_xfer *xfer)
286 {
287 	if (xfer->wusb_dev)
288 		wusb_dev_put(xfer->wusb_dev);
289 	rpipe_put(xfer->ep->hcpriv);
290 	wa_xfer_giveback(xfer);
291 }
292 
293 /*
294  * Initialize a transfer's ID
295  *
296  * We need to use a sequential number; if we use the pointer or the
297  * hash of the pointer, it can repeat over sequential transfers and
298  * then it will confuse the HWA....wonder why in hell they put a 32
299  * bit handle in there then.
300  */
static void wa_xfer_id_init(struct wa_xfer *xfer)
302 {
303 	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
304 }
305 
306 /* Return the xfer's ID. */
static inline u32 wa_xfer_id(struct wa_xfer *xfer)
308 {
309 	return xfer->id;
310 }
311 
312 /* Return the xfer's ID in transport format (little endian). */
static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
314 {
315 	return cpu_to_le32(xfer->id);
316 }
317 
318 /*
319  * If transfer is done, wrap it up and return true
320  *
321  * xfer->lock has to be locked
322  */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
324 {
325 	struct device *dev = &xfer->wa->usb_iface->dev;
326 	unsigned result, cnt;
327 	struct wa_seg *seg;
328 	struct urb *urb = xfer->urb;
329 	unsigned found_short = 0;
330 
331 	result = xfer->segs_done == xfer->segs_submitted;
332 	if (result == 0)
333 		goto out;
334 	urb->actual_length = 0;
335 	for (cnt = 0; cnt < xfer->segs; cnt++) {
336 		seg = xfer->seg[cnt];
337 		switch (seg->status) {
338 		case WA_SEG_DONE:
339 			if (found_short && seg->result > 0) {
340 				dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
341 					xfer, wa_xfer_id(xfer), cnt,
342 					seg->result);
343 				urb->status = -EINVAL;
344 				goto out;
345 			}
346 			urb->actual_length += seg->result;
347 			if (!(usb_pipeisoc(xfer->urb->pipe))
348 				&& seg->result < xfer->seg_size
349 			    && cnt != xfer->segs-1)
350 				found_short = 1;
351 			dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
352 				"result %zu urb->actual_length %d\n",
353 				xfer, wa_xfer_id(xfer), seg->index, found_short,
354 				seg->result, urb->actual_length);
355 			break;
356 		case WA_SEG_ERROR:
357 			xfer->result = seg->result;
358 			dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
359 				xfer, wa_xfer_id(xfer), seg->index, seg->result,
360 				seg->result);
361 			goto out;
362 		case WA_SEG_ABORTED:
363 			xfer->result = seg->result;
364 			dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
365 				xfer, wa_xfer_id(xfer), seg->index, seg->result,
366 				seg->result);
367 			goto out;
368 		default:
369 			dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
370 				 xfer, wa_xfer_id(xfer), cnt, seg->status);
371 			xfer->result = -EINVAL;
372 			goto out;
373 		}
374 	}
375 	xfer->result = 0;
376 out:
377 	return result;
378 }
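
/*
 * Worked example (hypothetical numbers) of the short-segment rule enforced
 * above for a non-isoc transfer with seg_size = 2048 and three segments:
 *
 *	seg 0: result 2048  -> fine, full segment
 *	seg 1: result  512  -> short but tolerated, found_short = 1
 *	seg 2: result 2048  -> data after a short segment, urb->status = -EINVAL
 *
 * Only the last segment may legitimately be shorter than seg_size.
 */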
379 
380 /*
381  * Mark the given segment as done.  Return true if this completes the xfer.
382  * This should only be called for segs that have been submitted to an RPIPE.
383  * Delayed segs are not marked as submitted so they do not need to be marked
384  * as done when cleaning up.
385  *
386  * xfer->lock has to be locked
387  */
static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
389 	struct wa_seg *seg, enum wa_seg_status status)
390 {
391 	seg->status = status;
392 	xfer->segs_done++;
393 
394 	/* check for done. */
395 	return __wa_xfer_is_done(xfer);
396 }
397 
/*
 * Search the wire adapter's transfer list for the transfer with the
 * given ID and take a reference on it.
 *
 * @returns NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
407 {
408 	unsigned long flags;
409 	struct wa_xfer *xfer_itr;
410 	spin_lock_irqsave(&wa->xfer_list_lock, flags);
411 	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
412 		if (id == xfer_itr->id) {
413 			wa_xfer_get(xfer_itr);
414 			goto out;
415 		}
416 	}
417 	xfer_itr = NULL;
418 out:
419 	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
420 	return xfer_itr;
421 }
422 
423 struct wa_xfer_abort_buffer {
424 	struct urb urb;
425 	struct wahc *wa;
426 	struct wa_xfer_abort cmd;
427 };
428 
static void __wa_xfer_abort_cb(struct urb *urb)
430 {
431 	struct wa_xfer_abort_buffer *b = urb->context;
432 	struct wahc *wa = b->wa;
433 
434 	/*
435 	 * If the abort request URB failed, then the HWA did not get the abort
436 	 * command.  Forcibly clean up the xfer without waiting for a Transfer
437 	 * Result from the HWA.
438 	 */
439 	if (urb->status < 0) {
440 		struct wa_xfer *xfer;
441 		struct device *dev = &wa->usb_iface->dev;
442 
443 		xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
444 		dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
445 			__func__, urb->status);
446 		if (xfer) {
447 			unsigned long flags;
448 			int done, seg_index = 0;
449 			struct wa_rpipe *rpipe = xfer->ep->hcpriv;
450 
451 			dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
452 				__func__, xfer, wa_xfer_id(xfer));
453 			spin_lock_irqsave(&xfer->lock, flags);
454 			/* skip done segs. */
455 			while (seg_index < xfer->segs) {
456 				struct wa_seg *seg = xfer->seg[seg_index];
457 
458 				if ((seg->status == WA_SEG_DONE) ||
459 					(seg->status == WA_SEG_ERROR)) {
460 					++seg_index;
461 				} else {
462 					break;
463 				}
464 			}
465 			/* mark remaining segs as aborted. */
466 			wa_complete_remaining_xfer_segs(xfer, seg_index,
467 				WA_SEG_ABORTED);
468 			done = __wa_xfer_is_done(xfer);
469 			spin_unlock_irqrestore(&xfer->lock, flags);
470 			if (done)
471 				wa_xfer_completion(xfer);
472 			wa_xfer_delayed_run(rpipe);
473 			wa_xfer_put(xfer);
474 		} else {
475 			dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
476 				 __func__, le32_to_cpu(b->cmd.dwTransferID));
477 		}
478 	}
479 
480 	wa_put(wa);	/* taken in __wa_xfer_abort */
481 	usb_put_urb(&b->urb);
482 }
483 
/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * On success the callback (see above) does nothing but free the request
 * data by putting the URB; because the URB is allocated at the head of
 * the struct, the whole space we allocated is kfreed.  On failure the
 * callback also cleans up the transfer, since the HWA never received
 * the abort command.
 */
static int __wa_xfer_abort(struct wa_xfer *xfer)
495 {
496 	int result = -ENOMEM;
497 	struct device *dev = &xfer->wa->usb_iface->dev;
498 	struct wa_xfer_abort_buffer *b;
499 	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
500 
501 	b = kmalloc(sizeof(*b), GFP_ATOMIC);
502 	if (b == NULL)
503 		goto error_kmalloc;
504 	b->cmd.bLength =  sizeof(b->cmd);
505 	b->cmd.bRequestType = WA_XFER_ABORT;
506 	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
507 	b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
508 	b->wa = wa_get(xfer->wa);
509 
510 	usb_init_urb(&b->urb);
511 	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
512 		usb_sndbulkpipe(xfer->wa->usb_dev,
513 				xfer->wa->dto_epd->bEndpointAddress),
514 		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
515 	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
516 	if (result < 0)
517 		goto error_submit;
518 	return result;				/* callback frees! */
519 
520 
521 error_submit:
522 	wa_put(xfer->wa);
523 	if (printk_ratelimit())
524 		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
525 			xfer, result);
526 	kfree(b);
527 error_kmalloc:
528 	return result;
529 
530 }
531 
/*
 * Calculate the number of isoc frames starting from isoc_frame_offset
 * that will fit in a transfer segment.
 */
static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
537 	int isoc_frame_offset, int *total_size)
538 {
539 	int segment_size = 0, frame_count = 0;
540 	int index = isoc_frame_offset;
541 	struct usb_iso_packet_descriptor *iso_frame_desc =
542 		xfer->urb->iso_frame_desc;
543 
544 	while ((index < xfer->urb->number_of_packets)
545 		&& ((segment_size + iso_frame_desc[index].length)
546 				<= xfer->seg_size)) {
547 		/*
548 		 * For Alereon HWA devices, only include an isoc frame in an
549 		 * out segment if it is physically contiguous with the previous
550 		 * frame.  This is required because those devices expect
551 		 * the isoc frames to be sent as a single USB transaction as
552 		 * opposed to one transaction per frame with standard HWA.
553 		 */
554 		if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
555 			&& (xfer->is_inbound == 0)
556 			&& (index > isoc_frame_offset)
557 			&& ((iso_frame_desc[index - 1].offset +
558 				iso_frame_desc[index - 1].length) !=
559 				iso_frame_desc[index].offset))
560 			break;
561 
562 		/* this frame fits. count it. */
563 		++frame_count;
564 		segment_size += iso_frame_desc[index].length;
565 
566 		/* move to the next isoc frame. */
567 		++index;
568 	}
569 
570 	*total_size = segment_size;
571 	return frame_count;
572 }
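
/*
 * Worked example (hypothetical frame lengths) for the packing done above,
 * assuming seg_size = 3072 and four isoc frames of 1024 bytes each: frames
 * 0..2 fit (3072 bytes), so the function returns 3 and frame 3 starts the
 * next segment.  With the Alereon concat quirk on an OUT transfer, a frame
 * whose buffer is not contiguous with the previous one also starts a new
 * segment, even if it would still fit by size.
 */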
573 
/*
 * Compute the transfer geometry: segment size and number of segments.
 *
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
579 				     enum wa_xfer_type *pxfer_type)
580 {
581 	ssize_t result;
582 	struct device *dev = &xfer->wa->usb_iface->dev;
583 	size_t maxpktsize;
584 	struct urb *urb = xfer->urb;
585 	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
586 
587 	switch (rpipe->descr.bmAttribute & 0x3) {
588 	case USB_ENDPOINT_XFER_CONTROL:
589 		*pxfer_type = WA_XFER_TYPE_CTL;
590 		result = sizeof(struct wa_xfer_ctl);
591 		break;
592 	case USB_ENDPOINT_XFER_INT:
593 	case USB_ENDPOINT_XFER_BULK:
594 		*pxfer_type = WA_XFER_TYPE_BI;
595 		result = sizeof(struct wa_xfer_bi);
596 		break;
597 	case USB_ENDPOINT_XFER_ISOC:
598 		*pxfer_type = WA_XFER_TYPE_ISO;
599 		result = sizeof(struct wa_xfer_hwaiso);
600 		break;
601 	default:
602 		/* never happens */
603 		BUG();
604 		result = -EINVAL;	/* shut gcc up */
605 	}
606 	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
607 	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
608 
609 	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
610 	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
611 		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
612 	/* Compute the segment size and make sure it is a multiple of
613 	 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
614 	 * a check (FIXME) */
615 	if (xfer->seg_size < maxpktsize) {
616 		dev_err(dev,
617 			"HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
618 			xfer->seg_size, maxpktsize);
619 		result = -EINVAL;
620 		goto error;
621 	}
622 	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
623 	if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
624 		int index = 0;
625 
626 		xfer->segs = 0;
627 		/*
628 		 * loop over urb->number_of_packets to determine how many
629 		 * xfer segments will be needed to send the isoc frames.
630 		 */
631 		while (index < urb->number_of_packets) {
632 			int seg_size; /* don't care. */
633 			index += __wa_seg_calculate_isoc_frame_count(xfer,
634 					index, &seg_size);
635 			++xfer->segs;
636 		}
637 	} else {
638 		xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
639 						xfer->seg_size);
640 		if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
641 			xfer->segs = 1;
642 	}
643 
644 	if (xfer->segs > WA_SEGS_MAX) {
		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
			xfer->segs, WA_SEGS_MAX);
648 		result = -EINVAL;
649 		goto error;
650 	}
651 error:
652 	return result;
653 }
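
/*
 * Worked example (hypothetical descriptor values) of the sizing done above:
 * with wBlocks = 32 and bRPipeBlockSize = 7, the raw segment size is
 * 32 << (7 - 1) = 2048 bytes; with wMaxPacketSize = 512 that is already a
 * multiple of the max packet size, so seg_size stays 2048.  A 6000 byte
 * bulk transfer then needs DIV_ROUND_UP(6000, 2048) = 3 segments, well
 * under WA_SEGS_MAX.
 */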
654 
static void __wa_setup_isoc_packet_descr(
656 		struct wa_xfer_packet_info_hwaiso *packet_desc,
657 		struct wa_xfer *xfer,
658 		struct wa_seg *seg) {
659 	struct usb_iso_packet_descriptor *iso_frame_desc =
660 		xfer->urb->iso_frame_desc;
661 	int frame_index;
662 
663 	/* populate isoc packet descriptor. */
664 	packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
665 	packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
666 		(sizeof(packet_desc->PacketLength[0]) *
667 			seg->isoc_frame_count));
668 	for (frame_index = 0; frame_index < seg->isoc_frame_count;
669 		++frame_index) {
670 		int offset_index = frame_index + seg->isoc_frame_offset;
671 		packet_desc->PacketLength[frame_index] =
672 			cpu_to_le16(iso_frame_desc[offset_index].length);
673 	}
674 }
675 
676 
677 /* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
679 				 struct wa_xfer_hdr *xfer_hdr0,
680 				 enum wa_xfer_type xfer_type,
681 				 size_t xfer_hdr_size)
682 {
683 	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
684 	struct wa_seg *seg = xfer->seg[0];
685 
686 	xfer_hdr0 = &seg->xfer_hdr;
687 	xfer_hdr0->bLength = xfer_hdr_size;
688 	xfer_hdr0->bRequestType = xfer_type;
689 	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
690 	xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
691 	xfer_hdr0->bTransferSegment = 0;
692 	switch (xfer_type) {
693 	case WA_XFER_TYPE_CTL: {
694 		struct wa_xfer_ctl *xfer_ctl =
695 			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
696 		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
697 		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
698 		       sizeof(xfer_ctl->baSetupData));
699 		break;
700 	}
701 	case WA_XFER_TYPE_BI:
702 		break;
703 	case WA_XFER_TYPE_ISO: {
704 		struct wa_xfer_hwaiso *xfer_iso =
705 			container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
706 		struct wa_xfer_packet_info_hwaiso *packet_desc =
707 			((void *)xfer_iso) + xfer_hdr_size;
708 
709 		/* populate the isoc section of the transfer request. */
710 		xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
711 		/* populate isoc packet descriptor. */
712 		__wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
713 		break;
714 	}
715 	default:
716 		BUG();
	}
718 }
719 
720 /*
721  * Callback for the OUT data phase of the segment request
722  *
723  * Check wa_seg_tr_cb(); most comments also apply here because this
724  * function does almost the same thing and they work closely
725  * together.
726  *
727  * If the seg request has failed but this DTO phase has succeeded,
728  * wa_seg_tr_cb() has already failed the segment and moved the
729  * status to WA_SEG_ERROR, so this will go through 'case 0' and
730  * effectively do nothing.
731  */
static void wa_seg_dto_cb(struct urb *urb)
733 {
734 	struct wa_seg *seg = urb->context;
735 	struct wa_xfer *xfer = seg->xfer;
736 	struct wahc *wa;
737 	struct device *dev;
738 	struct wa_rpipe *rpipe;
739 	unsigned long flags;
740 	unsigned rpipe_ready = 0;
741 	int data_send_done = 1, release_dto = 0, holding_dto = 0;
742 	u8 done = 0;
743 	int result;
744 
745 	/* free the sg if it was used. */
746 	kfree(urb->sg);
747 	urb->sg = NULL;
748 
749 	spin_lock_irqsave(&xfer->lock, flags);
750 	wa = xfer->wa;
751 	dev = &wa->usb_iface->dev;
752 	if (usb_pipeisoc(xfer->urb->pipe)) {
753 		/* Alereon HWA sends all isoc frames in a single transfer. */
754 		if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
755 			seg->isoc_frame_index += seg->isoc_frame_count;
756 		else
757 			seg->isoc_frame_index += 1;
758 		if (seg->isoc_frame_index < seg->isoc_frame_count) {
759 			data_send_done = 0;
760 			holding_dto = 1; /* checked in error cases. */
761 			/*
762 			 * if this is the last isoc frame of the segment, we
763 			 * can release DTO after sending this frame.
764 			 */
765 			if ((seg->isoc_frame_index + 1) >=
766 				seg->isoc_frame_count)
767 				release_dto = 1;
768 		}
769 		dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
770 			wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
771 			holding_dto, release_dto);
772 	}
773 	spin_unlock_irqrestore(&xfer->lock, flags);
774 
775 	switch (urb->status) {
776 	case 0:
777 		spin_lock_irqsave(&xfer->lock, flags);
778 		seg->result += urb->actual_length;
779 		if (data_send_done) {
780 			dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
781 				wa_xfer_id(xfer), seg->index, seg->result);
782 			if (seg->status < WA_SEG_PENDING)
783 				seg->status = WA_SEG_PENDING;
784 		} else {
785 			/* should only hit this for isoc xfers. */
786 			/*
787 			 * Populate the dto URB with the next isoc frame buffer,
788 			 * send the URB and release DTO if we no longer need it.
789 			 */
790 			 __wa_populate_dto_urb_isoc(xfer, seg,
791 				seg->isoc_frame_offset + seg->isoc_frame_index);
792 
793 			/* resubmit the URB with the next isoc frame. */
794 			/* take a ref on resubmit. */
795 			wa_xfer_get(xfer);
796 			result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
797 			if (result < 0) {
798 				dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
799 				       wa_xfer_id(xfer), seg->index, result);
800 				spin_unlock_irqrestore(&xfer->lock, flags);
801 				goto error_dto_submit;
802 			}
803 		}
804 		spin_unlock_irqrestore(&xfer->lock, flags);
805 		if (release_dto) {
806 			__wa_dto_put(wa);
807 			wa_check_for_delayed_rpipes(wa);
808 		}
809 		break;
810 	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
812 		if (holding_dto) {
813 			__wa_dto_put(wa);
814 			wa_check_for_delayed_rpipes(wa);
815 		}
816 		break;
817 	default:		/* Other errors ... */
818 		dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
819 			wa_xfer_id(xfer), seg->index, urb->status);
820 		goto error_default;
821 	}
822 
823 	/* taken when this URB was submitted. */
824 	wa_xfer_put(xfer);
825 	return;
826 
827 error_dto_submit:
828 	/* taken on resubmit attempt. */
829 	wa_xfer_put(xfer);
830 error_default:
831 	spin_lock_irqsave(&xfer->lock, flags);
832 	rpipe = xfer->ep->hcpriv;
833 	if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
834 		    EDC_ERROR_TIMEFRAME)){
835 		dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
836 		wa_reset_all(wa);
837 	}
838 	if (seg->status != WA_SEG_ERROR) {
839 		seg->result = urb->status;
840 		__wa_xfer_abort(xfer);
841 		rpipe_ready = rpipe_avail_inc(rpipe);
842 		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
843 	}
844 	spin_unlock_irqrestore(&xfer->lock, flags);
845 	if (holding_dto) {
846 		__wa_dto_put(wa);
847 		wa_check_for_delayed_rpipes(wa);
848 	}
849 	if (done)
850 		wa_xfer_completion(xfer);
851 	if (rpipe_ready)
852 		wa_xfer_delayed_run(rpipe);
853 	/* taken when this URB was submitted. */
854 	wa_xfer_put(xfer);
855 }
856 
857 /*
858  * Callback for the isoc packet descriptor phase of the segment request
859  *
860  * Check wa_seg_tr_cb(); most comments also apply here because this
861  * function does almost the same thing and they work closely
862  * together.
863  *
864  * If the seg request has failed but this phase has succeeded,
865  * wa_seg_tr_cb() has already failed the segment and moved the
866  * status to WA_SEG_ERROR, so this will go through 'case 0' and
867  * effectively do nothing.
868  */
static void wa_seg_iso_pack_desc_cb(struct urb *urb)
870 {
871 	struct wa_seg *seg = urb->context;
872 	struct wa_xfer *xfer = seg->xfer;
873 	struct wahc *wa;
874 	struct device *dev;
875 	struct wa_rpipe *rpipe;
876 	unsigned long flags;
877 	unsigned rpipe_ready = 0;
878 	u8 done = 0;
879 
880 	switch (urb->status) {
881 	case 0:
882 		spin_lock_irqsave(&xfer->lock, flags);
883 		wa = xfer->wa;
884 		dev = &wa->usb_iface->dev;
885 		dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
886 			wa_xfer_id(xfer), seg->index);
887 		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
888 			seg->status = WA_SEG_PENDING;
889 		spin_unlock_irqrestore(&xfer->lock, flags);
890 		break;
891 	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
893 		break;
894 	default:		/* Other errors ... */
895 		spin_lock_irqsave(&xfer->lock, flags);
896 		wa = xfer->wa;
897 		dev = &wa->usb_iface->dev;
898 		rpipe = xfer->ep->hcpriv;
899 		pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
900 				wa_xfer_id(xfer), seg->index, urb->status);
901 		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
902 			    EDC_ERROR_TIMEFRAME)){
903 			dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
904 			wa_reset_all(wa);
905 		}
906 		if (seg->status != WA_SEG_ERROR) {
907 			usb_unlink_urb(seg->dto_urb);
908 			seg->result = urb->status;
909 			__wa_xfer_abort(xfer);
910 			rpipe_ready = rpipe_avail_inc(rpipe);
911 			done = __wa_xfer_mark_seg_as_done(xfer, seg,
912 					WA_SEG_ERROR);
913 		}
914 		spin_unlock_irqrestore(&xfer->lock, flags);
915 		if (done)
916 			wa_xfer_completion(xfer);
917 		if (rpipe_ready)
918 			wa_xfer_delayed_run(rpipe);
919 	}
920 	/* taken when this URB was submitted. */
921 	wa_xfer_put(xfer);
922 }
923 
/*
 * Callback for the segment request
 *
 * If successful, transition state (unless already transitioned or
 * outbound transfer); otherwise, take note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access seg->xfer until we are sure the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), in which case seg->xfer may
 * already be gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is not inbound,
 * as in that case wa_seg_dto_cb will do it when the OUT data phase
 * finishes.
 */
static void wa_seg_tr_cb(struct urb *urb)
943 {
944 	struct wa_seg *seg = urb->context;
945 	struct wa_xfer *xfer = seg->xfer;
946 	struct wahc *wa;
947 	struct device *dev;
948 	struct wa_rpipe *rpipe;
949 	unsigned long flags;
950 	unsigned rpipe_ready;
951 	u8 done = 0;
952 
953 	switch (urb->status) {
954 	case 0:
955 		spin_lock_irqsave(&xfer->lock, flags);
956 		wa = xfer->wa;
957 		dev = &wa->usb_iface->dev;
958 		dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
959 			xfer, wa_xfer_id(xfer), seg->index);
960 		if (xfer->is_inbound &&
961 			seg->status < WA_SEG_PENDING &&
962 			!(usb_pipeisoc(xfer->urb->pipe)))
963 			seg->status = WA_SEG_PENDING;
964 		spin_unlock_irqrestore(&xfer->lock, flags);
965 		break;
966 	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
968 		break;
969 	default:		/* Other errors ... */
970 		spin_lock_irqsave(&xfer->lock, flags);
971 		wa = xfer->wa;
972 		dev = &wa->usb_iface->dev;
973 		rpipe = xfer->ep->hcpriv;
974 		if (printk_ratelimit())
975 			dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
976 				xfer, wa_xfer_id(xfer), seg->index,
977 				urb->status);
978 		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
979 			    EDC_ERROR_TIMEFRAME)){
980 			dev_err(dev, "DTO: URB max acceptable errors "
981 				"exceeded, resetting device\n");
982 			wa_reset_all(wa);
983 		}
984 		usb_unlink_urb(seg->isoc_pack_desc_urb);
985 		usb_unlink_urb(seg->dto_urb);
986 		seg->result = urb->status;
987 		__wa_xfer_abort(xfer);
988 		rpipe_ready = rpipe_avail_inc(rpipe);
989 		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
990 		spin_unlock_irqrestore(&xfer->lock, flags);
991 		if (done)
992 			wa_xfer_completion(xfer);
993 		if (rpipe_ready)
994 			wa_xfer_delayed_run(rpipe);
995 	}
996 	/* taken when this URB was submitted. */
997 	wa_xfer_put(xfer);
998 }
999 
1000 /*
1001  * Allocate an SG list to store bytes_to_transfer bytes and copy the
1002  * subset of the in_sg that matches the buffer subset
1003  * we are about to transfer.
1004  */
static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
1006 	const unsigned int bytes_transferred,
1007 	const unsigned int bytes_to_transfer, int *out_num_sgs)
1008 {
1009 	struct scatterlist *out_sg;
1010 	unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
1011 		nents;
1012 	struct scatterlist *current_xfer_sg = in_sg;
1013 	struct scatterlist *current_seg_sg, *last_seg_sg;
1014 
1015 	/* skip previously transferred pages. */
1016 	while ((current_xfer_sg) &&
1017 			(bytes_processed < bytes_transferred)) {
1018 		bytes_processed += current_xfer_sg->length;
1019 
1020 		/* advance the sg if current segment starts on or past the
1021 			next page. */
1022 		if (bytes_processed <= bytes_transferred)
1023 			current_xfer_sg = sg_next(current_xfer_sg);
1024 	}
1025 
1026 	/* the data for the current segment starts in current_xfer_sg.
1027 		calculate the offset. */
1028 	if (bytes_processed > bytes_transferred) {
1029 		offset_into_current_page_data = current_xfer_sg->length -
1030 			(bytes_processed - bytes_transferred);
1031 	}
1032 
1033 	/* calculate the number of pages needed by this segment. */
1034 	nents = DIV_ROUND_UP((bytes_to_transfer +
1035 		offset_into_current_page_data +
1036 		current_xfer_sg->offset),
1037 		PAGE_SIZE);
1038 
1039 	out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
1040 	if (out_sg) {
1041 		sg_init_table(out_sg, nents);
1042 
1043 		/* copy the portion of the incoming SG that correlates to the
1044 		 * data to be transferred by this segment to the segment SG. */
1045 		last_seg_sg = current_seg_sg = out_sg;
1046 		bytes_processed = 0;
1047 
1048 		/* reset nents and calculate the actual number of sg entries
1049 			needed. */
1050 		nents = 0;
1051 		while ((bytes_processed < bytes_to_transfer) &&
1052 				current_seg_sg && current_xfer_sg) {
1053 			unsigned int page_len = min((current_xfer_sg->length -
1054 				offset_into_current_page_data),
1055 				(bytes_to_transfer - bytes_processed));
1056 
1057 			sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
1058 				page_len,
1059 				current_xfer_sg->offset +
1060 				offset_into_current_page_data);
1061 
1062 			bytes_processed += page_len;
1063 
1064 			last_seg_sg = current_seg_sg;
1065 			current_seg_sg = sg_next(current_seg_sg);
1066 			current_xfer_sg = sg_next(current_xfer_sg);
1067 
1068 			/* only the first page may require additional offset. */
1069 			offset_into_current_page_data = 0;
1070 			nents++;
1071 		}
1072 
1073 		/* update num_sgs and terminate the list since we may have
1074 		 *  concatenated pages. */
1075 		sg_mark_end(last_seg_sg);
1076 		*out_num_sgs = nents;
1077 	}
1078 
1079 	return out_sg;
1080 }
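
/*
 * Worked example (hypothetical layout) for the subset-SG construction above:
 * with in_sg made of full 4096 byte page entries, bytes_transferred = 6144
 * and bytes_to_transfer = 2048, the skip loop stops inside the second entry
 * with bytes_processed = 8192, so offset_into_current_page_data becomes
 * 4096 - (8192 - 6144) = 2048 and the new list gets a single entry pointing
 * at the second half of that page.
 */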
1081 
1082 /*
1083  * Populate DMA buffer info for the isoc dto urb.
1084  */
static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
1086 	struct wa_seg *seg, int curr_iso_frame)
1087 {
1088 	seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1089 	seg->dto_urb->sg = NULL;
1090 	seg->dto_urb->num_sgs = 0;
1091 	/* dto urb buffer address pulled from iso_frame_desc. */
1092 	seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
1093 		xfer->urb->iso_frame_desc[curr_iso_frame].offset;
1094 	/* The Alereon HWA sends a single URB with all isoc segs. */
1095 	if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
1096 		seg->dto_urb->transfer_buffer_length = seg->isoc_size;
1097 	else
1098 		seg->dto_urb->transfer_buffer_length =
1099 			xfer->urb->iso_frame_desc[curr_iso_frame].length;
1100 }
1101 
1102 /*
1103  * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
1104  */
static int __wa_populate_dto_urb(struct wa_xfer *xfer,
1106 	struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
1107 {
1108 	int result = 0;
1109 
1110 	if (xfer->is_dma) {
1111 		seg->dto_urb->transfer_dma =
1112 			xfer->urb->transfer_dma + buf_itr_offset;
1113 		seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1114 		seg->dto_urb->sg = NULL;
1115 		seg->dto_urb->num_sgs = 0;
1116 	} else {
1117 		/* do buffer or SG processing. */
1118 		seg->dto_urb->transfer_flags &=
1119 			~URB_NO_TRANSFER_DMA_MAP;
1120 		/* this should always be 0 before a resubmit. */
1121 		seg->dto_urb->num_mapped_sgs = 0;
1122 
1123 		if (xfer->urb->transfer_buffer) {
1124 			seg->dto_urb->transfer_buffer =
1125 				xfer->urb->transfer_buffer +
1126 				buf_itr_offset;
1127 			seg->dto_urb->sg = NULL;
1128 			seg->dto_urb->num_sgs = 0;
1129 		} else {
1130 			seg->dto_urb->transfer_buffer = NULL;
1131 
1132 			/*
1133 			 * allocate an SG list to store seg_size bytes
1134 			 * and copy the subset of the xfer->urb->sg that
1135 			 * matches the buffer subset we are about to
1136 			 * read.
1137 			 */
1138 			seg->dto_urb->sg = wa_xfer_create_subset_sg(
1139 				xfer->urb->sg,
1140 				buf_itr_offset, buf_itr_size,
1141 				&(seg->dto_urb->num_sgs));
1142 			if (!(seg->dto_urb->sg))
1143 				result = -ENOMEM;
1144 		}
1145 	}
1146 	seg->dto_urb->transfer_buffer_length = buf_itr_size;
1147 
1148 	return result;
1149 }
1150 
1151 /*
1152  * Allocate the segs array and initialize each of them
1153  *
1154  * The segments are freed by wa_xfer_destroy() when the xfer use count
1155  * drops to zero; however, because each segment is given the same life
1156  * cycle as the USB URB it contains, it is actually freed by
1157  * usb_put_urb() on the contained USB URB (twisted, eh?).
1158  */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
1160 {
1161 	int result, cnt, isoc_frame_offset = 0;
1162 	size_t alloc_size = sizeof(*xfer->seg[0])
1163 		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
1164 	struct usb_device *usb_dev = xfer->wa->usb_dev;
1165 	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
1166 	struct wa_seg *seg;
1167 	size_t buf_itr, buf_size, buf_itr_size;
1168 
1169 	result = -ENOMEM;
1170 	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
1171 	if (xfer->seg == NULL)
1172 		goto error_segs_kzalloc;
1173 	buf_itr = 0;
1174 	buf_size = xfer->urb->transfer_buffer_length;
1175 	for (cnt = 0; cnt < xfer->segs; cnt++) {
1176 		size_t iso_pkt_descr_size = 0;
1177 		int seg_isoc_frame_count = 0, seg_isoc_size = 0;
1178 
1179 		/*
1180 		 * Adjust the size of the segment object to contain space for
1181 		 * the isoc packet descriptor buffer.
1182 		 */
1183 		if (usb_pipeisoc(xfer->urb->pipe)) {
1184 			seg_isoc_frame_count =
1185 				__wa_seg_calculate_isoc_frame_count(xfer,
1186 					isoc_frame_offset, &seg_isoc_size);
1187 
1188 			iso_pkt_descr_size =
1189 				sizeof(struct wa_xfer_packet_info_hwaiso) +
1190 				(seg_isoc_frame_count * sizeof(__le16));
1191 		}
1192 		result = -ENOMEM;
1193 		seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
1194 						GFP_ATOMIC);
1195 		if (seg == NULL)
1196 			goto error_seg_kmalloc;
1197 		wa_seg_init(seg);
1198 		seg->xfer = xfer;
1199 		seg->index = cnt;
1200 		usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
1201 				  usb_sndbulkpipe(usb_dev,
1202 						  dto_epd->bEndpointAddress),
1203 				  &seg->xfer_hdr, xfer_hdr_size,
1204 				  wa_seg_tr_cb, seg);
1205 		buf_itr_size = min(buf_size, xfer->seg_size);
1206 
1207 		if (usb_pipeisoc(xfer->urb->pipe)) {
1208 			seg->isoc_frame_count = seg_isoc_frame_count;
1209 			seg->isoc_frame_offset = isoc_frame_offset;
1210 			seg->isoc_size = seg_isoc_size;
1211 			/* iso packet descriptor. */
1212 			seg->isoc_pack_desc_urb =
1213 					usb_alloc_urb(0, GFP_ATOMIC);
1214 			if (seg->isoc_pack_desc_urb == NULL)
1215 				goto error_iso_pack_desc_alloc;
1216 			/*
1217 			 * The buffer for the isoc packet descriptor starts
1218 			 * after the transfer request header in the
1219 			 * segment object memory buffer.
1220 			 */
1221 			usb_fill_bulk_urb(
1222 				seg->isoc_pack_desc_urb, usb_dev,
1223 				usb_sndbulkpipe(usb_dev,
1224 					dto_epd->bEndpointAddress),
1225 				(void *)(&seg->xfer_hdr) +
1226 					xfer_hdr_size,
1227 				iso_pkt_descr_size,
1228 				wa_seg_iso_pack_desc_cb, seg);
1229 
1230 			/* adjust starting frame offset for next seg. */
1231 			isoc_frame_offset += seg_isoc_frame_count;
1232 		}
1233 
1234 		if (xfer->is_inbound == 0 && buf_size > 0) {
1235 			/* outbound data. */
1236 			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
1237 			if (seg->dto_urb == NULL)
1238 				goto error_dto_alloc;
1239 			usb_fill_bulk_urb(
1240 				seg->dto_urb, usb_dev,
1241 				usb_sndbulkpipe(usb_dev,
1242 						dto_epd->bEndpointAddress),
1243 				NULL, 0, wa_seg_dto_cb, seg);
1244 
1245 			if (usb_pipeisoc(xfer->urb->pipe)) {
1246 				/*
1247 				 * Fill in the xfer buffer information for the
1248 				 * first isoc frame.  Subsequent frames in this
1249 				 * segment will be filled in and sent from the
1250 				 * DTO completion routine, if needed.
1251 				 */
1252 				__wa_populate_dto_urb_isoc(xfer, seg,
1253 					seg->isoc_frame_offset);
1254 			} else {
1255 				/* fill in the xfer buffer information. */
1256 				result = __wa_populate_dto_urb(xfer, seg,
1257 							buf_itr, buf_itr_size);
1258 				if (result < 0)
1259 					goto error_seg_outbound_populate;
1260 
1261 				buf_itr += buf_itr_size;
1262 				buf_size -= buf_itr_size;
1263 			}
1264 		}
1265 		seg->status = WA_SEG_READY;
1266 	}
1267 	return 0;
1268 
	/*
	 * Free the memory for the current segment which failed to init.
	 * Use the fact that cnt is left at where it failed.  The remaining
	 * segments will be cleaned up by wa_xfer_destroy.
	 */
1274 error_seg_outbound_populate:
1275 	usb_free_urb(xfer->seg[cnt]->dto_urb);
1276 error_dto_alloc:
1277 	usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
1278 error_iso_pack_desc_alloc:
1279 	kfree(xfer->seg[cnt]);
1280 	xfer->seg[cnt] = NULL;
1281 error_seg_kmalloc:
1282 error_segs_kzalloc:
1283 	return result;
1284 }
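
/*
 * Illustrative note on the alloc_size arithmetic above: struct wa_seg embeds
 * a generic struct wa_xfer_hdr as its last member, so each allocation swaps
 * that space for the real, type-specific header, e.g. for a control transfer
 * (hypothetical instance):
 *
 *	alloc_size = sizeof(struct wa_seg)
 *		- sizeof(struct wa_xfer_hdr) + sizeof(struct wa_xfer_ctl);
 *
 * For isoc, the packet descriptor (header plus one __le16 length per frame)
 * is appended on top of that, and its URB transfers the bytes starting at
 * (void *)(&seg->xfer_hdr) + xfer_hdr_size.
 */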
1285 
1286 /*
1287  * Allocates all the stuff needed to submit a transfer
1288  *
1289  * Breaks the whole data buffer in a list of segments, each one has a
1290  * structure allocated to it and linked in xfer->seg[index]
1291  *
1292  * FIXME: merge setup_segs() and the last part of this function, no
1293  *        need to do two for loops when we could run everything in a
1294  *        single one
1295  */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
1297 {
1298 	int result;
1299 	struct device *dev = &xfer->wa->usb_iface->dev;
1300 	enum wa_xfer_type xfer_type = 0; /* shut up GCC */
1301 	size_t xfer_hdr_size, cnt, transfer_size;
1302 	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
1303 
1304 	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
1305 	if (result < 0)
1306 		goto error_setup_sizes;
1307 	xfer_hdr_size = result;
1308 	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
1309 	if (result < 0) {
1310 		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
1311 			xfer, xfer->segs, result);
1312 		goto error_setup_segs;
1313 	}
1314 	/* Fill the first header */
1315 	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
1316 	wa_xfer_id_init(xfer);
1317 	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
1318 
1319 	/* Fill remaining headers */
1320 	xfer_hdr = xfer_hdr0;
1321 	if (xfer_type == WA_XFER_TYPE_ISO) {
1322 		xfer_hdr0->dwTransferLength =
1323 			cpu_to_le32(xfer->seg[0]->isoc_size);
1324 		for (cnt = 1; cnt < xfer->segs; cnt++) {
1325 			struct wa_xfer_packet_info_hwaiso *packet_desc;
1326 			struct wa_seg *seg = xfer->seg[cnt];
1327 			struct wa_xfer_hwaiso *xfer_iso;
1328 
1329 			xfer_hdr = &seg->xfer_hdr;
1330 			xfer_iso = container_of(xfer_hdr,
1331 						struct wa_xfer_hwaiso, hdr);
1332 			packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
1333 			/*
1334 			 * Copy values from the 0th header. Segment specific
1335 			 * values are set below.
1336 			 */
1337 			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
1338 			xfer_hdr->bTransferSegment = cnt;
1339 			xfer_hdr->dwTransferLength =
1340 				cpu_to_le32(seg->isoc_size);
1341 			xfer_iso->dwNumOfPackets =
1342 					cpu_to_le32(seg->isoc_frame_count);
1343 			__wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
1344 			seg->status = WA_SEG_READY;
1345 		}
1346 	} else {
1347 		transfer_size = urb->transfer_buffer_length;
1348 		xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
1349 			cpu_to_le32(xfer->seg_size) :
1350 			cpu_to_le32(transfer_size);
1351 		transfer_size -=  xfer->seg_size;
1352 		for (cnt = 1; cnt < xfer->segs; cnt++) {
1353 			xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
1354 			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
1355 			xfer_hdr->bTransferSegment = cnt;
1356 			xfer_hdr->dwTransferLength =
1357 				transfer_size > xfer->seg_size ?
1358 					cpu_to_le32(xfer->seg_size)
1359 					: cpu_to_le32(transfer_size);
1360 			xfer->seg[cnt]->status = WA_SEG_READY;
1361 			transfer_size -=  xfer->seg_size;
1362 		}
1363 	}
1364 	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
1365 	result = 0;
1366 error_setup_segs:
1367 error_setup_sizes:
1368 	return result;
1369 }
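
/*
 * Worked example (hypothetical sizes) of the header fill-in above for a
 * 5000 byte non-isoc transfer with seg_size = 2048:
 *
 *	seg 0: dwTransferLength = 2048, bTransferSegment = 0
 *	seg 1: dwTransferLength = 2048, bTransferSegment = 1
 *	seg 2: dwTransferLength =  904, bTransferSegment = 2 | 0x80 (last)
 */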
1370 
/*
 * Submit a transfer segment: the transfer request URB, the isoc packet
 * descriptor URB (if any) and the outbound data URB (if any).
 *
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
1377 			   struct wa_seg *seg, int *dto_done)
1378 {
1379 	int result;
1380 
1381 	/* default to done unless we encounter a multi-frame isoc segment. */
1382 	*dto_done = 1;
1383 
1384 	/*
1385 	 * Take a ref for each segment urb so the xfer cannot disappear until
1386 	 * all of the callbacks run.
1387 	 */
1388 	wa_xfer_get(xfer);
1389 	/* submit the transfer request. */
1390 	seg->status = WA_SEG_SUBMITTED;
1391 	result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
1392 	if (result < 0) {
1393 		pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
1394 		       __func__, xfer, seg->index, result);
1395 		wa_xfer_put(xfer);
1396 		goto error_tr_submit;
1397 	}
1398 	/* submit the isoc packet descriptor if present. */
1399 	if (seg->isoc_pack_desc_urb) {
1400 		wa_xfer_get(xfer);
1401 		result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
1402 		seg->isoc_frame_index = 0;
1403 		if (result < 0) {
1404 			pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
1405 			       __func__, xfer, seg->index, result);
1406 			wa_xfer_put(xfer);
1407 			goto error_iso_pack_desc_submit;
1408 		}
1409 	}
1410 	/* submit the out data if this is an out request. */
1411 	if (seg->dto_urb) {
1412 		struct wahc *wa = xfer->wa;
1413 		wa_xfer_get(xfer);
1414 		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
1415 		if (result < 0) {
1416 			pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
1417 			       __func__, xfer, seg->index, result);
1418 			wa_xfer_put(xfer);
1419 			goto error_dto_submit;
1420 		}
1421 		/*
1422 		 * If this segment contains more than one isoc frame, hold
1423 		 * onto the dto resource until we send all frames.
1424 		 * Only applies to non-Alereon devices.
1425 		 */
1426 		if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
1427 			&& (seg->isoc_frame_count > 1))
1428 			*dto_done = 0;
1429 	}
1430 	rpipe_avail_dec(rpipe);
1431 	return 0;
1432 
1433 error_dto_submit:
1434 	usb_unlink_urb(seg->isoc_pack_desc_urb);
1435 error_iso_pack_desc_submit:
1436 	usb_unlink_urb(&seg->tr_urb);
1437 error_tr_submit:
1438 	seg->status = WA_SEG_ERROR;
1439 	seg->result = result;
1440 	*dto_done = 1;
1441 	return result;
1442 }
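
/*
 * Illustrative summary of the reference pairing used above: every URB that
 * __wa_seg_submit() successfully submits holds one wa_xfer_get() which the
 * matching completion callback (wa_seg_tr_cb(), wa_seg_iso_pack_desc_cb() or
 * wa_seg_dto_cb()) releases with wa_xfer_put(); on a failed submit the
 * reference is dropped right away, so the xfer can neither be freed under a
 * pending callback nor leak a reference.
 */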
1443 
/*
 * Execute more queued request segments until the maximum concurrent allowed.
 * Return true if the DTO resource was acquired and released.
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not vice versa.
 */
static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
1452 {
1453 	int result, dto_acquired = 0, dto_done = 0;
1454 	struct device *dev = &rpipe->wa->usb_iface->dev;
1455 	struct wa_seg *seg;
1456 	struct wa_xfer *xfer;
1457 	unsigned long flags;
1458 
1459 	*dto_waiting = 0;
1460 
1461 	spin_lock_irqsave(&rpipe->seg_lock, flags);
1462 	while (atomic_read(&rpipe->segs_available) > 0
1463 	      && !list_empty(&rpipe->seg_list)
1464 	      && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
1465 		seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
1466 				 list_node);
1467 		list_del(&seg->list_node);
1468 		xfer = seg->xfer;
1469 		/*
1470 		 * Get a reference to the xfer in case the callbacks for the
1471 		 * URBs submitted by __wa_seg_submit attempt to complete
1472 		 * the xfer before this function completes.
1473 		 */
1474 		wa_xfer_get(xfer);
1475 		result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
1476 		/* release the dto resource if this RPIPE is done with it. */
1477 		if (dto_done)
1478 			__wa_dto_put(rpipe->wa);
1479 		dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
1480 			xfer, wa_xfer_id(xfer), seg->index,
1481 			atomic_read(&rpipe->segs_available), result);
1482 		if (unlikely(result < 0)) {
1483 			int done;
1484 
1485 			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1486 			spin_lock_irqsave(&xfer->lock, flags);
1487 			__wa_xfer_abort(xfer);
1488 			/*
1489 			 * This seg was marked as submitted when it was put on
1490 			 * the RPIPE seg_list.  Mark it done.
1491 			 */
1492 			xfer->segs_done++;
1493 			done = __wa_xfer_is_done(xfer);
1494 			spin_unlock_irqrestore(&xfer->lock, flags);
1495 			if (done)
1496 				wa_xfer_completion(xfer);
1497 			spin_lock_irqsave(&rpipe->seg_lock, flags);
1498 		}
1499 		wa_xfer_put(xfer);
1500 	}
1501 	/*
1502 	 * Mark this RPIPE as waiting if dto was not acquired, there are
1503 	 * delayed segs and no active transfers to wake us up later.
1504 	 */
1505 	if (!dto_acquired && !list_empty(&rpipe->seg_list)
1506 		&& (atomic_read(&rpipe->segs_available) ==
1507 			le16_to_cpu(rpipe->descr.wRequests)))
1508 		*dto_waiting = 1;
1509 
1510 	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1511 
1512 	return dto_done;
1513 }
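
/*
 * Illustrative note on the *dto_waiting condition above: the RPIPE is only
 * flagged as waiting when it could not grab DTO, still has delayed segments
 * queued and has nothing in flight (segs_available is back at wRequests),
 * i.e. no completion of its own is left to call wa_xfer_delayed_run() for
 * it, so whoever releases DTO must wake it via wa_check_for_delayed_rpipes().
 */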
1514 
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
1516 {
1517 	int dto_waiting;
1518 	int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);
1519 
1520 	/*
1521 	 * If this RPIPE is waiting on the DTO resource, add it to the tail of
1522 	 * the waiting list.
1523 	 * Otherwise, if the WA DTO resource was acquired and released by
1524 	 *  __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
1525 	 * DTO and failed during that time.  Check the delayed list and process
1526 	 * any waiters.  Start searching from the next RPIPE index.
1527 	 */
1528 	if (dto_waiting)
1529 		wa_add_delayed_rpipe(rpipe->wa, rpipe);
1530 	else if (dto_done)
1531 		wa_check_for_delayed_rpipes(rpipe->wa);
1532 }
1533 
/*
 * Submit as many of the transfer's segments as the RPIPE will currently
 * accept, delaying the rest on the RPIPE's segment list.
 *
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
1542 {
1543 	int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
1544 	struct wahc *wa = xfer->wa;
1545 	struct device *dev = &wa->usb_iface->dev;
1546 	unsigned cnt;
1547 	struct wa_seg *seg;
1548 	unsigned long flags;
1549 	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
1550 	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
1551 	u8 available;
1552 	u8 empty;
1553 
1554 	spin_lock_irqsave(&wa->xfer_list_lock, flags);
1555 	list_add_tail(&xfer->list_node, &wa->xfer_list);
1556 	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1557 
1558 	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
1559 	result = 0;
1560 	spin_lock_irqsave(&rpipe->seg_lock, flags);
1561 	for (cnt = 0; cnt < xfer->segs; cnt++) {
1562 		int delay_seg = 1;
1563 
1564 		available = atomic_read(&rpipe->segs_available);
1565 		empty = list_empty(&rpipe->seg_list);
1566 		seg = xfer->seg[cnt];
1567 		if (available && empty) {
1568 			/*
1569 			 * Only attempt to acquire DTO if we have a segment
1570 			 * to send.
1571 			 */
1572 			dto_acquired = __wa_dto_try_get(rpipe->wa);
1573 			if (dto_acquired) {
1574 				delay_seg = 0;
1575 				result = __wa_seg_submit(rpipe, xfer, seg,
1576 							&dto_done);
1577 				dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
1578 					xfer, wa_xfer_id(xfer), cnt, available,
1579 					empty);
1580 				if (dto_done)
1581 					__wa_dto_put(rpipe->wa);
1582 
1583 				if (result < 0) {
1584 					__wa_xfer_abort(xfer);
1585 					goto error_seg_submit;
1586 				}
1587 			}
1588 		}
1589 
1590 		if (delay_seg) {
1591 			dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
1592 				xfer, wa_xfer_id(xfer), cnt, available,  empty);
1593 			seg->status = WA_SEG_DELAYED;
1594 			list_add_tail(&seg->list_node, &rpipe->seg_list);
1595 		}
1596 		xfer->segs_submitted++;
1597 	}
1598 error_seg_submit:
1599 	/*
1600 	 * Mark this RPIPE as waiting if dto was not acquired, there are
1601 	 * delayed segs and no active transfers to wake us up later.
1602 	 */
1603 	if (!dto_acquired && !list_empty(&rpipe->seg_list)
1604 		&& (atomic_read(&rpipe->segs_available) ==
1605 			le16_to_cpu(rpipe->descr.wRequests)))
1606 		dto_waiting = 1;
1607 	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1608 
1609 	if (dto_waiting)
1610 		wa_add_delayed_rpipe(rpipe->wa, rpipe);
1611 	else if (dto_done)
1612 		wa_check_for_delayed_rpipes(rpipe->wa);
1613 
1614 	return result;
1615 }
1616 
1617 /*
 * Second part of a URB/transfer enqueuing operation
1619  *
1620  * Assumes this comes from wa_urb_enqueue() [maybe through
1621  * wa_urb_enqueue_run()]. At this point:
1622  *
1623  * xfer->wa	filled and refcounted
1624  * xfer->ep	filled with rpipe refcounted if
1625  *              delayed == 0
1626  * xfer->urb 	filled and refcounted (this is the case when called
1627  *              from wa_urb_enqueue() as we come from usb_submit_urb()
1628  *              and when called by wa_urb_enqueue_run(), as we took an
1629  *              extra ref dropped by _run() after we return).
1630  * xfer->gfp	filled
1631  *
1632  * If we fail at __wa_xfer_submit(), then we just check if we are done
1633  * and if so, we run the completion procedure. However, if we are not
1634  * yet done, we do nothing and wait for the completion handlers from
1635  * the submitted URBs or from the xfer-result path to kick in. If the
1636  * xfer result never kicks in, the xfer will time out in the USB code
1637  * and dequeue() will be called.
1638  */
1639 static int wa_urb_enqueue_b(struct wa_xfer *xfer)
1640 {
1641 	int result;
1642 	unsigned long flags;
1643 	struct urb *urb = xfer->urb;
1644 	struct wahc *wa = xfer->wa;
1645 	struct wusbhc *wusbhc = wa->wusb;
1646 	struct wusb_dev *wusb_dev;
1647 	unsigned done;
1648 
1649 	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
1650 	if (result < 0) {
1651 		pr_err("%s: error_rpipe_get\n", __func__);
1652 		goto error_rpipe_get;
1653 	}
1654 	result = -ENODEV;
1655 	/* FIXME: segmentation broken -- kills DWA */
1656 	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
1657 	if (urb->dev == NULL) {
1658 		mutex_unlock(&wusbhc->mutex);
1659 		pr_err("%s: error usb dev gone\n", __func__);
1660 		goto error_dev_gone;
1661 	}
1662 	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
1663 	if (wusb_dev == NULL) {
1664 		mutex_unlock(&wusbhc->mutex);
1665 		dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
1666 			__func__);
1667 		goto error_dev_gone;
1668 	}
1669 	mutex_unlock(&wusbhc->mutex);
1670 
1671 	spin_lock_irqsave(&xfer->lock, flags);
1672 	xfer->wusb_dev = wusb_dev;
1673 	result = urb->status;
1674 	if (urb->status != -EINPROGRESS) {
1675 		dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
1676 		goto error_dequeued;
1677 	}
1678 
1679 	result = __wa_xfer_setup(xfer, urb);
1680 	if (result < 0) {
1681 		dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
1682 		goto error_xfer_setup;
1683 	}
1684 	/*
1685 	 * Get a xfer reference since __wa_xfer_submit starts asynchronous
1686 	 * operations that may try to complete the xfer before this function
1687 	 * exits.
1688 	 */
1689 	wa_xfer_get(xfer);
1690 	result = __wa_xfer_submit(xfer);
1691 	if (result < 0) {
1692 		dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
1693 		goto error_xfer_submit;
1694 	}
1695 	spin_unlock_irqrestore(&xfer->lock, flags);
1696 	wa_xfer_put(xfer);
1697 	return 0;
1698 
1699 	/*
1700 	 * this is basically wa_xfer_completion() broken up wa_xfer_giveback()
1701 	 * does a wa_xfer_put() that will call wa_xfer_destroy() and undo
1702 	 * setup().
1703 	 */
1704 error_xfer_setup:
1705 error_dequeued:
1706 	spin_unlock_irqrestore(&xfer->lock, flags);
1707 	/* FIXME: segmentation broken, kills DWA */
1708 	if (wusb_dev)
1709 		wusb_dev_put(wusb_dev);
1710 error_dev_gone:
1711 	rpipe_put(xfer->ep->hcpriv);
1712 error_rpipe_get:
1713 	xfer->result = result;
1714 	return result;
1715 
1716 error_xfer_submit:
1717 	done = __wa_xfer_is_done(xfer);
1718 	xfer->result = result;
1719 	spin_unlock_irqrestore(&xfer->lock, flags);
1720 	if (done)
1721 		wa_xfer_completion(xfer);
1722 	wa_xfer_put(xfer);
1723 	/* return success since the completion routine will run. */
1724 	return 0;
1725 }
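
/*
 * Reference-counting note for wa_urb_enqueue_b() above (informal): the
 * extra wa_xfer_get() taken before __wa_xfer_submit() keeps the xfer
 * alive even if all submitted segments complete (and drop their
 * references) before this function returns; the matching wa_xfer_put()
 * is issued on both the success path and the error_xfer_submit path.
 */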
1726 
1727 /*
1728  * Execute the delayed transfers in the Wire Adapter @wa
1729  *
1730  * We need to be careful here, as dequeue() could be called in the
1731  * middle. dequeue() first takes xfer->lock and then checks the list,
1732  * so taking wa->xfer_list_lock for the whole submission would acquire
1733  * the locks in inverse order. Instead we splice the delayed list onto
1734  * a private list while holding the list lock and then submit the
1735  * transfers without it held.
1736  */
1737 void wa_urb_enqueue_run(struct work_struct *ws)
1738 {
1739 	struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
1740 	struct wa_xfer *xfer, *next;
1741 	struct urb *urb;
1742 	LIST_HEAD(tmp_list);
1743 
1744 	/* Create a copy of the wa->xfer_delayed_list while holding the lock */
1745 	spin_lock_irq(&wa->xfer_list_lock);
1746 	list_cut_position(&tmp_list, &wa->xfer_delayed_list,
1747 			wa->xfer_delayed_list.prev);
1748 	spin_unlock_irq(&wa->xfer_list_lock);
1749 
1750 	/*
1751 	 * enqueue from temp list without list lock held since wa_urb_enqueue_b
1752 	 * can take xfer->lock as well as lock mutexes.
1753 	 */
1754 	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1755 		list_del_init(&xfer->list_node);
1756 
1757 		urb = xfer->urb;
1758 		if (wa_urb_enqueue_b(xfer) < 0)
1759 			wa_xfer_giveback(xfer);
1760 		usb_put_urb(urb);	/* taken when queuing */
1761 	}
1762 }
1763 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
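
/*
 * Note on the list splice above (informal): list_cut_position() with
 * wa->xfer_delayed_list.prev as the cut point moves every entry onto
 * the on-stack tmp_list in O(1) and leaves the original list empty, so
 * wa->xfer_list_lock is held only for the splice and not while
 * wa_urb_enqueue_b() sleeps.
 */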
1764 
1765 /*
1766  * Process the errored transfers on the Wire Adapter outside of interrupt.
1767  */
1768 void wa_process_errored_transfers_run(struct work_struct *ws)
1769 {
1770 	struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
1771 	struct wa_xfer *xfer, *next;
1772 	LIST_HEAD(tmp_list);
1773 
1774 	pr_info("%s: Run delayed STALL processing.\n", __func__);
1775 
1776 	/* Create a copy of the wa->xfer_errored_list while holding the lock */
1777 	spin_lock_irq(&wa->xfer_list_lock);
1778 	list_cut_position(&tmp_list, &wa->xfer_errored_list,
1779 			wa->xfer_errored_list.prev);
1780 	spin_unlock_irq(&wa->xfer_list_lock);
1781 
1782 	/*
1783 	 * run rpipe_clear_feature_stalled from temp list without list lock
1784 	 * held.
1785 	 */
1786 	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1787 		struct usb_host_endpoint *ep;
1788 		unsigned long flags;
1789 		struct wa_rpipe *rpipe;
1790 
1791 		spin_lock_irqsave(&xfer->lock, flags);
1792 		ep = xfer->ep;
1793 		rpipe = ep->hcpriv;
1794 		spin_unlock_irqrestore(&xfer->lock, flags);
1795 
1796 		/* clear RPIPE feature stalled without holding a lock. */
1797 		rpipe_clear_feature_stalled(wa, ep);
1798 
1799 		/* complete the xfer. This removes it from the tmp list. */
1800 		wa_xfer_completion(xfer);
1801 
1802 		/* check for work. */
1803 		wa_xfer_delayed_run(rpipe);
1804 	}
1805 }
1806 EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
1807 
1808 /*
1809  * Submit a transfer to the Wire Adapter in a delayed way
1810  *
1811  * The process of enqueueing may sleep [see wa_urb_enqueue_b(), for
1812  * the rpipe_get_by_ep() and the mutex_lock()]. If we are in an atomic
1813  * context, we defer the wa_urb_enqueue_b() call; else we call it directly.
1814  *
1815  * @urb: We own a reference to it, taken by the Linux USB HCD stack,
1816  *       that will be given up by calling usb_hcd_giveback_urb() or by
1817  *       returning an error from this function, so we don't have to
1818  *       refcount it.
1819  */
1820 int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1821 		   struct urb *urb, gfp_t gfp)
1822 {
1823 	int result;
1824 	struct device *dev = &wa->usb_iface->dev;
1825 	struct wa_xfer *xfer;
1826 	unsigned long my_flags;
1827 	unsigned cant_sleep = irqs_disabled() | in_atomic();
1828 
1829 	if ((urb->transfer_buffer == NULL)
1830 	    && (urb->sg == NULL)
1831 	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1832 	    && urb->transfer_buffer_length != 0) {
1833 		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1834 		dump_stack();
1835 	}
1836 
1837 	spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1838 	result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
1839 	spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1840 	if (result < 0)
1841 		goto error_link_urb;
1842 
1843 	result = -ENOMEM;
1844 	xfer = kzalloc(sizeof(*xfer), gfp);
1845 	if (xfer == NULL)
1846 		goto error_kmalloc;
1847 
1848 	result = -ENOENT;
1849 	if (urb->status != -EINPROGRESS)	/* cancelled */
1850 		goto error_dequeued;		/* before starting? */
1851 	wa_xfer_init(xfer);
1852 	xfer->wa = wa_get(wa);
1853 	xfer->urb = urb;
1854 	xfer->gfp = gfp;
1855 	xfer->ep = ep;
1856 	urb->hcpriv = xfer;
1857 
1858 	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1859 		xfer, urb, urb->pipe, urb->transfer_buffer_length,
1860 		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1861 		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1862 		cant_sleep ? "deferred" : "inline");
1863 
1864 	if (cant_sleep) {
1865 		usb_get_urb(urb);
1866 		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1867 		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1868 		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1869 		queue_work(wusbd, &wa->xfer_enqueue_work);
1870 	} else {
1871 		result = wa_urb_enqueue_b(xfer);
1872 		if (result < 0) {
1873 			/*
1874 			 * URB submit/enqueue failed.  Clean up, return an
1875 			 * error and do not run the callback.  This avoids
1876 			 * an infinite submit/complete loop.
1877 			 */
1878 			dev_err(dev, "%s: URB enqueue failed: %d\n",
1879 			   __func__, result);
1880 			wa_put(xfer->wa);
1881 			wa_xfer_put(xfer);
1882 			spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1883 			usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1884 			spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1885 			return result;
1886 		}
1887 	}
1888 	return 0;
1889 
1890 error_dequeued:
1891 	kfree(xfer);
1892 error_kmalloc:
1893 	spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1894 	usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1895 	spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1896 error_link_urb:
1897 	return result;
1898 }
1899 EXPORT_SYMBOL_GPL(wa_urb_enqueue);
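
/*
 * Scheduling summary for wa_urb_enqueue() above (informal sketch): if
 * the caller may not sleep (irqs_disabled() | in_atomic()) the xfer is
 * parked on wa->xfer_delayed_list and xfer_enqueue_work is queued, with
 * an extra URB reference (usb_get_urb()) that wa_urb_enqueue_run()
 * drops after enqueueing; otherwise wa_urb_enqueue_b() is called
 * inline.
 */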
1900 
1901 /*
1902  * Dequeue a URB and make sure usb_hcd_giveback_urb() [the completion
1903  * handler] is called.
1904  *
1905  * Until a transfer has gone successfully through wa_urb_enqueue(),
1906  * dequeueing it must also run the completion; this covers transfers
1907  * stuck in the delayed list and those dequeued before wa_xfer_setup()
1908  * has been called.
1909  *
1910  *  not setup  If there is no hcpriv yet, it means that enqueue still
1911  *             had no time to set the xfer up. Because urb->status
1912  *             should then be other than -EINPROGRESS, enqueue() will
1913  *             catch that and bail out.
1914  *
1915  * If the transfer has gone through setup, we just need to clean it
1916  * up. If it has gone through submit(), we have to abort it [with an
1917  * async request] and then make sure we cancel each segment.
1918  */
1919 int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
1920 {
1921 	unsigned long flags;
1922 	struct wa_xfer *xfer;
1923 	struct wa_seg *seg;
1924 	struct wa_rpipe *rpipe;
1925 	unsigned cnt, done = 0, xfer_abort_pending;
1926 	unsigned rpipe_ready = 0;
1927 	int result;
1928 
1929 	/* check if it is safe to unlink. */
1930 	spin_lock_irqsave(&wa->xfer_list_lock, flags);
1931 	result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
1932 	if ((result == 0) && urb->hcpriv) {
1933 		/*
1934 		 * Get a xfer ref to prevent a race with wa_xfer_giveback
1935 		 * cleaning up the xfer while we are working with it.
1936 		 */
1937 		wa_xfer_get(urb->hcpriv);
1938 	}
1939 	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1940 	if (result)
1941 		return result;
1942 
1943 	xfer = urb->hcpriv;
1944 	if (xfer == NULL)
1945 		return -ENOENT;
1946 	spin_lock_irqsave(&xfer->lock, flags);
1947 	pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
1948 	rpipe = xfer->ep->hcpriv;
1949 	if (rpipe == NULL) {
1950 		pr_debug("%s: xfer %p id 0x%08X has no RPIPE.  %s",
1951 			__func__, xfer, wa_xfer_id(xfer),
1952 			"Probably already aborted.\n");
1953 		result = -ENOENT;
1954 		goto out_unlock;
1955 	}
1956 	/*
1957 	 * Check for done to avoid racing with wa_xfer_giveback and completing
1958 	 * twice.
1959 	 */
1960 	if (__wa_xfer_is_done(xfer)) {
1961 		pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
1962 			xfer, wa_xfer_id(xfer));
1963 		result = -ENOENT;
1964 		goto out_unlock;
1965 	}
1966 	/* Check the delayed list -> if there, release and complete */
1967 	spin_lock(&wa->xfer_list_lock);
1968 	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1969 		goto dequeue_delayed;
1970 	spin_unlock(&wa->xfer_list_lock);
1971 	if (xfer->seg == NULL)  	/* still hasn't reached */
1972 		goto out_unlock;	/* setup(), enqueue_b() completes */
1973 	/* Ok, the xfer is in flight already, it's been setup and submitted.*/
1974 	xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
1975 	/*
1976 	 * grab the rpipe->seg_lock here to prevent racing with
1977 	 * __wa_xfer_delayed_run.
1978 	 */
1979 	spin_lock(&rpipe->seg_lock);
1980 	for (cnt = 0; cnt < xfer->segs; cnt++) {
1981 		seg = xfer->seg[cnt];
1982 		pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
1983 			__func__, wa_xfer_id(xfer), cnt, seg->status);
1984 		switch (seg->status) {
1985 		case WA_SEG_NOTREADY:
1986 		case WA_SEG_READY:
1987 			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1988 			       xfer, cnt, seg->status);
1989 			WARN_ON(1);
1990 			break;
1991 		case WA_SEG_DELAYED:
1992 			/*
1993 			 * delete from rpipe delayed list.  If no segments on
1994 			 * this xfer have been submitted, __wa_xfer_is_done will
1995 			 * trigger a giveback below.  Otherwise, the submitted
1996 			 * segments will be completed in the DTI interrupt.
1997 			 */
1998 			seg->status = WA_SEG_ABORTED;
1999 			seg->result = -ENOENT;
2000 			list_del(&seg->list_node);
2001 			xfer->segs_done++;
2002 			break;
2003 		case WA_SEG_DONE:
2004 		case WA_SEG_ERROR:
2005 		case WA_SEG_ABORTED:
2006 			break;
2007 			/*
2008 			 * The buf_in data for a segment in the
2009 			 * WA_SEG_DTI_PENDING state is actively being read.
2010 			 * Let wa_buf_in_cb handle it since it will be called
2011 			 * and will increment xfer->segs_done.  Cleaning up
2012 			 * here could cause wa_buf_in_cb to access the xfer
2013 			 * after it has been completed/freed.
2014 			 */
2015 		case WA_SEG_DTI_PENDING:
2016 			break;
2017 			/*
2018 			 * In the states below, the HWA device already knows
2019 			 * about the transfer.  If an abort request was sent,
2020 			 * allow the HWA to process it and wait for the
2021 			 * results.  Otherwise, the DTI state and seg completed
2022 			 * counts can get out of sync.
2023 			 */
2024 		case WA_SEG_SUBMITTED:
2025 		case WA_SEG_PENDING:
2026 			/*
2027 			 * Check if the abort was successfully sent.  This could
2028 			 * be false if the HWA has been removed but we haven't
2029 			 * gotten the disconnect notification yet.
2030 			 */
2031 			if (!xfer_abort_pending) {
2032 				seg->status = WA_SEG_ABORTED;
2033 				rpipe_ready = rpipe_avail_inc(rpipe);
2034 				xfer->segs_done++;
2035 			}
2036 			break;
2037 		}
2038 	}
2039 	spin_unlock(&rpipe->seg_lock);
2040 	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
2041 	done = __wa_xfer_is_done(xfer);
2042 	spin_unlock_irqrestore(&xfer->lock, flags);
2043 	if (done)
2044 		wa_xfer_completion(xfer);
2045 	if (rpipe_ready)
2046 		wa_xfer_delayed_run(rpipe);
2047 	wa_xfer_put(xfer);
2048 	return result;
2049 
2050 out_unlock:
2051 	spin_unlock_irqrestore(&xfer->lock, flags);
2052 	wa_xfer_put(xfer);
2053 	return result;
2054 
2055 dequeue_delayed:
2056 	list_del_init(&xfer->list_node);
2057 	spin_unlock(&wa->xfer_list_lock);
2058 	xfer->result = urb->status;
2059 	spin_unlock_irqrestore(&xfer->lock, flags);
2060 	wa_xfer_giveback(xfer);
2061 	wa_xfer_put(xfer);
2062 	usb_put_urb(urb);		/* we got a ref in enqueue() */
2063 	return 0;
2064 }
2065 EXPORT_SYMBOL_GPL(wa_urb_dequeue);
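
/*
 * Per-segment handling in wa_urb_dequeue() above, summarized
 * (informal):
 *
 *	WA_SEG_NOTREADY/READY	should not be seen here; WARN
 *	WA_SEG_DELAYED		abort locally, unlink from rpipe->seg_list
 *	WA_SEG_SUBMITTED/PENDING abort locally only if the HWA abort
 *				request could not be sent; otherwise let
 *				the DTI path report the result
 *	WA_SEG_DTI_PENDING	leave it for wa_buf_in_cb() to finish
 *	WA_SEG_DONE/ERROR/ABORTED nothing left to do
 */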
2066 
2067 /*
2068  * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
2069  * codes
2070  *
2071  * Positive errno values are internal inconsistencies and should be
2072  * flagged louder. Negative are to be passed up to the user in the
2073  * normal way.
2074  *
2075  * @status: USB WA status code -- high two bits are stripped.
2076  */
2077 static int wa_xfer_status_to_errno(u8 status)
2078 {
2079 	int errno;
2080 	u8 real_status = status;
2081 	static int xlat[] = {
2082 		[WA_XFER_STATUS_SUCCESS] = 		0,
2083 		[WA_XFER_STATUS_HALTED] = 		-EPIPE,
2084 		[WA_XFER_STATUS_DATA_BUFFER_ERROR] = 	-ENOBUFS,
2085 		[WA_XFER_STATUS_BABBLE] = 		-EOVERFLOW,
2086 		[WA_XFER_RESERVED] = 			EINVAL,
2087 		[WA_XFER_STATUS_NOT_FOUND] =		0,
2088 		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
2089 		[WA_XFER_STATUS_TRANSACTION_ERROR] = 	-EILSEQ,
2090 		[WA_XFER_STATUS_ABORTED] =		-ENOENT,
2091 		[WA_XFER_STATUS_RPIPE_NOT_READY] = 	EINVAL,
2092 		[WA_XFER_INVALID_FORMAT] = 		EINVAL,
2093 		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = 	EINVAL,
2094 		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = 	EINVAL,
2095 	};
2096 	status &= 0x3f;
2097 
2098 	if (status == 0)
2099 		return 0;
2100 	if (status >= ARRAY_SIZE(xlat)) {
2101 		printk_ratelimited(KERN_ERR "%s(): BUG? "
2102 			       "Unknown WA transfer status 0x%02x\n",
2103 			       __func__, real_status);
2104 		return -EINVAL;
2105 	}
2106 	errno = xlat[status];
2107 	if (unlikely(errno > 0)) {
2108 		printk_ratelimited(KERN_ERR "%s(): BUG? "
2109 			       "Inconsistent WA status: 0x%02x\n",
2110 			       __func__, real_status);
2111 		errno = -errno;
2112 	}
2113 	return errno;
2114 }
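
/*
 * Worked example for the table above (informal): a bTransferStatus
 * with the error bit (0x80) set whose low bits decode to
 * WA_XFER_STATUS_HALTED is masked down and translated to -EPIPE, which
 * the USB core treats as an endpoint stall; WA_XFER_RESERVED is
 * deliberately mapped to a *positive* EINVAL so the "inconsistent
 * status" check in the function flags it loudly before negating it.
 */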
2115 
2116 /*
2117  * If a last segment flag and/or a transfer result error is encountered,
2118  * no other segment transfer results will be returned from the device.
2119  * Mark the remaining submitted or pending xfers as completed so that
2120  * the xfer will complete cleanly.
2121  *
2122  * xfer->lock must be held
2123  *
2124  */
2125 static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
2126 		int starting_index, enum wa_seg_status status)
2127 {
2128 	int index;
2129 	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
2130 
2131 	for (index = starting_index; index < xfer->segs_submitted; index++) {
2132 		struct wa_seg *current_seg = xfer->seg[index];
2133 
2134 		BUG_ON(current_seg == NULL);
2135 
2136 		switch (current_seg->status) {
2137 		case WA_SEG_SUBMITTED:
2138 		case WA_SEG_PENDING:
2139 		case WA_SEG_DTI_PENDING:
2140 			rpipe_avail_inc(rpipe);
2141 		/*
2142 		 * do not increment RPIPE avail for the WA_SEG_DELAYED case
2143 		 * since it has not been submitted to the RPIPE.
2144 		 */
2145 		/* fall through */
2146 		case WA_SEG_DELAYED:
2147 			xfer->segs_done++;
2148 			current_seg->status = status;
2149 			break;
2150 		case WA_SEG_ABORTED:
2151 			break;
2152 		default:
2153 			WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
2154 				__func__, wa_xfer_id(xfer), index,
2155 				current_seg->status);
2156 			break;
2157 		}
2158 	}
2159 }
2160 
2161 /* Populate the given urb based on the current isoc transfer state. */
2162 static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
2163 	struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
2164 {
2165 	int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
2166 	int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
2167 	struct usb_iso_packet_descriptor *iso_frame_desc =
2168 						xfer->urb->iso_frame_desc;
2169 	const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
2170 	int next_frame_contiguous;
2171 	struct usb_iso_packet_descriptor *iso_frame;
2172 
2173 	BUG_ON(buf_in_urb->status == -EINPROGRESS);
2174 
2175 	/*
2176 	 * If the current frame actual_length is contiguous with the next frame
2177 	 * and actual_length is a multiple of the DTI endpoint max packet size,
2178 	 * combine the current frame with the next frame in a single URB.  This
2179 	 * reduces the number of URBs that must be submitted in that case.
2180 	 */
2181 	seg_index = seg->isoc_frame_index;
2182 	do {
2183 		next_frame_contiguous = 0;
2184 
2185 		iso_frame = &iso_frame_desc[urb_frame_index];
2186 		total_len += iso_frame->actual_length;
2187 		++urb_frame_index;
2188 		++seg_index;
2189 
2190 		if (seg_index < seg->isoc_frame_count) {
2191 			struct usb_iso_packet_descriptor *next_iso_frame;
2192 
2193 			next_iso_frame = &iso_frame_desc[urb_frame_index];
2194 
2195 			if ((iso_frame->offset + iso_frame->actual_length) ==
2196 				next_iso_frame->offset)
2197 				next_frame_contiguous = 1;
2198 		}
2199 	} while (next_frame_contiguous
2200 			&& ((iso_frame->actual_length % dti_packet_size) == 0));
2201 
2202 	/* this should always be 0 before a resubmit. */
2203 	buf_in_urb->num_mapped_sgs	= 0;
2204 	buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
2205 		iso_frame_desc[urb_start_frame].offset;
2206 	buf_in_urb->transfer_buffer_length = total_len;
2207 	buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2208 	buf_in_urb->transfer_buffer = NULL;
2209 	buf_in_urb->sg = NULL;
2210 	buf_in_urb->num_sgs = 0;
2211 	buf_in_urb->context = seg;
2212 
2213 	/* return the number of frames included in this URB. */
2214 	return seg_index - seg->isoc_frame_index;
2215 }
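
/*
 * Frame-combining example for __wa_populate_buf_in_urb_isoc() above
 * (informal; assumes a DTI max packet size of 512): three contiguous
 * frames with actual lengths 512, 512 and 100 are read with a single
 * buf_in URB of 1124 bytes, while a 100-byte frame is never combined
 * with its successor because 100 is not a multiple of the DTI packet
 * size, so the device would short-terminate the bulk read there.
 */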
2216 
2217 /* Populate the given urb based on the current transfer state. */
2218 static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
2219 	unsigned int seg_idx, unsigned int bytes_transferred)
2220 {
2221 	int result = 0;
2222 	struct wa_seg *seg = xfer->seg[seg_idx];
2223 
2224 	BUG_ON(buf_in_urb->status == -EINPROGRESS);
2225 	/* this should always be 0 before a resubmit. */
2226 	buf_in_urb->num_mapped_sgs	= 0;
2227 
2228 	if (xfer->is_dma) {
2229 		buf_in_urb->transfer_dma = xfer->urb->transfer_dma
2230 			+ (seg_idx * xfer->seg_size);
2231 		buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2232 		buf_in_urb->transfer_buffer = NULL;
2233 		buf_in_urb->sg = NULL;
2234 		buf_in_urb->num_sgs = 0;
2235 	} else {
2236 		/* do buffer or SG processing. */
2237 		buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
2238 
2239 		if (xfer->urb->transfer_buffer) {
2240 			buf_in_urb->transfer_buffer =
2241 				xfer->urb->transfer_buffer
2242 				+ (seg_idx * xfer->seg_size);
2243 			buf_in_urb->sg = NULL;
2244 			buf_in_urb->num_sgs = 0;
2245 		} else {
2246 			/* Allocate an SG list to store seg_size bytes and
2247 			 * copy the subset of xfer->urb->sg that matches the
2248 			 * buffer subset we are about to read.
2249 			 */
2250 			buf_in_urb->sg = wa_xfer_create_subset_sg(
2251 				xfer->urb->sg,
2252 				seg_idx * xfer->seg_size,
2253 				bytes_transferred,
2254 				&(buf_in_urb->num_sgs));
2255 
2256 			if (!(buf_in_urb->sg)) {
2257 				buf_in_urb->num_sgs	= 0;
2258 				result = -ENOMEM;
2259 			}
2260 			buf_in_urb->transfer_buffer = NULL;
2261 		}
2262 	}
2263 	buf_in_urb->transfer_buffer_length = bytes_transferred;
2264 	buf_in_urb->context = seg;
2265 
2266 	return result;
2267 }
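
/*
 * Buffer-selection summary for wa_populate_buf_in_urb() above
 * (informal): a DMA-mapped xfer reuses transfer_dma plus a per-segment
 * offset, a plain-buffer xfer points transfer_buffer at the segment's
 * slice, and an SG xfer builds a temporary subset scatterlist with
 * wa_xfer_create_subset_sg() which wa_buf_in_cb() kfree()s once the
 * read completes.
 */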
2268 
2269 /*
2270  * Process a xfer result completion message
2271  *
2272  * inbound transfers: need to schedule a buf_in_urb read
2273  *
2274  * FIXME: this function needs to be broken up in parts
2275  */
2276 static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
2277 		struct wa_xfer_result *xfer_result)
2278 {
2279 	int result;
2280 	struct device *dev = &wa->usb_iface->dev;
2281 	unsigned long flags;
2282 	unsigned int seg_idx;
2283 	struct wa_seg *seg;
2284 	struct wa_rpipe *rpipe;
2285 	unsigned done = 0;
2286 	u8 usb_status;
2287 	unsigned rpipe_ready = 0;
2288 	unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
2289 	struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
2290 
2291 	spin_lock_irqsave(&xfer->lock, flags);
2292 	seg_idx = xfer_result->bTransferSegment & 0x7f;
2293 	if (unlikely(seg_idx >= xfer->segs))
2294 		goto error_bad_seg;
2295 	seg = xfer->seg[seg_idx];
2296 	rpipe = xfer->ep->hcpriv;
2297 	usb_status = xfer_result->bTransferStatus;
2298 	dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
2299 		xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
2300 	if (seg->status == WA_SEG_ABORTED
2301 	    || seg->status == WA_SEG_ERROR)	/* already handled */
2302 		goto segment_aborted;
2303 	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
2304 		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
2305 	if (seg->status != WA_SEG_PENDING) {
2306 		if (printk_ratelimit())
2307 			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
2308 				xfer, seg_idx, seg->status);
2309 		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
2310 	}
2311 	if (usb_status & 0x80) {
2312 		seg->result = wa_xfer_status_to_errno(usb_status);
2313 		dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
2314 			xfer, xfer->id, seg->index, usb_status);
2315 		seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
2316 			WA_SEG_ABORTED : WA_SEG_ERROR;
2317 		goto error_complete;
2318 	}
2319 	/* FIXME: we ignore warnings, tally them for stats */
2320 	if (usb_status & 0x40) 		/* Warning?... */
2321 		usb_status = 0;		/* ... pass */
2322 	/*
2323 	 * If the last segment bit is set, complete the remaining segments.
2324 	 * When the current segment is completed, either in wa_buf_in_cb for
2325 	 * transfers with data or below for no data, the xfer will complete.
2326 	 */
2327 	if (xfer_result->bTransferSegment & 0x80)
2328 		wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
2329 			WA_SEG_DONE);
2330 	if (usb_pipeisoc(xfer->urb->pipe)
2331 		&& (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
2332 		/* set up WA state to read the isoc packet status next. */
2333 		wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
2334 		wa->dti_isoc_xfer_seg = seg_idx;
2335 		wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
2336 	} else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
2337 			&& (bytes_transferred > 0)) {
2338 		/* IN data phase: read to buffer */
2339 		seg->status = WA_SEG_DTI_PENDING;
2340 		result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
2341 			bytes_transferred);
2342 		if (result < 0)
2343 			goto error_buf_in_populate;
2344 		++(wa->active_buf_in_urbs);
2345 		result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
2346 		if (result < 0) {
2347 			--(wa->active_buf_in_urbs);
2348 			goto error_submit_buf_in;
2349 		}
2350 	} else {
2351 		/* OUT data phase or no data, complete it -- */
2352 		seg->result = bytes_transferred;
2353 		rpipe_ready = rpipe_avail_inc(rpipe);
2354 		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
2355 	}
2356 	spin_unlock_irqrestore(&xfer->lock, flags);
2357 	if (done)
2358 		wa_xfer_completion(xfer);
2359 	if (rpipe_ready)
2360 		wa_xfer_delayed_run(rpipe);
2361 	return;
2362 
2363 error_submit_buf_in:
2364 	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2365 		dev_err(dev, "DTI: URB max acceptable errors "
2366 			"exceeded, resetting device\n");
2367 		wa_reset_all(wa);
2368 	}
2369 	if (printk_ratelimit())
2370 		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
2371 			xfer, seg_idx, result);
2372 	seg->result = result;
2373 	kfree(buf_in_urb->sg);
2374 	buf_in_urb->sg = NULL;
2375 error_buf_in_populate:
2376 	__wa_xfer_abort(xfer);
2377 	seg->status = WA_SEG_ERROR;
2378 error_complete:
2379 	xfer->segs_done++;
2380 	rpipe_ready = rpipe_avail_inc(rpipe);
2381 	wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
2382 	done = __wa_xfer_is_done(xfer);
2383 	/*
2384 	 * queue work item to clear STALL for control endpoints.
2385 	 * Otherwise, let endpoint_reset take care of it.
2386 	 */
2387 	if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
2388 		usb_endpoint_xfer_control(&xfer->ep->desc) &&
2389 		done) {
2390 
2391 		dev_info(dev, "Control EP stall.  Queue delayed work.\n");
2392 		spin_lock(&wa->xfer_list_lock);
2393 		/* move xfer from xfer_list to xfer_errored_list. */
2394 		list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
2395 		spin_unlock(&wa->xfer_list_lock);
2396 		spin_unlock_irqrestore(&xfer->lock, flags);
2397 		queue_work(wusbd, &wa->xfer_error_work);
2398 	} else {
2399 		spin_unlock_irqrestore(&xfer->lock, flags);
2400 		if (done)
2401 			wa_xfer_completion(xfer);
2402 		if (rpipe_ready)
2403 			wa_xfer_delayed_run(rpipe);
2404 	}
2405 
2406 	return;
2407 
2408 error_bad_seg:
2409 	spin_unlock_irqrestore(&xfer->lock, flags);
2410 	wa_urb_dequeue(wa, xfer->urb, -ENOENT);
2411 	if (printk_ratelimit())
2412 		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
2413 	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2414 		dev_err(dev, "DTI: URB max acceptable errors "
2415 			"exceeded, resetting device\n");
2416 		wa_reset_all(wa);
2417 	}
2418 	return;
2419 
2420 segment_aborted:
2421 	/* nothing to do, as the aborter did the completion */
2422 	spin_unlock_irqrestore(&xfer->lock, flags);
2423 }
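
/*
 * Outcome summary for wa_xfer_result_chew() above (informal): an isoc
 * transfer with packets switches the DTI state machine to
 * WA_DTI_ISOC_PACKET_STATUS_PENDING, a non-isoc inbound transfer with
 * data marks the segment WA_SEG_DTI_PENDING and submits buf_in_urbs[0],
 * and anything else (outbound or zero-length) is marked done right
 * here; a halted control endpoint whose xfer is done additionally
 * queues xfer_error_work to clear the stall.
 */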
2424 
2425 /*
2426  * Process an isochronous packet status message
2427  *
2428  * inbound transfers: need to schedule a buf_in_urb read
2429  */
2430 static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
2431 {
2432 	struct device *dev = &wa->usb_iface->dev;
2433 	struct wa_xfer_packet_status_hwaiso *packet_status;
2434 	struct wa_xfer_packet_status_len_hwaiso *status_array;
2435 	struct wa_xfer *xfer;
2436 	unsigned long flags;
2437 	struct wa_seg *seg;
2438 	struct wa_rpipe *rpipe;
2439 	unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
2440 	unsigned first_frame_index = 0, rpipe_ready = 0;
2441 	int expected_size;
2442 
2443 	/* We have a xfer result buffer; check it */
2444 	dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
2445 		urb->actual_length, urb->transfer_buffer);
2446 	packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
2447 	if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
2448 		dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
2449 			packet_status->bPacketType);
2450 		goto error_parse_buffer;
2451 	}
2452 	xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
2453 	if (xfer == NULL) {
2454 		dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
2455 			wa->dti_isoc_xfer_in_progress);
2456 		goto error_parse_buffer;
2457 	}
2458 	spin_lock_irqsave(&xfer->lock, flags);
2459 	if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
2460 		goto error_bad_seg;
2461 	seg = xfer->seg[wa->dti_isoc_xfer_seg];
2462 	rpipe = xfer->ep->hcpriv;
2463 	expected_size = sizeof(*packet_status) +
2464 			(sizeof(packet_status->PacketStatus[0]) *
2465 			seg->isoc_frame_count);
2466 	if (urb->actual_length != expected_size) {
2467 		dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
2468 			urb->actual_length, expected_size);
2469 		goto error_bad_seg;
2470 	}
2471 	if (le16_to_cpu(packet_status->wLength) != expected_size) {
2472 		dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
2473 			le16_to_cpu(packet_status->wLength));
2474 		goto error_bad_seg;
2475 	}
2476 	/* write isoc packet status and lengths back to the xfer urb. */
2477 	status_array = packet_status->PacketStatus;
2478 	xfer->urb->start_frame =
2479 		wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
2480 	for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
2481 		struct usb_iso_packet_descriptor *iso_frame_desc =
2482 			xfer->urb->iso_frame_desc;
2483 		const int xfer_frame_index =
2484 			seg->isoc_frame_offset + seg_index;
2485 
2486 		iso_frame_desc[xfer_frame_index].status =
2487 			wa_xfer_status_to_errno(
2488 			le16_to_cpu(status_array[seg_index].PacketStatus));
2489 		iso_frame_desc[xfer_frame_index].actual_length =
2490 			le16_to_cpu(status_array[seg_index].PacketLength);
2491 		/* track the number of frames successfully transferred. */
2492 		if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
2493 			/* save the starting frame index for buf_in_urb. */
2494 			if (!data_frame_count)
2495 				first_frame_index = seg_index;
2496 			++data_frame_count;
2497 		}
2498 	}
2499 
2500 	if (xfer->is_inbound && data_frame_count) {
2501 		int result, total_frames_read = 0, urb_index = 0;
2502 		struct urb *buf_in_urb;
2503 
2504 		/* IN data phase: read to buffer */
2505 		seg->status = WA_SEG_DTI_PENDING;
2506 
2507 		/* start with the first frame with data. */
2508 		seg->isoc_frame_index = first_frame_index;
2509 		/* submit up to WA_MAX_BUF_IN_URBS read URBs. */
2510 		do {
2511 			int urb_frame_index, urb_frame_count;
2512 			struct usb_iso_packet_descriptor *iso_frame_desc;
2513 
2514 			buf_in_urb = &(wa->buf_in_urbs[urb_index]);
2515 			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
2516 				buf_in_urb, xfer, seg);
2517 			/* advance frame index to start of next read URB. */
2518 			seg->isoc_frame_index += urb_frame_count;
2519 			total_frames_read += urb_frame_count;
2520 
2521 			++(wa->active_buf_in_urbs);
2522 			result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
2523 
2524 			/* skip 0-byte frames. */
2525 			urb_frame_index =
2526 				seg->isoc_frame_offset + seg->isoc_frame_index;
2527 			iso_frame_desc =
2528 				&(xfer->urb->iso_frame_desc[urb_frame_index]);
2529 			while ((seg->isoc_frame_index <
2530 						seg->isoc_frame_count) &&
2531 				 (iso_frame_desc->actual_length == 0)) {
2532 				++(seg->isoc_frame_index);
2533 				++iso_frame_desc;
2534 			}
2535 			++urb_index;
2536 
2537 		} while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
2538 				&& (seg->isoc_frame_index <
2539 						seg->isoc_frame_count));
2540 
2541 		if (result < 0) {
2542 			--(wa->active_buf_in_urbs);
2543 			dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
2544 				result);
2545 			wa_reset_all(wa);
2546 		} else if (data_frame_count > total_frames_read)
2547 			/* If we need to read more frames, set DTI busy. */
2548 			dti_busy = 1;
2549 	} else {
2550 		/* OUT transfer or no more IN data, complete it -- */
2551 		rpipe_ready = rpipe_avail_inc(rpipe);
2552 		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
2553 	}
2554 	spin_unlock_irqrestore(&xfer->lock, flags);
2555 	if (dti_busy)
2556 		wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
2557 	else
2558 		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
2559 	if (done)
2560 		wa_xfer_completion(xfer);
2561 	if (rpipe_ready)
2562 		wa_xfer_delayed_run(rpipe);
2563 	wa_xfer_put(xfer);
2564 	return dti_busy;
2565 
2566 error_bad_seg:
2567 	spin_unlock_irqrestore(&xfer->lock, flags);
2568 	wa_xfer_put(xfer);
2569 error_parse_buffer:
2570 	return dti_busy;
2571 }
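
/*
 * Note on the read loop above (informal): up to WA_MAX_BUF_IN_URBS
 * buf_in URBs are submitted back to back, each covering as many
 * contiguous non-empty isoc frames as __wa_populate_buf_in_urb_isoc()
 * could coalesce; if frames with data remain after that, dti_busy is
 * set so the DTI URB is not resubmitted until wa_buf_in_cb() has
 * drained them.
 */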
2572 
2573 /*
2574  * Callback for the IN data phase
2575  *
2576  * If successful, transition state; otherwise, take note of the
2577  * error, mark this segment done and try completion.
2578  *
2579  * Note we don't access the data until we are sure the transfer hasn't
2580  * been cancelled (ECONNRESET, ENOENT), which could mean that
2581  * seg->xfer is already gone.
2582  */
2583 static void wa_buf_in_cb(struct urb *urb)
2584 {
2585 	struct wa_seg *seg = urb->context;
2586 	struct wa_xfer *xfer = seg->xfer;
2587 	struct wahc *wa;
2588 	struct device *dev;
2589 	struct wa_rpipe *rpipe;
2590 	unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
2591 	unsigned long flags;
2592 	int resubmit_dti = 0, active_buf_in_urbs;
2593 	u8 done = 0;
2594 
2595 	/* free the sg if it was used. */
2596 	kfree(urb->sg);
2597 	urb->sg = NULL;
2598 
2599 	spin_lock_irqsave(&xfer->lock, flags);
2600 	wa = xfer->wa;
2601 	dev = &wa->usb_iface->dev;
2602 	--(wa->active_buf_in_urbs);
2603 	active_buf_in_urbs = wa->active_buf_in_urbs;
2604 	rpipe = xfer->ep->hcpriv;
2605 
2606 	if (usb_pipeisoc(xfer->urb->pipe)) {
2607 		struct usb_iso_packet_descriptor *iso_frame_desc =
2608 			xfer->urb->iso_frame_desc;
2609 		int	seg_index;
2610 
2611 		/*
2612 		 * Find the next isoc frame with data and count how many
2613 		 * frames with data remain.
2614 		 */
2615 		seg_index = seg->isoc_frame_index;
2616 		while (seg_index < seg->isoc_frame_count) {
2617 			const int urb_frame_index =
2618 				seg->isoc_frame_offset + seg_index;
2619 
2620 			if (iso_frame_desc[urb_frame_index].actual_length > 0) {
2621 				/* save the index of the next frame with data */
2622 				if (!isoc_data_frame_count)
2623 					seg->isoc_frame_index = seg_index;
2624 				++isoc_data_frame_count;
2625 			}
2626 			++seg_index;
2627 		}
2628 	}
2629 	spin_unlock_irqrestore(&xfer->lock, flags);
2630 
2631 	switch (urb->status) {
2632 	case 0:
2633 		spin_lock_irqsave(&xfer->lock, flags);
2634 
2635 		seg->result += urb->actual_length;
2636 		if (isoc_data_frame_count > 0) {
2637 			int result, urb_frame_count;
2638 
2639 			/* submit a read URB for the next frame with data. */
2640 			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
2641 				 xfer, seg);
2642 			/* advance index to start of next read URB. */
2643 			seg->isoc_frame_index += urb_frame_count;
2644 			++(wa->active_buf_in_urbs);
2645 			result = usb_submit_urb(urb, GFP_ATOMIC);
2646 			if (result < 0) {
2647 				--(wa->active_buf_in_urbs);
2648 				dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
2649 					result);
2650 				wa_reset_all(wa);
2651 			}
2652 			/*
2653 			 * If we are in this callback and
2654 			 * isoc_data_frame_count > 0, it means that the dti_urb
2655 			 * submission was delayed in wa_dti_cb.  Once
2656 			 * we submit the last buf_in_urb, we can submit the
2657 			 * delayed dti_urb.
2658 			 */
2659 			resubmit_dti = (isoc_data_frame_count ==
2660 						urb_frame_count);
2661 		} else if (active_buf_in_urbs == 0) {
2662 			dev_dbg(dev,
2663 				"xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
2664 				xfer, wa_xfer_id(xfer), seg->index,
2665 				seg->result);
2666 			rpipe_ready = rpipe_avail_inc(rpipe);
2667 			done = __wa_xfer_mark_seg_as_done(xfer, seg,
2668 					WA_SEG_DONE);
2669 		}
2670 		spin_unlock_irqrestore(&xfer->lock, flags);
2671 		if (done)
2672 			wa_xfer_completion(xfer);
2673 		if (rpipe_ready)
2674 			wa_xfer_delayed_run(rpipe);
2675 		break;
2676 	case -ECONNRESET:	/* URB unlinked; no need to do anything */
2677 	case -ENOENT:		/* as it was done by the who unlinked us */
2678 		break;
2679 	default:		/* Other errors ... */
2680 		/*
2681 		 * Error on data buf read.  Only resubmit DTI if it hasn't
2682 		 * already been done by previously hitting this error or by a
2683 		 * successful completion of the previous buf_in_urb.
2684 		 */
2685 		resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
2686 		spin_lock_irqsave(&xfer->lock, flags);
2687 		if (printk_ratelimit())
2688 			dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
2689 				xfer, wa_xfer_id(xfer), seg->index,
2690 				urb->status);
2691 		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
2692 			    EDC_ERROR_TIMEFRAME)){
2693 			dev_err(dev, "DTO: URB max acceptable errors "
2694 				"exceeded, resetting device\n");
2695 			wa_reset_all(wa);
2696 		}
2697 		seg->result = urb->status;
2698 		rpipe_ready = rpipe_avail_inc(rpipe);
2699 		if (active_buf_in_urbs == 0)
2700 			done = __wa_xfer_mark_seg_as_done(xfer, seg,
2701 				WA_SEG_ERROR);
2702 		else
2703 			__wa_xfer_abort(xfer);
2704 		spin_unlock_irqrestore(&xfer->lock, flags);
2705 		if (done)
2706 			wa_xfer_completion(xfer);
2707 		if (rpipe_ready)
2708 			wa_xfer_delayed_run(rpipe);
2709 	}
2710 
2711 	if (resubmit_dti) {
2712 		int result;
2713 
2714 		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
2715 
2716 		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2717 		if (result < 0) {
2718 			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
2719 				result);
2720 			wa_reset_all(wa);
2721 		}
2722 	}
2723 }
2724 
2725 /*
2726  * Handle an incoming transfer result buffer
2727  *
2728  * Given a transfer result buffer, it completes the transfer (possibly
2729  * scheduling a buffer-in read) and then resubmits the DTI URB for a
2730  * new transfer result read.
2731  *
2732  *
2733  * The xfer_result DTI URB state machine
2734  *
2735  * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
2736  *
2737  * We start in OFF mode, the first xfer_result notification [through
2738  * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
2739  * read.
2740  *
2741  * We receive a buffer -- if it is not a xfer_result, we complain and
2742  * repost the DTI-URB. If it is a xfer_result then do the xfer seg
2743  * request accounting. If it is an IN segment, we move to RBI and post
2744  * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
2745  * repost the DTI-URB and move back to the RXR state. If there was no
2746  * IN segment, it will repost the DTI-URB.
2747  *
2748  * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
2749  * errors) in the URBs.
2750  */
2751 static void wa_dti_cb(struct urb *urb)
2752 {
2753 	int result, dti_busy = 0;
2754 	struct wahc *wa = urb->context;
2755 	struct device *dev = &wa->usb_iface->dev;
2756 	u32 xfer_id;
2757 	u8 usb_status;
2758 
2759 	BUG_ON(wa->dti_urb != urb);
2760 	switch (wa->dti_urb->status) {
2761 	case 0:
2762 		if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
2763 			struct wa_xfer_result *xfer_result;
2764 			struct wa_xfer *xfer;
2765 
2766 			/* We have a xfer result buffer; check it */
2767 			dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
2768 				urb->actual_length, urb->transfer_buffer);
2769 			if (urb->actual_length != sizeof(*xfer_result)) {
2770 				dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
2771 					urb->actual_length,
2772 					sizeof(*xfer_result));
2773 				break;
2774 			}
2775 			xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
2776 			if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
2777 				dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
2778 					xfer_result->hdr.bLength);
2779 				break;
2780 			}
2781 			if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
2782 				dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
2783 					xfer_result->hdr.bNotifyType);
2784 				break;
2785 			}
2786 			xfer_id = le32_to_cpu(xfer_result->dwTransferID);
2787 			usb_status = xfer_result->bTransferStatus & 0x3f;
2788 			if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
2789 				/* taken care of already */
2790 				dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
2791 					__func__, xfer_id,
2792 					xfer_result->bTransferSegment & 0x7f);
2793 				break;
2794 			}
2795 			xfer = wa_xfer_get_by_id(wa, xfer_id);
2796 			if (xfer == NULL) {
2797 				/* FIXME: transaction not found. */
2798 				dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
2799 					xfer_id, usb_status);
2800 				break;
2801 			}
2802 			wa_xfer_result_chew(wa, xfer, xfer_result);
2803 			wa_xfer_put(xfer);
2804 		} else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
2805 			dti_busy = wa_process_iso_packet_status(wa, urb);
2806 		} else {
2807 			dev_err(dev, "DTI Error: unexpected EP state = %d\n",
2808 				wa->dti_state);
2809 		}
2810 		break;
2811 	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
2812 	case -ESHUTDOWN:	/* going away! */
2813 		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
2814 		goto out;
2815 	default:
2816 		/* Unknown error */
2817 		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
2818 			    EDC_ERROR_TIMEFRAME)) {
2819 			dev_err(dev, "DTI: URB max acceptable errors "
2820 				"exceeded, resetting device\n");
2821 			wa_reset_all(wa);
2822 			goto out;
2823 		}
2824 		if (printk_ratelimit())
2825 			dev_err(dev, "DTI: URB error %d\n", urb->status);
2826 		break;
2827 	}
2828 
2829 	/* Resubmit the DTI URB if we are not busy processing isoc in frames. */
2830 	if (!dti_busy) {
2831 		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2832 		if (result < 0) {
2833 			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
2834 				result);
2835 			wa_reset_all(wa);
2836 		}
2837 	}
2838 out:
2839 	return;
2840 }
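
/*
 * DTI state transitions as implemented above and in the buf-in path
 * (informal sketch):
 *
 *	TRANSFER_RESULT_PENDING    --isoc result-->  ISOC_PACKET_STATUS_PENDING
 *	ISOC_PACKET_STATUS_PENDING --frames left-->  BUF_IN_DATA_PENDING
 *	BUF_IN_DATA_PENDING        --last buf_in-->  TRANSFER_RESULT_PENDING
 *
 * The DTI URB itself is resubmitted at the end of this callback unless
 * dti_busy was set, in which case wa_buf_in_cb() resubmits it once the
 * remaining isoc data reads have been issued (or on error).
 */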
2841 
2842 /*
2843  * Initialize the DTI URB for reading transfer result notifications and also
2844  * the buffer-in URBs, for reading data buffers. Then we just submit the DTI URB.
2845  */
2846 int wa_dti_start(struct wahc *wa)
2847 {
2848 	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2849 	struct device *dev = &wa->usb_iface->dev;
2850 	int result = -ENOMEM, index;
2851 
2852 	if (wa->dti_urb != NULL)	/* DTI URB already started */
2853 		goto out;
2854 
2855 	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
2856 	if (wa->dti_urb == NULL)
2857 		goto error_dti_urb_alloc;
2858 	usb_fill_bulk_urb(
2859 		wa->dti_urb, wa->usb_dev,
2860 		usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
2861 		wa->dti_buf, wa->dti_buf_size,
2862 		wa_dti_cb, wa);
2863 
2864 	/* init the buf in URBs */
2865 	for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
2866 		usb_fill_bulk_urb(
2867 			&(wa->buf_in_urbs[index]), wa->usb_dev,
2868 			usb_rcvbulkpipe(wa->usb_dev,
2869 				0x80 | dti_epd->bEndpointAddress),
2870 			NULL, 0, wa_buf_in_cb, wa);
2871 	}
2872 	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
2873 	if (result < 0) {
2874 		dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
2875 			result);
2876 		goto error_dti_urb_submit;
2877 	}
2878 out:
2879 	return 0;
2880 
2881 error_dti_urb_submit:
2882 	usb_put_urb(wa->dti_urb);
2883 	wa->dti_urb = NULL;
2884 error_dti_urb_alloc:
2885 	return result;
2886 }
2887 EXPORT_SYMBOL_GPL(wa_dti_start);
2888 /*
2889  * Transfer complete notification
2890  *
2891  * Called from the notif.c code. We get a notification on EP2 saying
2892  * that some endpoint has some transfer result data available. We are
2893  * about to read it.
2894  *
2895  * To speed things up, we always keep a URB reading the DTI endpoint;
2896  * we don't really set it up and start it until the first xfer complete
2897  * notification arrives, which is what we do here.
2898  *
2899  * Follow up in wa_dti_cb(), as that's where the whole state
2900  * machine starts.
2901  *
2902  * @wa shall be referenced
2903  */
2904 void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
2905 {
2906 	struct device *dev = &wa->usb_iface->dev;
2907 	struct wa_notif_xfer *notif_xfer;
2908 	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2909 
2910 	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
2911 	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
2912 
2913 	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
2914 		/* FIXME: hardcoded limitation, adapt */
2915 		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
2916 			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
2917 		goto error;
2918 	}
2919 
2920 	/* attempt to start the DTI ep processing. */
2921 	if (wa_dti_start(wa) < 0)
2922 		goto error;
2923 
2924 	return;
2925 
2926 error:
2927 	wa_reset_all(wa);
2928 }
2929