1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
4  *
5  * Copyright (C) 2005-2007 AMD (http://www.amd.com)
6  * Author: Thomas Dahlmann
7  */
8 
9 /*
10  * This file contains the core driver implementation for the UDC, which is
11  * based on the Synopsys device controller IP (different from the HS OTG IP)
12  * and is either connected through the PCI bus or integrated into SoC platforms.
13  */
14 
15 /* Driver strings */
16 #define UDC_MOD_DESCRIPTION		"Synopsys USB Device Controller"
17 #define UDC_DRIVER_VERSION_STRING	"01.00.0206"
18 
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/kernel.h>
22 #include <linux/delay.h>
23 #include <linux/ioport.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/errno.h>
27 #include <linux/timer.h>
28 #include <linux/list.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioctl.h>
31 #include <linux/fs.h>
32 #include <linux/dmapool.h>
33 #include <linux/prefetch.h>
34 #include <linux/moduleparam.h>
35 #include <asm/byteorder.h>
36 #include <asm/unaligned.h>
37 #include "amd5536udc.h"
38 
39 static void udc_tasklet_disconnect(unsigned long);
40 static void udc_setup_endpoints(struct udc *dev);
41 static void udc_soft_reset(struct udc *dev);
42 static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
43 static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
44 
45 /* description */
46 static const char mod_desc[] = UDC_MOD_DESCRIPTION;
47 static const char name[] = "udc";
48 
49 /* structure to hold endpoint function pointers */
50 static const struct usb_ep_ops udc_ep_ops;
51 
52 /* received setup data */
53 static union udc_setup_data setup_data;
54 
55 /* pointer to device object */
56 static struct udc *udc;
57 
58 /* irq spin lock for soft reset */
59 static DEFINE_SPINLOCK(udc_irq_spinlock);
60 /* stall spin lock */
61 static DEFINE_SPINLOCK(udc_stall_spinlock);
62 
63 /*
64  * slave mode: pending bytes in rx fifo after nyet,
65  * used if EPIN irq came but no req was available
66  */
67 static unsigned int udc_rxfifo_pending;
68 
69 /* count soft resets after suspend to avoid loop */
70 static int soft_reset_occured;
71 static int soft_reset_after_usbreset_occured;
72 
73 /* timer */
74 static struct timer_list udc_timer;
75 static int stop_timer;
76 
77 /* set_rde -- Is used to control enabling of RX DMA. Problem is
78  * that UDC has only one bit (RDE) to enable/disable RX DMA for
79  * all OUT endpoints. So we have to handle race conditions like
80  * when OUT data reaches the fifo but no request was queued yet.
81  * This cannot be solved by letting the RX DMA disabled until a
82  * request gets queued because there may be other OUT packets
83  * in the FIFO (important for not blocking control traffic).
84  * The value of set_rde controls the corresponding timer.
85  *
86  * set_rde -1 == not used, means it is allowed to be set to 0 or 1
87  * set_rde  0 == do not touch RDE, do not start the RDE timer
88  * set_rde  1 == timer function will look whether FIFO has data
89  * set_rde  2 == set by timer function to enable RX DMA on next call
90  */
91 static int set_rde = -1;
92 
93 static DECLARE_COMPLETION(on_exit);
94 static struct timer_list udc_pollstall_timer;
95 static int stop_pollstall_timer;
96 static DECLARE_COMPLETION(on_pollstall_exit);
97 
98 /* tasklet for usb disconnect */
99 static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
100 		(unsigned long) &udc);
101 
102 
103 /* endpoint names used for print */
104 static const char ep0_string[] = "ep0in";
105 static const struct {
106 	const char *name;
107 	const struct usb_ep_caps caps;
108 } ep_info[] = {
109 #define EP_INFO(_name, _caps) \
110 	{ \
111 		.name = _name, \
112 		.caps = _caps, \
113 	}
114 
115 	EP_INFO(ep0_string,
116 		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_IN)),
117 	EP_INFO("ep1in-int",
118 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
119 	EP_INFO("ep2in-bulk",
120 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
121 	EP_INFO("ep3in-bulk",
122 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
123 	EP_INFO("ep4in-bulk",
124 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
125 	EP_INFO("ep5in-bulk",
126 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
127 	EP_INFO("ep6in-bulk",
128 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
129 	EP_INFO("ep7in-bulk",
130 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
131 	EP_INFO("ep8in-bulk",
132 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
133 	EP_INFO("ep9in-bulk",
134 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
135 	EP_INFO("ep10in-bulk",
136 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
137 	EP_INFO("ep11in-bulk",
138 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
139 	EP_INFO("ep12in-bulk",
140 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
141 	EP_INFO("ep13in-bulk",
142 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
143 	EP_INFO("ep14in-bulk",
144 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
145 	EP_INFO("ep15in-bulk",
146 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
147 	EP_INFO("ep0out",
148 		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_OUT)),
149 	EP_INFO("ep1out-bulk",
150 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
151 	EP_INFO("ep2out-bulk",
152 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
153 	EP_INFO("ep3out-bulk",
154 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
155 	EP_INFO("ep4out-bulk",
156 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
157 	EP_INFO("ep5out-bulk",
158 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
159 	EP_INFO("ep6out-bulk",
160 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
161 	EP_INFO("ep7out-bulk",
162 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
163 	EP_INFO("ep8out-bulk",
164 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
165 	EP_INFO("ep9out-bulk",
166 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
167 	EP_INFO("ep10out-bulk",
168 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
169 	EP_INFO("ep11out-bulk",
170 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
171 	EP_INFO("ep12out-bulk",
172 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
173 	EP_INFO("ep13out-bulk",
174 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
175 	EP_INFO("ep14out-bulk",
176 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
177 	EP_INFO("ep15out-bulk",
178 		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
179 
180 #undef EP_INFO
181 };
182 
183 /* buffer fill mode */
184 static int use_dma_bufferfill_mode;
185 /* tx buffer size for high speed */
186 static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
187 
188 /*---------------------------------------------------------------------------*/
189 /* Prints UDC device registers and endpoint irq registers */
190 static void print_regs(struct udc *dev)
191 {
192 	DBG(dev, "------- Device registers -------\n");
193 	DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
194 	DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
195 	DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
196 	DBG(dev, "\n");
197 	DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
198 	DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
199 	DBG(dev, "\n");
200 	DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
201 	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
202 	DBG(dev, "\n");
203 	DBG(dev, "USE DMA        = %d\n", use_dma);
204 	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
205 		DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
206 			"WITHOUT desc. update)\n");
207 		dev_info(dev->dev, "DMA mode (%s)\n", "PPBNDU");
208 	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
209 		DBG(dev, "DMA mode       = PPBDU (packet per buffer "
210 			"WITH desc. update)\n");
211 		dev_info(dev->dev, "DMA mode (%s)\n", "PPBDU");
212 	}
213 	if (use_dma && use_dma_bufferfill_mode) {
214 		DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
215 		dev_info(dev->dev, "DMA mode (%s)\n", "BF");
216 	}
217 	if (!use_dma)
218 		dev_info(dev->dev, "FIFO mode\n");
219 	DBG(dev, "-------------------------------------------------------\n");
220 }
221 
222 /* Masks unused interrupts */
223 int udc_mask_unused_interrupts(struct udc *dev)
224 {
225 	u32 tmp;
226 
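        /*
         * A set bit in irqmsk masks (disables) the corresponding interrupt;
         * the pattern below masks all device events handled by this driver,
         * they are selectively unmasked again in
         * udc_enable_dev_setup_interrupts().
         */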
227 	/* mask all dev interrupts */
228 	tmp =	AMD_BIT(UDC_DEVINT_SVC) |
229 		AMD_BIT(UDC_DEVINT_ENUM) |
230 		AMD_BIT(UDC_DEVINT_US) |
231 		AMD_BIT(UDC_DEVINT_UR) |
232 		AMD_BIT(UDC_DEVINT_ES) |
233 		AMD_BIT(UDC_DEVINT_SI) |
234 		AMD_BIT(UDC_DEVINT_SOF)|
235 		AMD_BIT(UDC_DEVINT_SC);
236 	writel(tmp, &dev->regs->irqmsk);
237 
238 	/* mask all ep interrupts */
239 	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);
240 
241 	return 0;
242 }
243 EXPORT_SYMBOL_GPL(udc_mask_unused_interrupts);
244 
245 /* Enables endpoint 0 interrupts */
246 static int udc_enable_ep0_interrupts(struct udc *dev)
247 {
248 	u32 tmp;
249 
250 	DBG(dev, "udc_enable_ep0_interrupts()\n");
251 
252 	/* read irq mask */
253 	tmp = readl(&dev->regs->ep_irqmsk);
254 	/* enable ep0 irq's */
255 	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
256 		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
257 	writel(tmp, &dev->regs->ep_irqmsk);
258 
259 	return 0;
260 }
261 
262 /* Enables device interrupts for SET_INTF and SET_CONFIG */
263 int udc_enable_dev_setup_interrupts(struct udc *dev)
264 {
265 	u32 tmp;
266 
267 	DBG(dev, "enable device interrupts for setup data\n");
268 
269 	/* read irq mask */
270 	tmp = readl(&dev->regs->irqmsk);
271 
272 	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
273 	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
274 		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
275 		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
276 		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
277 		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
278 	writel(tmp, &dev->regs->irqmsk);
279 
280 	return 0;
281 }
282 EXPORT_SYMBOL_GPL(udc_enable_dev_setup_interrupts);
283 
284 /* Calculates fifo start of endpoint based on preceding endpoints */
285 static int udc_set_txfifo_addr(struct udc_ep *ep)
286 {
287 	struct udc	*dev;
288 	u32 tmp;
289 	int i;
290 
291 	if (!ep || !(ep->in))
292 		return -EINVAL;
293 
294 	dev = ep->dev;
295 	ep->txfifo = dev->txfifo;
296 
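        /*
         * The TX FIFO of each IN endpoint starts right after the FIFO of the
         * preceding endpoint; sum up the buffer sizes (in dwords) of all
         * lower-numbered endpoints to get this endpoint's FIFO base address.
         */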
297 	/* traverse ep's */
298 	for (i = 0; i < ep->num; i++) {
299 		if (dev->ep[i].regs) {
300 			/* read fifo size */
301 			tmp = readl(&dev->ep[i].regs->bufin_framenum);
302 			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
303 			ep->txfifo += tmp;
304 		}
305 	}
306 	return 0;
307 }
308 
309 /* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
310 static u32 cnak_pending;
311 
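/*
 * Check whether a previously written CNAK actually cleared NAK; if the
 * controller still reports NAK (e.g. the FIFO is not drained yet), record
 * the endpoint in cnak_pending so the CNAK can be retried later.
 */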
312 static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
313 {
314 	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
315 		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
316 		cnak_pending |= 1 << (num);
317 		ep->naking = 1;
318 	} else
319 		cnak_pending = cnak_pending & (~(1 << (num)));
320 }
321 
322 
323 /* Enables endpoint, is called by gadget driver */
324 static int
325 udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
326 {
327 	struct udc_ep		*ep;
328 	struct udc		*dev;
329 	u32			tmp;
330 	unsigned long		iflags;
331 	u8 udc_csr_epix;
332 	unsigned		maxpacket;
333 
334 	if (!usbep
335 			|| usbep->name == ep0_string
336 			|| !desc
337 			|| desc->bDescriptorType != USB_DT_ENDPOINT)
338 		return -EINVAL;
339 
340 	ep = container_of(usbep, struct udc_ep, ep);
341 	dev = ep->dev;
342 
343 	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);
344 
345 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
346 		return -ESHUTDOWN;
347 
348 	spin_lock_irqsave(&dev->lock, iflags);
349 	ep->ep.desc = desc;
350 
351 	ep->halted = 0;
352 
353 	/* set traffic type */
354 	tmp = readl(&dev->ep[ep->num].regs->ctl);
355 	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
356 	writel(tmp, &dev->ep[ep->num].regs->ctl);
357 
358 	/* set max packet size */
359 	maxpacket = usb_endpoint_maxp(desc);
360 	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
361 	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
362 	ep->ep.maxpacket = maxpacket;
363 	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);
364 
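        /*
         * UDC CSR ne[] entries are shared between IN and OUT endpoints:
         * IN endpoints use their endpoint number directly as index, OUT
         * endpoints are offset by UDC_CSR_EP_OUT_IX_OFS (see below).
         */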
365 	/* IN ep */
366 	if (ep->in) {
367 
368 		/* ep ix in UDC CSR register space */
369 		udc_csr_epix = ep->num;
370 
371 		/* set buffer size (tx fifo entries) */
372 		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
373 		/* double buffering: fifo size = 2 x max packet size */
374 		tmp = AMD_ADDBITS(
375 				tmp,
376 				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
377 					  / UDC_DWORD_BYTES,
378 				UDC_EPIN_BUFF_SIZE);
379 		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);
380 
381 		/* calc. tx fifo base addr */
382 		udc_set_txfifo_addr(ep);
383 
384 		/* flush fifo */
385 		tmp = readl(&ep->regs->ctl);
386 		tmp |= AMD_BIT(UDC_EPCTL_F);
387 		writel(tmp, &ep->regs->ctl);
388 
389 	/* OUT ep */
390 	} else {
391 		/* ep ix in UDC CSR register space */
392 		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
393 
394 		/* set max packet size UDC CSR	*/
395 		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
396 		tmp = AMD_ADDBITS(tmp, maxpacket,
397 					UDC_CSR_NE_MAX_PKT);
398 		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
399 
400 		if (use_dma && !ep->in) {
401 			/* alloc and init BNA dummy request */
402 			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
403 			ep->bna_occurred = 0;
404 		}
405 
406 		if (ep->num != UDC_EP0OUT_IX)
407 			dev->data_ep_enabled = 1;
408 	}
409 
410 	/* set ep values */
411 	tmp = readl(&dev->csr->ne[udc_csr_epix]);
412 	/* max packet */
413 	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
414 	/* ep number */
415 	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
416 	/* ep direction */
417 	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
418 	/* ep type */
419 	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
420 	/* ep config */
421 	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
422 	/* ep interface */
423 	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
424 	/* ep alt */
425 	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
426 	/* write reg */
427 	writel(tmp, &dev->csr->ne[udc_csr_epix]);
428 
429 	/* enable ep irq */
430 	tmp = readl(&dev->regs->ep_irqmsk);
431 	tmp &= AMD_UNMASK_BIT(ep->num);
432 	writel(tmp, &dev->regs->ep_irqmsk);
433 
434 	/*
435 	 * clear NAK by writing CNAK
436 	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
437 	 */
438 	if (!use_dma || ep->in) {
439 		tmp = readl(&ep->regs->ctl);
440 		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
441 		writel(tmp, &ep->regs->ctl);
442 		ep->naking = 0;
443 		UDC_QUEUE_CNAK(ep, ep->num);
444 	}
445 	tmp = desc->bEndpointAddress;
446 	DBG(dev, "%s enabled\n", usbep->name);
447 
448 	spin_unlock_irqrestore(&dev->lock, iflags);
449 	return 0;
450 }
451 
452 /* Resets endpoint */
453 static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
454 {
455 	u32		tmp;
456 
457 	VDBG(ep->dev, "ep-%d reset\n", ep->num);
458 	ep->ep.desc = NULL;
459 	ep->ep.ops = &udc_ep_ops;
460 	INIT_LIST_HEAD(&ep->queue);
461 
462 	usb_ep_set_maxpacket_limit(&ep->ep, (u16) ~0);
463 	/* set NAK */
464 	tmp = readl(&ep->regs->ctl);
465 	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
466 	writel(tmp, &ep->regs->ctl);
467 	ep->naking = 1;
468 
469 	/* disable interrupt */
470 	tmp = readl(&regs->ep_irqmsk);
471 	tmp |= AMD_BIT(ep->num);
472 	writel(tmp, &regs->ep_irqmsk);
473 
474 	if (ep->in) {
475 		/* unset P and IN bit of potential former DMA */
476 		tmp = readl(&ep->regs->ctl);
477 		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
478 		writel(tmp, &ep->regs->ctl);
479 
480 		tmp = readl(&ep->regs->sts);
481 		tmp |= AMD_BIT(UDC_EPSTS_IN);
482 		writel(tmp, &ep->regs->sts);
483 
484 		/* flush the fifo */
485 		tmp = readl(&ep->regs->ctl);
486 		tmp |= AMD_BIT(UDC_EPCTL_F);
487 		writel(tmp, &ep->regs->ctl);
488 
489 	}
490 	/* reset desc pointer */
491 	writel(0, &ep->regs->desptr);
492 }
493 
494 /* Disables endpoint, is called by gadget driver */
495 static int udc_ep_disable(struct usb_ep *usbep)
496 {
497 	struct udc_ep	*ep = NULL;
498 	unsigned long	iflags;
499 
500 	if (!usbep)
501 		return -EINVAL;
502 
503 	ep = container_of(usbep, struct udc_ep, ep);
504 	if (usbep->name == ep0_string || !ep->ep.desc)
505 		return -EINVAL;
506 
507 	DBG(ep->dev, "Disable ep-%d\n", ep->num);
508 
509 	spin_lock_irqsave(&ep->dev->lock, iflags);
510 	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
511 	empty_req_queue(ep);
512 	ep_init(ep->dev->regs, ep);
513 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
514 
515 	return 0;
516 }
517 
518 /* Allocates request packet, called by gadget driver */
519 static struct usb_request *
520 udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
521 {
522 	struct udc_request	*req;
523 	struct udc_data_dma	*dma_desc;
524 	struct udc_ep	*ep;
525 
526 	if (!usbep)
527 		return NULL;
528 
529 	ep = container_of(usbep, struct udc_ep, ep);
530 
531 	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
532 	req = kzalloc(sizeof(struct udc_request), gfp);
533 	if (!req)
534 		return NULL;
535 
536 	req->req.dma = DMA_DONT_USE;
537 	INIT_LIST_HEAD(&req->queue);
538 
539 	if (ep->dma) {
540 		/* ep0 in requests are allocated from data pool here */
541 		dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
542 						&req->td_phys);
543 		if (!dma_desc) {
544 			kfree(req);
545 			return NULL;
546 		}
547 
548 		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
549 				"td_phys = %lx\n",
550 				req, dma_desc,
551 				(unsigned long)req->td_phys);
552 		/* prevent the controller from using the desc. - set HOST BUSY */
553 		dma_desc->status = AMD_ADDBITS(dma_desc->status,
554 						UDC_DMA_STP_STS_BS_HOST_BUSY,
555 						UDC_DMA_STP_STS_BS);
556 		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
557 		req->td_data = dma_desc;
558 		req->td_data_last = NULL;
559 		req->chain_len = 1;
560 	}
561 
562 	return &req->req;
563 }
564 
565 /* frees pci pool descriptors of a DMA chain */
566 static void udc_free_dma_chain(struct udc *dev, struct udc_request *req)
567 {
568 	struct udc_data_dma *td = req->td_data;
569 	unsigned int i;
570 
571 	dma_addr_t addr_next = 0x00;
572 	dma_addr_t addr = (dma_addr_t)td->next;
573 
574 	DBG(dev, "free chain req = %p\n", req);
575 
576 	/* do not free first desc., will be done by free for request */
577 	for (i = 1; i < req->chain_len; i++) {
578 		td = phys_to_virt(addr);
579 		addr_next = (dma_addr_t)td->next;
580 		dma_pool_free(dev->data_requests, td, addr);
581 		addr = addr_next;
582 	}
583 }
584 
585 /* Frees request packet, called by gadget driver */
586 static void
587 udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
588 {
589 	struct udc_ep	*ep;
590 	struct udc_request	*req;
591 
592 	if (!usbep || !usbreq)
593 		return;
594 
595 	ep = container_of(usbep, struct udc_ep, ep);
596 	req = container_of(usbreq, struct udc_request, req);
597 	VDBG(ep->dev, "free_req req=%p\n", req);
598 	BUG_ON(!list_empty(&req->queue));
599 	if (req->td_data) {
600 		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);
601 
602 		/* free dma chain if created */
603 		if (req->chain_len > 1)
604 			udc_free_dma_chain(ep->dev, req);
605 
606 		dma_pool_free(ep->dev->data_requests, req->td_data,
607 							req->td_phys);
608 	}
609 	kfree(req);
610 }
611 
612 /* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
613 static void udc_init_bna_dummy(struct udc_request *req)
614 {
615 	if (req) {
616 		/* set last bit */
617 		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
618 		/* set next pointer to itself */
619 		req->td_data->next = req->td_phys;
620 		/* set buffer status to DMA_DONE */
621 		req->td_data->status
622 			= AMD_ADDBITS(req->td_data->status,
623 					UDC_DMA_STP_STS_BS_DMA_DONE,
624 					UDC_DMA_STP_STS_BS);
625 #ifdef UDC_VERBOSE
626 		pr_debug("bna desc = %p, sts = %08x\n",
627 			req->td_data, req->td_data->status);
628 #endif
629 	}
630 }
631 
632 /* Allocate BNA dummy descriptor */
633 static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
634 {
635 	struct udc_request *req = NULL;
636 	struct usb_request *_req = NULL;
637 
638 	/* alloc the dummy request */
639 	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
640 	if (_req) {
641 		req = container_of(_req, struct udc_request, req);
642 		ep->bna_dummy_req = req;
643 		udc_init_bna_dummy(req);
644 	}
645 	return req;
646 }
647 
648 /* Write data to TX fifo for IN packets */
649 static void
650 udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
651 {
652 	u8			*req_buf;
653 	u32			*buf;
654 	int			i, j;
655 	unsigned		bytes = 0;
656 	unsigned		remaining = 0;
657 
658 	if (!req || !ep)
659 		return;
660 
661 	req_buf = req->buf + req->actual;
662 	prefetch(req_buf);
663 	remaining = req->length - req->actual;
664 
665 	buf = (u32 *) req_buf;
666 
667 	bytes = ep->ep.maxpacket;
668 	if (bytes > remaining)
669 		bytes = remaining;
670 
671 	/* dwords first */
672 	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
673 		writel(*(buf + i), ep->txfifo);
674 
675 	/* remaining bytes must be written by byte access */
676 	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
677 		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
678 							ep->txfifo);
679 	}
680 
681 	/* dummy write confirm */
682 	writel(0, &ep->regs->confirm);
683 }
684 
685 /* Read dwords from RX fifo for OUT transfers */
686 static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
687 {
688 	int i;
689 
690 	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);
691 
692 	for (i = 0; i < dwords; i++)
693 		*(buf + i) = readl(dev->rxfifo);
694 	return 0;
695 }
696 
697 /* Read bytes from RX fifo for OUT transfers */
698 static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
699 {
700 	int i, j;
701 	u32 tmp;
702 
703 	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);
704 
705 	/* dwords first */
706 	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
707 		*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
708 
709 	/* remaining bytes must be read by byte access */
710 	if (bytes % UDC_DWORD_BYTES) {
711 		tmp = readl(dev->rxfifo);
712 		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
713 			*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
714 			tmp = tmp >> UDC_BITS_PER_BYTE;
715 		}
716 	}
717 
718 	return 0;
719 }
720 
721 /* Read data from RX fifo for OUT transfers */
722 static int
723 udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
724 {
725 	u8 *buf;
726 	unsigned buf_space;
727 	unsigned bytes = 0;
728 	unsigned finished = 0;
729 
730 	/* number of bytes received */
731 	bytes = readl(&ep->regs->sts);
732 	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);
733 
734 	buf_space = req->req.length - req->req.actual;
735 	buf = req->req.buf + req->req.actual;
736 	if (bytes > buf_space) {
737 		if ((buf_space % ep->ep.maxpacket) != 0) {
738 			DBG(ep->dev,
739 				"%s: rx %d bytes, rx-buf space = %d bytes\n",
740 				ep->ep.name, bytes, buf_space);
741 			req->req.status = -EOVERFLOW;
742 		}
743 		bytes = buf_space;
744 	}
745 	req->req.actual += bytes;
746 
747 	/* last packet ? */
748 	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
749 		|| ((req->req.actual == req->req.length) && !req->req.zero))
750 		finished = 1;
751 
752 	/* read rx fifo bytes */
753 	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
754 	udc_rxfifo_read_bytes(ep->dev, buf, bytes);
755 
756 	return finished;
757 }
758 
759 /* Creates or re-inits a DMA chain */
760 static int udc_create_dma_chain(
761 	struct udc_ep *ep,
762 	struct udc_request *req,
763 	unsigned long buf_len, gfp_t gfp_flags
764 )
765 {
766 	unsigned long bytes = req->req.length;
767 	unsigned int i;
768 	dma_addr_t dma_addr;
769 	struct udc_data_dma	*td = NULL;
770 	struct udc_data_dma	*last = NULL;
771 	unsigned long txbytes;
772 	unsigned create_new_chain = 0;
773 	unsigned len;
774 
775 	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
776 	     bytes, buf_len);
777 	dma_addr = DMA_DONT_USE;
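        /*
         * The chain consists of one descriptor per buf_len-sized chunk of the
         * request buffer: the first descriptor is req->td_data, further
         * descriptors are taken from the DMA pool (or reused from a previous,
         * long enough chain) and linked through their ->next pointers.
         */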
778 
779 	/* unset L bit in first desc for OUT */
780 	if (!ep->in)
781 		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
782 
783 	/* alloc only new desc's if not already available */
784 	len = req->req.length / ep->ep.maxpacket;
785 	if (req->req.length % ep->ep.maxpacket)
786 		len++;
787 
788 	if (len > req->chain_len) {
789 		/* shorter chain already allocated before */
790 		if (req->chain_len > 1)
791 			udc_free_dma_chain(ep->dev, req);
792 		req->chain_len = len;
793 		create_new_chain = 1;
794 	}
795 
796 	td = req->td_data;
797 	/* gen. required number of descriptors and buffers */
798 	for (i = buf_len; i < bytes; i += buf_len) {
799 		/* create or determine next desc. */
800 		if (create_new_chain) {
801 			td = dma_pool_alloc(ep->dev->data_requests,
802 					    gfp_flags, &dma_addr);
803 			if (!td)
804 				return -ENOMEM;
805 
806 			td->status = 0;
807 		} else if (i == buf_len) {
808 			/* first td */
809 			td = (struct udc_data_dma *)phys_to_virt(
810 						req->td_data->next);
811 			td->status = 0;
812 		} else {
813 			td = (struct udc_data_dma *)phys_to_virt(last->next);
814 			td->status = 0;
815 		}
816 
817 		if (td)
818 			td->bufptr = req->req.dma + i; /* assign buffer */
819 		else
820 			break;
821 
822 		/* short packet ? */
823 		if ((bytes - i) >= buf_len) {
824 			txbytes = buf_len;
825 		} else {
826 			/* short packet */
827 			txbytes = bytes - i;
828 		}
829 
830 		/* link td and assign tx bytes */
831 		if (i == buf_len) {
832 			if (create_new_chain)
833 				req->td_data->next = dma_addr;
834 			/*
835 			 * else
836 			 *	req->td_data->next = virt_to_phys(td);
837 			 */
838 			/* write tx bytes */
839 			if (ep->in) {
840 				/* first desc */
841 				req->td_data->status =
842 					AMD_ADDBITS(req->td_data->status,
843 						    ep->ep.maxpacket,
844 						    UDC_DMA_IN_STS_TXBYTES);
845 				/* second desc */
846 				td->status = AMD_ADDBITS(td->status,
847 							txbytes,
848 							UDC_DMA_IN_STS_TXBYTES);
849 			}
850 		} else {
851 			if (create_new_chain)
852 				last->next = dma_addr;
853 			/*
854 			 * else
855 			 *	last->next = virt_to_phys(td);
856 			 */
857 			if (ep->in) {
858 				/* write tx bytes */
859 				td->status = AMD_ADDBITS(td->status,
860 							txbytes,
861 							UDC_DMA_IN_STS_TXBYTES);
862 			}
863 		}
864 		last = td;
865 	}
866 	/* set last bit */
867 	if (td) {
868 		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
869 		/* last desc. points to itself */
870 		req->td_data_last = td;
871 	}
872 
873 	return 0;
874 }
875 
876 /* create/re-init a DMA descriptor or a DMA descriptor chain */
877 static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
878 {
879 	int	retval = 0;
880 	u32	tmp;
881 
882 	VDBG(ep->dev, "prep_dma\n");
883 	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
884 			ep->num, req->td_data);
885 
886 	/* set buffer pointer */
887 	req->td_data->bufptr = req->req.dma;
888 
889 	/* set last bit */
890 	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
891 
892 	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
893 	if (use_dma_ppb) {
894 
895 		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
896 		if (retval != 0) {
897 			if (retval == -ENOMEM)
898 				DBG(ep->dev, "Out of DMA memory\n");
899 			return retval;
900 		}
901 		if (ep->in) {
902 			if (req->req.length == ep->ep.maxpacket) {
903 				/* write tx bytes */
904 				req->td_data->status =
905 					AMD_ADDBITS(req->td_data->status,
906 						ep->ep.maxpacket,
907 						UDC_DMA_IN_STS_TXBYTES);
908 
909 			}
910 		}
911 
912 	}
913 
914 	if (ep->in) {
915 		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
916 				"maxpacket=%d ep%d\n",
917 				use_dma_ppb, req->req.length,
918 				ep->ep.maxpacket, ep->num);
919 		/*
920 		 * if bytes < max packet then tx bytes must
921 		 * be written in packet per buffer mode
922 		 */
923 		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
924 				|| ep->num == UDC_EP0OUT_IX
925 				|| ep->num == UDC_EP0IN_IX) {
926 			/* write tx bytes */
927 			req->td_data->status =
928 				AMD_ADDBITS(req->td_data->status,
929 						req->req.length,
930 						UDC_DMA_IN_STS_TXBYTES);
931 			/* reset frame num */
932 			req->td_data->status =
933 				AMD_ADDBITS(req->td_data->status,
934 						0,
935 						UDC_DMA_IN_STS_FRAMENUM);
936 		}
937 		/* set HOST BUSY */
938 		req->td_data->status =
939 			AMD_ADDBITS(req->td_data->status,
940 				UDC_DMA_STP_STS_BS_HOST_BUSY,
941 				UDC_DMA_STP_STS_BS);
942 	} else {
943 		VDBG(ep->dev, "OUT set host ready\n");
944 		/* set HOST READY */
945 		req->td_data->status =
946 			AMD_ADDBITS(req->td_data->status,
947 				UDC_DMA_STP_STS_BS_HOST_READY,
948 				UDC_DMA_STP_STS_BS);
949 
950 		/* clear NAK by writing CNAK */
951 		if (ep->naking) {
952 			tmp = readl(&ep->regs->ctl);
953 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
954 			writel(tmp, &ep->regs->ctl);
955 			ep->naking = 0;
956 			UDC_QUEUE_CNAK(ep, ep->num);
957 		}
958 
959 	}
960 
961 	return retval;
962 }
963 
964 /* Completes request packet ... caller MUST hold lock */
965 static void
966 complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
967 __releases(ep->dev->lock)
968 __acquires(ep->dev->lock)
969 {
970 	struct udc		*dev;
971 	unsigned		halted;
972 
973 	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);
974 
975 	dev = ep->dev;
976 	/* unmap DMA */
977 	if (ep->dma)
978 		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);
979 
980 	halted = ep->halted;
981 	ep->halted = 1;
982 
983 	/* set new status if pending */
984 	if (req->req.status == -EINPROGRESS)
985 		req->req.status = sts;
986 
987 	/* remove from ep queue */
988 	list_del_init(&req->queue);
989 
990 	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
991 		&req->req, req->req.length, ep->ep.name, sts);
992 
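        /*
         * Drop the lock around the completion callback: the gadget driver's
         * ->complete() may queue new requests and re-enter this driver.
         */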
993 	spin_unlock(&dev->lock);
994 	usb_gadget_giveback_request(&ep->ep, &req->req);
995 	spin_lock(&dev->lock);
996 	ep->halted = halted;
997 }
998 
999 /* Iterates to the end of a DMA chain and returns last descriptor */
1000 static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
1001 {
1002 	struct udc_data_dma	*td;
1003 
1004 	td = req->td_data;
1005 	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
1006 		td = phys_to_virt(td->next);
1007 
1008 	return td;
1009 
1010 }
1011 
1012 /* Iterates to the end of a DMA chain and counts bytes received */
1013 static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
1014 {
1015 	struct udc_data_dma	*td;
1016 	u32 count;
1017 
1018 	td = req->td_data;
1019 	/* number of bytes received */
1020 	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);
1021 
1022 	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
1023 		td = phys_to_virt(td->next);
1024 		/* number of bytes received */
1025 		if (td) {
1026 			count += AMD_GETBITS(td->status,
1027 				UDC_DMA_OUT_STS_RXBYTES);
1028 		}
1029 	}
1030 
1031 	return count;
1032 
1033 }
1034 
1035 /* Enabling RX DMA */
1036 static void udc_set_rde(struct udc *dev)
1037 {
1038 	u32 tmp;
1039 
1040 	VDBG(dev, "udc_set_rde()\n");
1041 	/* stop RDE timer */
1042 	if (timer_pending(&udc_timer)) {
1043 		set_rde = 0;
1044 		mod_timer(&udc_timer, jiffies - 1);
1045 	}
1046 	/* set RDE */
1047 	tmp = readl(&dev->regs->ctl);
1048 	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
1049 	writel(tmp, &dev->regs->ctl);
1050 }
1051 
1052 /* Queues a request packet, called by gadget driver */
1053 static int
1054 udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
1055 {
1056 	int			retval = 0;
1057 	u8			open_rxfifo = 0;
1058 	unsigned long		iflags;
1059 	struct udc_ep		*ep;
1060 	struct udc_request	*req;
1061 	struct udc		*dev;
1062 	u32			tmp;
1063 
1064 	/* check the inputs */
1065 	req = container_of(usbreq, struct udc_request, req);
1066 
1067 	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
1068 			|| !list_empty(&req->queue))
1069 		return -EINVAL;
1070 
1071 	ep = container_of(usbep, struct udc_ep, ep);
1072 	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
1073 		return -EINVAL;
1074 
1075 	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
1076 	dev = ep->dev;
1077 
1078 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
1079 		return -ESHUTDOWN;
1080 
1081 	/* map dma (usually done before) */
1082 	if (ep->dma) {
1083 		VDBG(dev, "DMA map req %p\n", req);
1084 		retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
1085 		if (retval)
1086 			return retval;
1087 	}
1088 
1089 	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
1090 			usbep->name, usbreq, usbreq->length,
1091 			req->td_data, usbreq->buf);
1092 
1093 	spin_lock_irqsave(&dev->lock, iflags);
1094 	usbreq->actual = 0;
1095 	usbreq->status = -EINPROGRESS;
1096 	req->dma_done = 0;
1097 
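        /*
         * If the endpoint queue is empty the transfer is started here right
         * away (ZLP completion, DMA descriptor write or FIFO/IRQ setup);
         * otherwise the request is only appended and started later, once the
         * preceding requests complete.
         */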
1098 	/* on empty queue just do first transfer */
1099 	if (list_empty(&ep->queue)) {
1100 		/* zlp */
1101 		if (usbreq->length == 0) {
1102 			/* IN zlp's are handled by hardware */
1103 			complete_req(ep, req, 0);
1104 			VDBG(dev, "%s: zlp\n", ep->ep.name);
1105 			/*
1106 			 * if set_config or set_intf is waiting for ack by zlp
1107 			 * then set CSR_DONE
1108 			 */
1109 			if (dev->set_cfg_not_acked) {
1110 				tmp = readl(&dev->regs->ctl);
1111 				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
1112 				writel(tmp, &dev->regs->ctl);
1113 				dev->set_cfg_not_acked = 0;
1114 			}
1115 			/* setup command is ACK'ed now by zlp */
1116 			if (dev->waiting_zlp_ack_ep0in) {
1117 				/* clear NAK by writing CNAK in EP0_IN */
1118 				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1119 				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1120 				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1121 				dev->ep[UDC_EP0IN_IX].naking = 0;
1122 				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
1123 							UDC_EP0IN_IX);
1124 				dev->waiting_zlp_ack_ep0in = 0;
1125 			}
1126 			goto finished;
1127 		}
1128 		if (ep->dma) {
1129 			retval = prep_dma(ep, req, GFP_ATOMIC);
1130 			if (retval != 0)
1131 				goto finished;
1132 			/* write desc pointer to enable DMA */
1133 			if (ep->in) {
1134 				/* set HOST READY */
1135 				req->td_data->status =
1136 					AMD_ADDBITS(req->td_data->status,
1137 						UDC_DMA_IN_STS_BS_HOST_READY,
1138 						UDC_DMA_IN_STS_BS);
1139 			}
1140 
1141 			/* disable RX DMA while the descriptor is updated */
1142 			if (!ep->in) {
1143 				/* stop RDE timer */
1144 				if (timer_pending(&udc_timer)) {
1145 					set_rde = 0;
1146 					mod_timer(&udc_timer, jiffies - 1);
1147 				}
1148 				/* clear RDE */
1149 				tmp = readl(&dev->regs->ctl);
1150 				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
1151 				writel(tmp, &dev->regs->ctl);
1152 				open_rxfifo = 1;
1153 
1154 				/*
1155 				 * if BNA occurred then let BNA dummy desc.
1156 				 * point to current desc.
1157 				 */
1158 				if (ep->bna_occurred) {
1159 					VDBG(dev, "copy to BNA dummy desc.\n");
1160 					memcpy(ep->bna_dummy_req->td_data,
1161 						req->td_data,
1162 						sizeof(struct udc_data_dma));
1163 				}
1164 			}
1165 			/* write desc pointer */
1166 			writel(req->td_phys, &ep->regs->desptr);
1167 
1168 			/* clear NAK by writing CNAK */
1169 			if (ep->naking) {
1170 				tmp = readl(&ep->regs->ctl);
1171 				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1172 				writel(tmp, &ep->regs->ctl);
1173 				ep->naking = 0;
1174 				UDC_QUEUE_CNAK(ep, ep->num);
1175 			}
1176 
1177 			if (ep->in) {
1178 				/* enable ep irq */
1179 				tmp = readl(&dev->regs->ep_irqmsk);
1180 				tmp &= AMD_UNMASK_BIT(ep->num);
1181 				writel(tmp, &dev->regs->ep_irqmsk);
1182 			}
1183 		} else if (ep->in) {
1184 				/* enable ep irq */
1185 				tmp = readl(&dev->regs->ep_irqmsk);
1186 				tmp &= AMD_UNMASK_BIT(ep->num);
1187 				writel(tmp, &dev->regs->ep_irqmsk);
1188 			}
1189 
1190 	} else if (ep->dma) {
1191 
1192 		/*
1193 		 * prep_dma is not used for OUT endpoints here; this is not
1194 		 * possible in PPB modes because of how the descriptor chain is created
1195 		 */
1196 		if (ep->in) {
1197 			retval = prep_dma(ep, req, GFP_ATOMIC);
1198 			if (retval != 0)
1199 				goto finished;
1200 		}
1201 	}
1202 	VDBG(dev, "list_add\n");
1203 	/* add request to ep queue */
1204 	if (req) {
1205 
1206 		list_add_tail(&req->queue, &ep->queue);
1207 
1208 		/* open rxfifo if out data queued */
1209 		if (open_rxfifo) {
1210 			/* enable DMA */
1211 			req->dma_going = 1;
1212 			udc_set_rde(dev);
1213 			if (ep->num != UDC_EP0OUT_IX)
1214 				dev->data_ep_queued = 1;
1215 		}
1216 		/* stop OUT naking */
1217 		if (!ep->in) {
1218 			if (!use_dma && udc_rxfifo_pending) {
1219 				DBG(dev, "udc_queue(): pending bytes in "
1220 					"rxfifo after nyet\n");
1221 				/*
1222 				 * read pending bytes after nyet:
1223 				 * referring to isr
1224 				 */
1225 				if (udc_rxfifo_read(ep, req)) {
1226 					/* finish */
1227 					complete_req(ep, req, 0);
1228 				}
1229 				udc_rxfifo_pending = 0;
1230 
1231 			}
1232 		}
1233 	}
1234 
1235 finished:
1236 	spin_unlock_irqrestore(&dev->lock, iflags);
1237 	return retval;
1238 }
1239 
1240 /* Empty request queue of an endpoint; caller holds spinlock */
1241 void empty_req_queue(struct udc_ep *ep)
1242 {
1243 	struct udc_request	*req;
1244 
1245 	ep->halted = 1;
1246 	while (!list_empty(&ep->queue)) {
1247 		req = list_entry(ep->queue.next,
1248 			struct udc_request,
1249 			queue);
1250 		complete_req(ep, req, -ESHUTDOWN);
1251 	}
1252 }
1253 EXPORT_SYMBOL_GPL(empty_req_queue);
1254 
1255 /* Dequeues a request packet, called by gadget driver */
1256 static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
1257 {
1258 	struct udc_ep		*ep;
1259 	struct udc_request	*req;
1260 	unsigned		halted;
1261 	unsigned long		iflags;
1262 
1263 	ep = container_of(usbep, struct udc_ep, ep);
1264 	if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
1265 				&& ep->num != UDC_EP0OUT_IX)))
1266 		return -EINVAL;
1267 
1268 	req = container_of(usbreq, struct udc_request, req);
1269 
1270 	spin_lock_irqsave(&ep->dev->lock, iflags);
1271 	halted = ep->halted;
1272 	ep->halted = 1;
1273 	/* request in processing or next one */
1274 	if (ep->queue.next == &req->queue) {
1275 		if (ep->dma && req->dma_going) {
1276 			if (ep->in)
1277 				ep->cancel_transfer = 1;
1278 			else {
1279 				u32 tmp;
1280 				u32 dma_sts;
1281 				/* stop potential receive DMA */
1282 				tmp = readl(&udc->regs->ctl);
1283 				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
1284 							&udc->regs->ctl);
1285 				/*
1286 				 * Cancel transfer later in ISR
1287 				 * if descriptor was touched.
1288 				 */
1289 				dma_sts = AMD_GETBITS(req->td_data->status,
1290 							UDC_DMA_OUT_STS_BS);
1291 				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
1292 					ep->cancel_transfer = 1;
1293 				else {
1294 					udc_init_bna_dummy(ep->req);
1295 					writel(ep->bna_dummy_req->td_phys,
1296 						&ep->regs->desptr);
1297 				}
1298 				writel(tmp, &udc->regs->ctl);
1299 			}
1300 		}
1301 	}
1302 	complete_req(ep, req, -ECONNRESET);
1303 	ep->halted = halted;
1304 
1305 	spin_unlock_irqrestore(&ep->dev->lock, iflags);
1306 	return 0;
1307 }
1308 
1309 /* Halt or clear halt of endpoint */
1310 static int
1311 udc_set_halt(struct usb_ep *usbep, int halt)
1312 {
1313 	struct udc_ep	*ep;
1314 	u32 tmp;
1315 	unsigned long iflags;
1316 	int retval = 0;
1317 
1318 	if (!usbep)
1319 		return -EINVAL;
1320 
1321 	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);
1322 
1323 	ep = container_of(usbep, struct udc_ep, ep);
1324 	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
1325 		return -EINVAL;
1326 	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1327 		return -ESHUTDOWN;
1328 
1329 	spin_lock_irqsave(&udc_stall_spinlock, iflags);
1330 	/* halt or clear halt */
1331 	if (halt) {
1332 		if (ep->num == 0)
1333 			ep->dev->stall_ep0in = 1;
1334 		else {
1335 			/*
1336 			 * set STALL
1337 			 * rxfifo empty not taken into account
1338 			 */
1339 			tmp = readl(&ep->regs->ctl);
1340 			tmp |= AMD_BIT(UDC_EPCTL_S);
1341 			writel(tmp, &ep->regs->ctl);
1342 			ep->halted = 1;
1343 
1344 			/* setup poll timer */
1345 			if (!timer_pending(&udc_pollstall_timer)) {
1346 				udc_pollstall_timer.expires = jiffies +
1347 					HZ * UDC_POLLSTALL_TIMER_USECONDS
1348 					/ (1000 * 1000);
1349 				if (!stop_pollstall_timer) {
1350 					DBG(ep->dev, "start polltimer\n");
1351 					add_timer(&udc_pollstall_timer);
1352 				}
1353 			}
1354 		}
1355 	} else {
1356 		/* ep is halted by set_halt() before */
1357 		if (ep->halted) {
1358 			tmp = readl(&ep->regs->ctl);
1359 			/* clear stall bit */
1360 			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
1361 			/* clear NAK by writing CNAK */
1362 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1363 			writel(tmp, &ep->regs->ctl);
1364 			ep->halted = 0;
1365 			UDC_QUEUE_CNAK(ep, ep->num);
1366 		}
1367 	}
1368 	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1369 	return retval;
1370 }
1371 
1372 /* gadget interface */
1373 static const struct usb_ep_ops udc_ep_ops = {
1374 	.enable		= udc_ep_enable,
1375 	.disable	= udc_ep_disable,
1376 
1377 	.alloc_request	= udc_alloc_request,
1378 	.free_request	= udc_free_request,
1379 
1380 	.queue		= udc_queue,
1381 	.dequeue	= udc_dequeue,
1382 
1383 	.set_halt	= udc_set_halt,
1384 	/* fifo ops not implemented */
1385 };
1386 
1387 /*-------------------------------------------------------------------------*/
1388 
1389 /* Get frame counter (not implemented) */
1390 static int udc_get_frame(struct usb_gadget *gadget)
1391 {
1392 	return -EOPNOTSUPP;
1393 }
1394 
1395 /* Initiates a remote wakeup */
1396 static int udc_remote_wakeup(struct udc *dev)
1397 {
1398 	unsigned long flags;
1399 	u32 tmp;
1400 
1401 	DBG(dev, "UDC initiates remote wakeup\n");
1402 
1403 	spin_lock_irqsave(&dev->lock, flags);
1404 
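        /* signal resume by pulsing the RES bit: set it, then clear it again */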
1405 	tmp = readl(&dev->regs->ctl);
1406 	tmp |= AMD_BIT(UDC_DEVCTL_RES);
1407 	writel(tmp, &dev->regs->ctl);
1408 	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
1409 	writel(tmp, &dev->regs->ctl);
1410 
1411 	spin_unlock_irqrestore(&dev->lock, flags);
1412 	return 0;
1413 }
1414 
1415 /* Remote wakeup gadget interface */
1416 static int udc_wakeup(struct usb_gadget *gadget)
1417 {
1418 	struct udc		*dev;
1419 
1420 	if (!gadget)
1421 		return -EINVAL;
1422 	dev = container_of(gadget, struct udc, gadget);
1423 	udc_remote_wakeup(dev);
1424 
1425 	return 0;
1426 }
1427 
1428 static int amd5536_udc_start(struct usb_gadget *g,
1429 		struct usb_gadget_driver *driver);
1430 static int amd5536_udc_stop(struct usb_gadget *g);
1431 
1432 static const struct usb_gadget_ops udc_ops = {
1433 	.wakeup		= udc_wakeup,
1434 	.get_frame	= udc_get_frame,
1435 	.udc_start	= amd5536_udc_start,
1436 	.udc_stop	= amd5536_udc_stop,
1437 };
1438 
1439 /* Setups endpoint parameters, adds endpoints to linked list */
1440 static void make_ep_lists(struct udc *dev)
1441 {
1442 	/* make gadget ep lists */
1443 	INIT_LIST_HEAD(&dev->gadget.ep_list);
1444 	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
1445 						&dev->gadget.ep_list);
1446 	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
1447 						&dev->gadget.ep_list);
1448 	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
1449 						&dev->gadget.ep_list);
1450 
1451 	/* fifo config */
1452 	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
1453 	if (dev->gadget.speed == USB_SPEED_FULL)
1454 		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
1455 	else if (dev->gadget.speed == USB_SPEED_HIGH)
1456 		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
1457 	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
1458 }
1459 
1460 /* Inits UDC context */
1461 void udc_basic_init(struct udc *dev)
1462 {
1463 	u32	tmp;
1464 
1465 	DBG(dev, "udc_basic_init()\n");
1466 
1467 	dev->gadget.speed = USB_SPEED_UNKNOWN;
1468 
1469 	/* stop RDE timer */
1470 	if (timer_pending(&udc_timer)) {
1471 		set_rde = 0;
1472 		mod_timer(&udc_timer, jiffies - 1);
1473 	}
1474 	/* stop poll stall timer */
1475 	if (timer_pending(&udc_pollstall_timer))
1476 		mod_timer(&udc_pollstall_timer, jiffies - 1);
1477 	/* disable DMA */
1478 	tmp = readl(&dev->regs->ctl);
1479 	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
1480 	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
1481 	writel(tmp, &dev->regs->ctl);
1482 
1483 	/* enable dynamic CSR programming */
1484 	tmp = readl(&dev->regs->cfg);
1485 	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
1486 	/* set self powered */
1487 	tmp |= AMD_BIT(UDC_DEVCFG_SP);
1488 	/* set remote wakeupable */
1489 	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
1490 	writel(tmp, &dev->regs->cfg);
1491 
1492 	make_ep_lists(dev);
1493 
1494 	dev->data_ep_enabled = 0;
1495 	dev->data_ep_queued = 0;
1496 }
1497 EXPORT_SYMBOL_GPL(udc_basic_init);
1498 
1499 /* init registers at driver load time */
1500 static int startup_registers(struct udc *dev)
1501 {
1502 	u32 tmp;
1503 
1504 	/* init controller by soft reset */
1505 	udc_soft_reset(dev);
1506 
1507 	/* mask not needed interrupts */
1508 	udc_mask_unused_interrupts(dev);
1509 
1510 	/* put into initial config */
1511 	udc_basic_init(dev);
1512 	/* link up all endpoints */
1513 	udc_setup_endpoints(dev);
1514 
1515 	/* program speed */
1516 	tmp = readl(&dev->regs->cfg);
1517 	if (use_fullspeed)
1518 		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
1519 	else
1520 		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
1521 	writel(tmp, &dev->regs->cfg);
1522 
1523 	return 0;
1524 }
1525 
1526 /* Sets initial endpoint parameters */
1527 static void udc_setup_endpoints(struct udc *dev)
1528 {
1529 	struct udc_ep	*ep;
1530 	u32	tmp;
1531 	u32	reg;
1532 
1533 	DBG(dev, "udc_setup_endpoints()\n");
1534 
1535 	/* read enum speed */
1536 	tmp = readl(&dev->regs->sts);
1537 	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
1538 	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
1539 		dev->gadget.speed = USB_SPEED_HIGH;
1540 	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
1541 		dev->gadget.speed = USB_SPEED_FULL;
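        /* any other enum speed value leaves dev->gadget.speed unchanged */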
1542 
1543 	/* set basic ep parameters */
1544 	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
1545 		ep = &dev->ep[tmp];
1546 		ep->dev = dev;
1547 		ep->ep.name = ep_info[tmp].name;
1548 		ep->ep.caps = ep_info[tmp].caps;
1549 		ep->num = tmp;
1550 		/* txfifo size is calculated at enable time */
1551 		ep->txfifo = dev->txfifo;
1552 
1553 		/* fifo size */
1554 		if (tmp < UDC_EPIN_NUM) {
1555 			ep->fifo_depth = UDC_TXFIFO_SIZE;
1556 			ep->in = 1;
1557 		} else {
1558 			ep->fifo_depth = UDC_RXFIFO_SIZE;
1559 			ep->in = 0;
1560 
1561 		}
1562 		ep->regs = &dev->ep_regs[tmp];
1563 		/*
1564 		 * ep will be reset only if ep was not enabled before to avoid
1565 		 * disabling ep interrupts when ENUM interrupt occurs but ep is
1566 		 * not enabled by gadget driver
1567 		 */
1568 		if (!ep->ep.desc)
1569 			ep_init(dev->regs, ep);
1570 
1571 		if (use_dma) {
1572 			/*
1573 			 * ep->dma is not really used, just to indicate that
1574 			 * DMA is active: remove this
1575 			 * dma regs = dev control regs
1576 			 */
1577 			ep->dma = &dev->regs->ctl;
1578 
1579 			/* nak OUT endpoints until enable - not for ep0 */
1580 			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
1581 						&& tmp > UDC_EPIN_NUM) {
1582 				/* set NAK */
1583 				reg = readl(&dev->ep[tmp].regs->ctl);
1584 				reg |= AMD_BIT(UDC_EPCTL_SNAK);
1585 				writel(reg, &dev->ep[tmp].regs->ctl);
1586 				dev->ep[tmp].naking = 1;
1587 
1588 			}
1589 		}
1590 	}
1591 	/* EP0 max packet */
1592 	if (dev->gadget.speed == USB_SPEED_FULL) {
1593 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
1594 					   UDC_FS_EP0IN_MAX_PKT_SIZE);
1595 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
1596 					   UDC_FS_EP0OUT_MAX_PKT_SIZE);
1597 	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
1598 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
1599 					   UDC_EP0IN_MAX_PKT_SIZE);
1600 		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
1601 					   UDC_EP0OUT_MAX_PKT_SIZE);
1602 	}
1603 
1604 	/*
1605 	 * with suspend bug workaround, ep0 params for gadget driver
1606 	 * are set at gadget driver bind() call
1607 	 */
1608 	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
1609 	dev->ep[UDC_EP0IN_IX].halted = 0;
1610 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1611 
1612 	/* init cfg/alt/int */
1613 	dev->cur_config = 0;
1614 	dev->cur_intf = 0;
1615 	dev->cur_alt = 0;
1616 }
1617 
1618 /* Bringup after Connect event, initial bringup to be ready for ep0 events */
1619 static void usb_connect(struct udc *dev)
1620 {
1621 	/* Return if already connected */
1622 	if (dev->connected)
1623 		return;
1624 
1625 	dev_info(dev->dev, "USB Connect\n");
1626 
1627 	dev->connected = 1;
1628 
1629 	/* put into initial config */
1630 	udc_basic_init(dev);
1631 
1632 	/* enable device setup interrupts */
1633 	udc_enable_dev_setup_interrupts(dev);
1634 }
1635 
1636 /*
1637  * Calls the gadget with a disconnect event, resets the UDC and performs
1638  * the initial bringup so it is ready for ep0 events again
1639  */
1640 static void usb_disconnect(struct udc *dev)
1641 {
1642 	/* Return if already disconnected */
1643 	if (!dev->connected)
1644 		return;
1645 
1646 	dev_info(dev->dev, "USB Disconnect\n");
1647 
1648 	dev->connected = 0;
1649 
1650 	/* mask interrupts */
1651 	udc_mask_unused_interrupts(dev);
1652 
1653 	/* REVISIT there doesn't seem to be a point to having this
1654 	 * talk to a tasklet ... do it directly, we already hold
1655 	 * the spinlock needed to process the disconnect.
1656 	 */
1657 
1658 	tasklet_schedule(&disconnect_tasklet);
1659 }
1660 
1661 /* Tasklet for disconnect to be outside of interrupt context */
1662 static void udc_tasklet_disconnect(unsigned long par)
1663 {
1664 	struct udc *dev = (struct udc *)(*((struct udc **) par));
1665 	u32 tmp;
1666 
1667 	DBG(dev, "Tasklet disconnect\n");
1668 	spin_lock_irq(&dev->lock);
1669 
1670 	if (dev->driver) {
1671 		spin_unlock(&dev->lock);
1672 		dev->driver->disconnect(&dev->gadget);
1673 		spin_lock(&dev->lock);
1674 
1675 		/* empty queues */
1676 		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
1677 			empty_req_queue(&dev->ep[tmp]);
1678 
1679 	}
1680 
1681 	/* disable ep0 */
1682 	ep_init(dev->regs,
1683 			&dev->ep[UDC_EP0IN_IX]);
1684 
1685 
1686 	if (!soft_reset_occured) {
1687 		/* init controller by soft reset */
1688 		udc_soft_reset(dev);
1689 		soft_reset_occured++;
1690 	}
1691 
1692 	/* re-enable dev interrupts */
1693 	udc_enable_dev_setup_interrupts(dev);
1694 	/* back to full speed ? */
1695 	if (use_fullspeed) {
1696 		tmp = readl(&dev->regs->cfg);
1697 		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
1698 		writel(tmp, &dev->regs->cfg);
1699 	}
1700 
1701 	spin_unlock_irq(&dev->lock);
1702 }
1703 
1704 /* Reset the UDC core */
1705 static void udc_soft_reset(struct udc *dev)
1706 {
1707 	unsigned long	flags;
1708 
1709 	DBG(dev, "Soft reset\n");
1710 	/*
1711 	 * reset possible waiting interrupts, because int.
1712 	 * status is lost after soft reset,
1713 	 * ep int. status reset
1714 	 */
1715 	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
1716 	/* device int. status reset */
1717 	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
1718 
1719 	/* Don't do this for Broadcom UDC since this is a reserved
1720 	 * bit.
1721 	 */
1722 	if (dev->chiprev != UDC_BCM_REV) {
1723 		spin_lock_irqsave(&udc_irq_spinlock, flags);
1724 		writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
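		/* read back to flush the posted write before releasing the lock */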
1725 		readl(&dev->regs->cfg);
1726 		spin_unlock_irqrestore(&udc_irq_spinlock, flags);
1727 	}
1728 }
1729 
1730 /* RDE timer callback to set RDE bit */
1731 static void udc_timer_function(struct timer_list *unused)
1732 {
1733 	u32 tmp;
1734 
1735 	spin_lock_irq(&udc_irq_spinlock);
1736 
1737 	if (set_rde > 0) {
1738 		/*
1739 		 * conditionally open the fifo if it was already filled
1740 		 * on the last timer call
1741 		 */
1742 		if (set_rde > 1) {
1743 			/* set RDE to receive setup data */
1744 			tmp = readl(&udc->regs->ctl);
1745 			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
1746 			writel(tmp, &udc->regs->ctl);
1747 			set_rde = -1;
1748 		} else if (readl(&udc->regs->sts)
1749 				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
1750 			/*
1751 			 * if fifo empty setup polling, do not just
1752 			 * open the fifo
1753 			 */
1754 			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
1755 			if (!stop_timer)
1756 				add_timer(&udc_timer);
1757 		} else {
1758 			/*
1759 			 * fifo contains data now, setup timer for opening
1760 			 * the fifo when timer expires to be able to receive
1761 			 * setup packets, when data packets gets queued by
1762 			 * gadget layer then the timer will be forced to expire with
1763 			 * set_rde=0 (RDE is set in udc_queue())
1764 			 */
1765 			set_rde++;
1766 			/* debug: lhadmot_timer_start = 221070 */
1767 			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
1768 			if (!stop_timer)
1769 				add_timer(&udc_timer);
1770 		}
1771 
1772 	} else
1773 		set_rde = -1; /* RDE was set by udc_queue() */
1774 	spin_unlock_irq(&udc_irq_spinlock);
1775 	if (stop_timer)
1776 		complete(&on_exit);
1777 
1778 }
1779 
1780 /* Handle halt state, used in stall poll timer */
1781 static void udc_handle_halt_state(struct udc_ep *ep)
1782 {
1783 	u32 tmp;
1784 	/* re-check the STALL bit as long as the ep is halted */
1785 	if (ep->halted == 1) {
1786 		tmp = readl(&ep->regs->ctl);
1787 		/* STALL cleared ? */
1788 		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
1789 			/*
1790 			 * FIXME: MSC spec requires that stall remains
1791 			 * even on receiving CLEAR_FEATURE HALT. So
1792 			 * we would set STALL again here to be compliant.
1793 			 * But with current mass storage drivers this does
1794 			 * not work (would produce endless host retries).
1795 			 * So we clear halt on CLEAR_FEATURE.
1796 			 *
1797 			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
1798 			tmp |= AMD_BIT(UDC_EPCTL_S);
1799 			writel(tmp, &ep->regs->ctl);*/
1800 
1801 			/* clear NAK by writing CNAK */
1802 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1803 			writel(tmp, &ep->regs->ctl);
1804 			ep->halted = 0;
1805 			UDC_QUEUE_CNAK(ep, ep->num);
1806 		}
1807 	}
1808 }
1809 
1810 /* Stall timer callback to poll S bit and set it again after */
1811 static void udc_pollstall_timer_function(struct timer_list *unused)
1812 {
1813 	struct udc_ep *ep;
1814 	int halted = 0;
1815 
1816 	spin_lock_irq(&udc_stall_spinlock);
1817 	/*
1818 	 * only one IN and one OUT endpoint are handled
1819 	 * IN poll stall
1820 	 */
1821 	ep = &udc->ep[UDC_EPIN_IX];
1822 	udc_handle_halt_state(ep);
1823 	if (ep->halted)
1824 		halted = 1;
1825 	/* OUT poll stall */
1826 	ep = &udc->ep[UDC_EPOUT_IX];
1827 	udc_handle_halt_state(ep);
1828 	if (ep->halted)
1829 		halted = 1;
1830 
1831 	/* setup timer again when still halted */
1832 	if (!stop_pollstall_timer && halted) {
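		/*
		 * UDC_POLLSTALL_TIMER_USECONDS is in microseconds; the
		 * expression below converts that period to jiffies.
		 */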
1833 		udc_pollstall_timer.expires = jiffies +
1834 					HZ * UDC_POLLSTALL_TIMER_USECONDS
1835 					/ (1000 * 1000);
1836 		add_timer(&udc_pollstall_timer);
1837 	}
1838 	spin_unlock_irq(&udc_stall_spinlock);
1839 
1840 	if (stop_pollstall_timer)
1841 		complete(&on_pollstall_exit);
1842 }
1843 
1844 /* Inits endpoint 0 so that SETUP packets are processed */
1845 static void activate_control_endpoints(struct udc *dev)
1846 {
1847 	u32 tmp;
1848 
1849 	DBG(dev, "activate_control_endpoints\n");
1850 
1851 	/* flush fifo */
1852 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1853 	tmp |= AMD_BIT(UDC_EPCTL_F);
1854 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1855 
1856 	/* set ep0 directions */
1857 	dev->ep[UDC_EP0IN_IX].in = 1;
1858 	dev->ep[UDC_EP0OUT_IX].in = 0;
1859 
1860 	/* set buffer size (tx fifo entries) of EP0_IN */
1861 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1862 	if (dev->gadget.speed == USB_SPEED_FULL)
1863 		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
1864 					UDC_EPIN_BUFF_SIZE);
1865 	else if (dev->gadget.speed == USB_SPEED_HIGH)
1866 		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
1867 					UDC_EPIN_BUFF_SIZE);
1868 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1869 
1870 	/* set max packet size of EP0_IN */
1871 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1872 	if (dev->gadget.speed == USB_SPEED_FULL)
1873 		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
1874 					UDC_EP_MAX_PKT_SIZE);
1875 	else if (dev->gadget.speed == USB_SPEED_HIGH)
1876 		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
1877 				UDC_EP_MAX_PKT_SIZE);
1878 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1879 
1880 	/* set max packet size of EP0_OUT */
1881 	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1882 	if (dev->gadget.speed == USB_SPEED_FULL)
1883 		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1884 					UDC_EP_MAX_PKT_SIZE);
1885 	else if (dev->gadget.speed == USB_SPEED_HIGH)
1886 		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1887 					UDC_EP_MAX_PKT_SIZE);
1888 	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1889 
1890 	/* set max packet size of EP0 in UDC CSR */
1891 	tmp = readl(&dev->csr->ne[0]);
1892 	if (dev->gadget.speed == USB_SPEED_FULL)
1893 		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1894 					UDC_CSR_NE_MAX_PKT);
1895 	else if (dev->gadget.speed == USB_SPEED_HIGH)
1896 		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1897 					UDC_CSR_NE_MAX_PKT);
1898 	writel(tmp, &dev->csr->ne[0]);
1899 
1900 	if (use_dma) {
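		/*
		 * Mark the (single) ep0-OUT data descriptor as the last one
		 * in the chain before handing its address to the hardware.
		 */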
1901 		dev->ep[UDC_EP0OUT_IX].td->status |=
1902 			AMD_BIT(UDC_DMA_OUT_STS_L);
1903 		/* write dma desc address */
1904 		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
1905 			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
1906 		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
1907 			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
1908 		/* stop RDE timer */
1909 		if (timer_pending(&udc_timer)) {
1910 			set_rde = 0;
1911 			mod_timer(&udc_timer, jiffies - 1);
1912 		}
1913 		/* stop pollstall timer */
1914 		if (timer_pending(&udc_pollstall_timer))
1915 			mod_timer(&udc_pollstall_timer, jiffies - 1);
1916 		/* enable DMA */
1917 		tmp = readl(&dev->regs->ctl);
1918 		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
1919 				| AMD_BIT(UDC_DEVCTL_RDE)
1920 				| AMD_BIT(UDC_DEVCTL_TDE);
1921 		if (use_dma_bufferfill_mode)
1922 			tmp |= AMD_BIT(UDC_DEVCTL_BF);
1923 		else if (use_dma_ppb_du)
1924 			tmp |= AMD_BIT(UDC_DEVCTL_DU);
1925 		writel(tmp, &dev->regs->ctl);
1926 	}
1927 
1928 	/* clear NAK by writing CNAK for EP0IN */
1929 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1930 	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1931 	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1932 	dev->ep[UDC_EP0IN_IX].naking = 0;
1933 	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
1934 
1935 	/* clear NAK by writing CNAK for EP0OUT */
1936 	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
1937 	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1938 	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
1939 	dev->ep[UDC_EP0OUT_IX].naking = 0;
1940 	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
1941 }
1942 
1943 /* Make endpoint 0 ready for control traffic */
1944 static int setup_ep0(struct udc *dev)
1945 {
1946 	activate_control_endpoints(dev);
1947 	/* enable ep0 interrupts */
1948 	udc_enable_ep0_interrupts(dev);
1949 	/* enable device setup interrupts */
1950 	udc_enable_dev_setup_interrupts(dev);
1951 
1952 	return 0;
1953 }
1954 
1955 /* Called by gadget driver to register itself */
1956 static int amd5536_udc_start(struct usb_gadget *g,
1957 		struct usb_gadget_driver *driver)
1958 {
1959 	struct udc *dev = to_amd5536_udc(g);
1960 	u32 tmp;
1961 
1962 	driver->driver.bus = NULL;
1963 	dev->driver = driver;
1964 
1965 	/* Some gadget drivers use both ep0 directions.
1966 	 * NOTE: to gadget driver, ep0 is just one endpoint...
1967 	 */
1968 	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
1969 		dev->ep[UDC_EP0IN_IX].ep.driver_data;
1970 
1971 	/* get ready for ep0 traffic */
1972 	setup_ep0(dev);
1973 
1974 	/* clear SD */
1975 	tmp = readl(&dev->regs->ctl);
1976 	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
1977 	writel(tmp, &dev->regs->ctl);
1978 
1979 	usb_connect(dev);
1980 
1981 	return 0;
1982 }
1983 
1984 /* shutdown requests and disconnect from gadget */
1985 static void
1986 shutdown(struct udc *dev, struct usb_gadget_driver *driver)
1987 __releases(dev->lock)
1988 __acquires(dev->lock)
1989 {
1990 	int tmp;
1991 
1992 	/* empty queues and init hardware */
1993 	udc_basic_init(dev);
1994 
1995 	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
1996 		empty_req_queue(&dev->ep[tmp]);
1997 
1998 	udc_setup_endpoints(dev);
1999 }
2000 
2001 /* Called by gadget driver to unregister itself */
2002 static int amd5536_udc_stop(struct usb_gadget *g)
2003 {
2004 	struct udc *dev = to_amd5536_udc(g);
2005 	unsigned long flags;
2006 	u32 tmp;
2007 
2008 	spin_lock_irqsave(&dev->lock, flags);
2009 	udc_mask_unused_interrupts(dev);
2010 	shutdown(dev, NULL);
2011 	spin_unlock_irqrestore(&dev->lock, flags);
2012 
2013 	dev->driver = NULL;
2014 
2015 	/* set SD */
2016 	tmp = readl(&dev->regs->ctl);
2017 	tmp |= AMD_BIT(UDC_DEVCTL_SD);
2018 	writel(tmp, &dev->regs->ctl);
2019 
2020 	return 0;
2021 }
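
/*
 * Both callbacks above are reached through the driver's usb_gadget_ops
 * table (udc_ops): the gadget core calls ->udc_start when a function
 * driver binds to the gadget and ->udc_stop when it unbinds.
 */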
2022 
2023 /* Clear pending NAK bits */
2024 static void udc_process_cnak_queue(struct udc *dev)
2025 {
2026 	u32 tmp;
2027 	u32 reg;
2028 
2029 	/* check epin's */
2030 	DBG(dev, "CNAK pending queue processing\n");
2031 	for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
2032 		if (cnak_pending & (1 << tmp)) {
2033 			DBG(dev, "CNAK pending for ep%d\n", tmp);
2034 			/* clear NAK by writing CNAK */
2035 			reg = readl(&dev->ep[tmp].regs->ctl);
2036 			reg |= AMD_BIT(UDC_EPCTL_CNAK);
2037 			writel(reg, &dev->ep[tmp].regs->ctl);
2038 			dev->ep[tmp].naking = 0;
2039 			UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
2040 		}
2041 	}
2042 	/* ...	and ep0out */
2043 	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
2044 		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
2045 		/* clear NAK by writing CNAK */
2046 		reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2047 		reg |= AMD_BIT(UDC_EPCTL_CNAK);
2048 		writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2049 		dev->ep[UDC_EP0OUT_IX].naking = 0;
2050 		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
2051 				dev->ep[UDC_EP0OUT_IX].num);
2052 	}
2053 }
2054 
2055 /* Enabling RX DMA after setup packet */
2056 static void udc_ep0_set_rde(struct udc *dev)
2057 {
2058 	if (use_dma) {
2059 		/*
2060 		 * only enable RX DMA when no data endpoint is enabled
2061 		 * or data is queued
2062 		 */
2063 		if (!dev->data_ep_enabled || dev->data_ep_queued) {
2064 			udc_set_rde(dev);
2065 		} else {
2066 			/*
2067 			 * setup timer for enabling RDE (to not enable
2068 			 * RXFIFO DMA for data endpoints too early)
2069 			 */
2070 			if (set_rde != 0 && !timer_pending(&udc_timer)) {
2071 				udc_timer.expires =
2072 					jiffies + HZ/UDC_RDE_TIMER_DIV;
2073 				set_rde = 1;
2074 				if (!stop_timer)
2075 					add_timer(&udc_timer);
2076 			}
2077 		}
2078 	}
2079 }
2080 
2081 
2082 /* Interrupt handler for data OUT traffic */
2083 static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
2084 {
2085 	irqreturn_t		ret_val = IRQ_NONE;
2086 	u32			tmp;
2087 	struct udc_ep		*ep;
2088 	struct udc_request	*req;
2089 	unsigned int		count;
2090 	struct udc_data_dma	*td = NULL;
2091 	unsigned		dma_done;
2092 
2093 	VDBG(dev, "ep%d irq\n", ep_ix);
2094 	ep = &dev->ep[ep_ix];
2095 
2096 	tmp = readl(&ep->regs->sts);
2097 	if (use_dma) {
2098 		/* BNA event ? */
2099 		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2100 			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
2101 					ep->num, readl(&ep->regs->desptr));
2102 			/* clear BNA */
2103 			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
2104 			if (!ep->cancel_transfer)
2105 				ep->bna_occurred = 1;
2106 			else
2107 				ep->cancel_transfer = 0;
2108 			ret_val = IRQ_HANDLED;
2109 			goto finished;
2110 		}
2111 	}
2112 	/* HE event ? */
2113 	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
2114 		dev_err(dev->dev, "HE ep%dout occurred\n", ep->num);
2115 
2116 		/* clear HE */
2117 		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2118 		ret_val = IRQ_HANDLED;
2119 		goto finished;
2120 	}
2121 
2122 	if (!list_empty(&ep->queue)) {
2123 
2124 		/* next request */
2125 		req = list_entry(ep->queue.next,
2126 			struct udc_request, queue);
2127 	} else {
2128 		req = NULL;
2129 		udc_rxfifo_pending = 1;
2130 	}
2131 	VDBG(dev, "req = %p\n", req);
2132 	/* fifo mode */
2133 	if (!use_dma) {
2134 
2135 		/* read fifo */
2136 		if (req && udc_rxfifo_read(ep, req)) {
2137 			ret_val = IRQ_HANDLED;
2138 
2139 			/* finish */
2140 			complete_req(ep, req, 0);
2141 			/* next request */
2142 			if (!list_empty(&ep->queue) && !ep->halted) {
2143 				req = list_entry(ep->queue.next,
2144 					struct udc_request, queue);
2145 			} else
2146 				req = NULL;
2147 		}
2148 
2149 	/* DMA */
2150 	} else if (!ep->cancel_transfer && req) {
2151 		ret_val = IRQ_HANDLED;
2152 
2153 		/* check for DMA done */
2154 		if (!use_dma_ppb) {
2155 			dma_done = AMD_GETBITS(req->td_data->status,
2156 						UDC_DMA_OUT_STS_BS);
2157 		/* packet per buffer mode - rx bytes */
2158 		} else {
2159 			/*
2160 			 * if BNA occurred then recover desc. from
2161 			 * BNA dummy desc.
2162 			 */
2163 			if (ep->bna_occurred) {
2164 				VDBG(dev, "Recover desc. from BNA dummy\n");
2165 				memcpy(req->td_data, ep->bna_dummy_req->td_data,
2166 						sizeof(struct udc_data_dma));
2167 				ep->bna_occurred = 0;
2168 				udc_init_bna_dummy(ep->req);
2169 			}
2170 			td = udc_get_last_dma_desc(req);
2171 			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
2172 		}
2173 		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
2174 			/* buffer fill mode - rx bytes */
2175 			if (!use_dma_ppb) {
2176 				/* number of received bytes */
2177 				count = AMD_GETBITS(req->td_data->status,
2178 						UDC_DMA_OUT_STS_RXBYTES);
2179 				VDBG(dev, "rx bytes=%u\n", count);
2180 			/* packet per buffer mode - rx bytes */
2181 			} else {
2182 				VDBG(dev, "req->td_data=%p\n", req->td_data);
2183 				VDBG(dev, "last desc = %p\n", td);
2184 				/* number of received bytes */
2185 				if (use_dma_ppb_du) {
2186 					/* every desc. counts bytes */
2187 					count = udc_get_ppbdu_rxbytes(req);
2188 				} else {
2189 					/* last desc. counts bytes */
2190 					count = AMD_GETBITS(td->status,
2191 						UDC_DMA_OUT_STS_RXBYTES);
2192 					if (!count && req->req.length
2193 						== UDC_DMA_MAXPACKET) {
2194 						/*
2195 						 * on 64k packets the RXBYTES
2196 						 * field is zero
2197 						 */
2198 						count = UDC_DMA_MAXPACKET;
2199 					}
2200 				}
2201 				VDBG(dev, "last desc rx bytes=%u\n", count);
2202 			}
2203 
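			/*
			 * Clamp the received byte count to the space left in
			 * the request buffer; report -EOVERFLOW only when the
			 * leftover space is not a multiple of maxpacket.
			 */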
2204 			tmp = req->req.length - req->req.actual;
2205 			if (count > tmp) {
2206 				if ((tmp % ep->ep.maxpacket) != 0) {
2207 					DBG(dev, "%s: rx %db, space=%db\n",
2208 						ep->ep.name, count, tmp);
2209 					req->req.status = -EOVERFLOW;
2210 				}
2211 				count = tmp;
2212 			}
2213 			req->req.actual += count;
2214 			req->dma_going = 0;
2215 			/* complete request */
2216 			complete_req(ep, req, 0);
2217 
2218 			/* next request */
2219 			if (!list_empty(&ep->queue) && !ep->halted) {
2220 				req = list_entry(ep->queue.next,
2221 					struct udc_request,
2222 					queue);
2223 				/*
2224 				 * DMA may already have been started by
2225 				 * udc_queue(), called from the gadget driver's
2226 				 * completion routine. This happens when the
2227 				 * queue holds only one request.
2228 				 */
2229 				if (req->dma_going == 0) {
2230 					/* next dma */
2231 					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
2232 						goto finished;
2233 					/* write desc pointer */
2234 					writel(req->td_phys,
2235 						&ep->regs->desptr);
2236 					req->dma_going = 1;
2237 					/* enable DMA */
2238 					udc_set_rde(dev);
2239 				}
2240 			} else {
2241 				/*
2242 				 * implant BNA dummy descriptor to allow
2243 				 * RXFIFO opening by RDE
2244 				 */
2245 				if (ep->bna_dummy_req) {
2246 					/* write desc pointer */
2247 					writel(ep->bna_dummy_req->td_phys,
2248 						&ep->regs->desptr);
2249 					ep->bna_occurred = 0;
2250 				}
2251 
2252 				/*
2253 				 * schedule timer for setting RDE if queue
2254 				 * remains empty to allow ep0 packets to pass
2255 				 * through
2256 				 */
2257 				if (set_rde != 0
2258 						&& !timer_pending(&udc_timer)) {
2259 					udc_timer.expires =
2260 						jiffies
2261 						+ HZ*UDC_RDE_TIMER_SECONDS;
2262 					set_rde = 1;
2263 					if (!stop_timer)
2264 						add_timer(&udc_timer);
2265 				}
2266 				if (ep->num != UDC_EP0OUT_IX)
2267 					dev->data_ep_queued = 0;
2268 			}
2269 
2270 		} else {
2271 			/*
2272 			* RX DMA must be reenabled for each desc in PPBDU mode
2273 			* and must be enabled for PPBNDU mode in case of BNA
2274 			*/
2275 			udc_set_rde(dev);
2276 		}
2277 
2278 	} else if (ep->cancel_transfer) {
2279 		ret_val = IRQ_HANDLED;
2280 		ep->cancel_transfer = 0;
2281 	}
2282 
2283 	/* check pending CNAKS */
2284 	if (cnak_pending) {
2285 		/* CNAK processing only when rxfifo is empty */
2286 		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2287 			udc_process_cnak_queue(dev);
2288 	}
2289 
2290 	/* clear OUT bits in ep status */
2291 	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
2292 finished:
2293 	return ret_val;
2294 }
2295 
2296 /* Interrupt handler for data IN traffic */
2297 static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
2298 {
2299 	irqreturn_t ret_val = IRQ_NONE;
2300 	u32 tmp;
2301 	u32 epsts;
2302 	struct udc_ep *ep;
2303 	struct udc_request *req;
2304 	struct udc_data_dma *td;
2305 	unsigned len;
2306 
2307 	ep = &dev->ep[ep_ix];
2308 
2309 	epsts = readl(&ep->regs->sts);
2310 	if (use_dma) {
2311 		/* BNA ? */
2312 		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
2313 			dev_err(dev->dev,
2314 				"BNA ep%din occurred - DESPTR = %08lx\n",
2315 				ep->num,
2316 				(unsigned long) readl(&ep->regs->desptr));
2317 
2318 			/* clear BNA */
2319 			writel(epsts, &ep->regs->sts);
2320 			ret_val = IRQ_HANDLED;
2321 			goto finished;
2322 		}
2323 	}
2324 	/* HE event ? */
2325 	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
2326 		dev_err(dev->dev,
2327 			"HE ep%din occurred - DESPTR = %08lx\n",
2328 			ep->num, (unsigned long) readl(&ep->regs->desptr));
2329 
2330 		/* clear HE */
2331 		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2332 		ret_val = IRQ_HANDLED;
2333 		goto finished;
2334 	}
2335 
2336 	/* DMA completion */
2337 	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
2338 		VDBG(dev, "TDC set- completion\n");
2339 		ret_val = IRQ_HANDLED;
2340 		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
2341 			req = list_entry(ep->queue.next,
2342 					struct udc_request, queue);
2343 			/*
2344 			 * length bytes transferred
2345 			 * check dma done of last desc. in PPBDU mode
2346 			 */
2347 			if (use_dma_ppb_du) {
2348 				td = udc_get_last_dma_desc(req);
2349 				if (td)
2350 					req->req.actual = req->req.length;
2351 			} else {
2352 				/* assume all bytes transferred */
2353 				req->req.actual = req->req.length;
2354 			}
2355 
2356 			if (req->req.actual == req->req.length) {
2357 				/* complete req */
2358 				complete_req(ep, req, 0);
2359 				req->dma_going = 0;
2360 				/* further request available ? */
2361 				if (list_empty(&ep->queue)) {
2362 					/* disable interrupt */
2363 					tmp = readl(&dev->regs->ep_irqmsk);
2364 					tmp |= AMD_BIT(ep->num);
2365 					writel(tmp, &dev->regs->ep_irqmsk);
2366 				}
2367 			}
2368 		}
2369 		ep->cancel_transfer = 0;
2370 
2371 	}
2372 	/*
2373 	 * status reg has IN bit set and TDC not set (if TDC was handled,
2374 	 * IN must not be handled - UDC defect?)
2375 	 */
2376 	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
2377 			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
2378 		ret_val = IRQ_HANDLED;
2379 		if (!list_empty(&ep->queue)) {
2380 			/* next request */
2381 			req = list_entry(ep->queue.next,
2382 					struct udc_request, queue);
2383 			/* FIFO mode */
2384 			if (!use_dma) {
2385 				/* write fifo */
2386 				udc_txfifo_write(ep, &req->req);
2387 				len = req->req.length - req->req.actual;
2388 				if (len > ep->ep.maxpacket)
2389 					len = ep->ep.maxpacket;
2390 				req->req.actual += len;
2391 				if (req->req.actual == req->req.length
2392 					|| (len != ep->ep.maxpacket)) {
2393 					/* complete req */
2394 					complete_req(ep, req, 0);
2395 				}
2396 			/* DMA */
2397 			} else if (req && !req->dma_going) {
2398 				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
2399 					req, req->td_data);
2400 				if (req->td_data) {
2401 
2402 					req->dma_going = 1;
2403 
2404 					/*
2405 					 * unset L bit of first desc.
2406 					 * for chain
2407 					 */
2408 					if (use_dma_ppb && req->req.length >
2409 							ep->ep.maxpacket) {
2410 						req->td_data->status &=
2411 							AMD_CLEAR_BIT(
2412 							UDC_DMA_IN_STS_L);
2413 					}
2414 
2415 					/* write desc pointer */
2416 					writel(req->td_phys, &ep->regs->desptr);
2417 
2418 					/* set HOST READY */
2419 					req->td_data->status =
2420 						AMD_ADDBITS(
2421 						req->td_data->status,
2422 						UDC_DMA_IN_STS_BS_HOST_READY,
2423 						UDC_DMA_IN_STS_BS);
2424 
2425 					/* set poll demand bit */
2426 					tmp = readl(&ep->regs->ctl);
2427 					tmp |= AMD_BIT(UDC_EPCTL_P);
2428 					writel(tmp, &ep->regs->ctl);
2429 				}
2430 			}
2431 
2432 		} else if (!use_dma && ep->in) {
2433 			/* disable interrupt */
2434 			tmp = readl(
2435 				&dev->regs->ep_irqmsk);
2436 			tmp |= AMD_BIT(ep->num);
2437 			writel(tmp,
2438 				&dev->regs->ep_irqmsk);
2439 		}
2440 	}
2441 	/* clear status bits */
2442 	writel(epsts, &ep->regs->sts);
2443 
2444 finished:
2445 	return ret_val;
2446 
2447 }
2448 
2449 /* Interrupt handler for Control OUT traffic */
2450 static irqreturn_t udc_control_out_isr(struct udc *dev)
2451 __releases(dev->lock)
2452 __acquires(dev->lock)
2453 {
2454 	irqreturn_t ret_val = IRQ_NONE;
2455 	u32 tmp;
2456 	int setup_supported;
2457 	u32 count;
2458 	int set = 0;
2459 	struct udc_ep	*ep;
2460 	struct udc_ep	*ep_tmp;
2461 
2462 	ep = &dev->ep[UDC_EP0OUT_IX];
2463 
2464 	/* clear irq */
2465 	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
2466 
2467 	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2468 	/* check BNA and clear if set */
2469 	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2470 		VDBG(dev, "ep0: BNA set\n");
2471 		writel(AMD_BIT(UDC_EPSTS_BNA),
2472 			&dev->ep[UDC_EP0OUT_IX].regs->sts);
2473 		ep->bna_occurred = 1;
2474 		ret_val = IRQ_HANDLED;
2475 		goto finished;
2476 	}
2477 
2478 	/* type of data: SETUP or zero-byte DATA */
2479 	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
2480 	VDBG(dev, "data_typ = %x\n", tmp);
2481 
2482 	/* setup data */
2483 	if (tmp == UDC_EPSTS_OUT_SETUP) {
2484 		ret_val = IRQ_HANDLED;
2485 
2486 		ep->dev->stall_ep0in = 0;
2487 		dev->waiting_zlp_ack_ep0in = 0;
2488 
2489 		/* set NAK for EP0_IN */
2490 		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2491 		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
2492 		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2493 		dev->ep[UDC_EP0IN_IX].naking = 1;
2494 		/* get setup data */
2495 		if (use_dma) {
2496 
2497 			/* clear OUT bits in ep status */
2498 			writel(UDC_EPSTS_OUT_CLEAR,
2499 				&dev->ep[UDC_EP0OUT_IX].regs->sts);
2500 
2501 			setup_data.data[0] =
2502 				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
2503 			setup_data.data[1] =
2504 				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
2505 			/* set HOST READY */
2506 			dev->ep[UDC_EP0OUT_IX].td_stp->status =
2507 					UDC_DMA_STP_STS_BS_HOST_READY;
2508 		} else {
2509 			/* read fifo */
2510 			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
2511 		}
2512 
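		/*
		 * setup_data now holds the 8-byte SETUP packet, taken either
		 * from the setup DMA descriptor (td_stp) above or read
		 * directly out of the RX FIFO.
		 */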
2513 		/* determine direction of control data */
2514 		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
2515 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
2516 			/* enable RDE */
2517 			udc_ep0_set_rde(dev);
2518 			set = 0;
2519 		} else {
2520 			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
2521 			/*
2522 			 * implant BNA dummy descriptor to allow RXFIFO opening
2523 			 * by RDE
2524 			 */
2525 			if (ep->bna_dummy_req) {
2526 				/* write desc pointer */
2527 				writel(ep->bna_dummy_req->td_phys,
2528 					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
2529 				ep->bna_occurred = 0;
2530 			}
2531 
2532 			set = 1;
2533 			dev->ep[UDC_EP0OUT_IX].naking = 1;
2534 			/*
2535 			 * setup timer for enabling RDE (to not enable
2536 			 * RXFIFO DMA for data too early)
2537 			 */
2538 			set_rde = 1;
2539 			if (!timer_pending(&udc_timer)) {
2540 				udc_timer.expires = jiffies +
2541 							HZ/UDC_RDE_TIMER_DIV;
2542 				if (!stop_timer)
2543 					add_timer(&udc_timer);
2544 			}
2545 		}
2546 
2547 		/*
2548 		 * mass storage reset must be processed here because
2549 		 * next packet may be a CLEAR_FEATURE HALT which would not
2550 		 * clear the stall bit when no STALL handshake was received
2551 		 * before (autostall can cause this)
2552 		 */
2553 		if (setup_data.data[0] == UDC_MSCRES_DWORD0
2554 				&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
2555 			DBG(dev, "MSC Reset\n");
2556 			/*
2557 			 * clear stall bits
2558 			 * only one IN and one OUT endpoint are handled
2559 			 */
2560 			ep_tmp = &udc->ep[UDC_EPIN_IX];
2561 			udc_set_halt(&ep_tmp->ep, 0);
2562 			ep_tmp = &udc->ep[UDC_EPOUT_IX];
2563 			udc_set_halt(&ep_tmp->ep, 0);
2564 		}
2565 
2566 		/* call gadget with setup data received */
2567 		spin_unlock(&dev->lock);
2568 		setup_supported = dev->driver->setup(&dev->gadget,
2569 						&setup_data.request);
2570 		spin_lock(&dev->lock);
2571 
2572 		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2573 		/* ep0 in returns data (not zlp) on IN phase */
2574 		if (setup_supported >= 0 && setup_supported <
2575 				UDC_EP0IN_MAXPACKET) {
2576 			/* clear NAK by writing CNAK in EP0_IN */
2577 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2578 			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2579 			dev->ep[UDC_EP0IN_IX].naking = 0;
2580 			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
2581 
2582 		/* if unsupported request then stall */
2583 		} else if (setup_supported < 0) {
2584 			tmp |= AMD_BIT(UDC_EPCTL_S);
2585 			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2586 		} else
2587 			dev->waiting_zlp_ack_ep0in = 1;
2588 
2589 
2590 		/* clear NAK by writing CNAK in EP0_OUT */
2591 		if (!set) {
2592 			tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2593 			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2594 			writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2595 			dev->ep[UDC_EP0OUT_IX].naking = 0;
2596 			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
2597 		}
2598 
2599 		if (!use_dma) {
2600 			/* clear OUT bits in ep status */
2601 			writel(UDC_EPSTS_OUT_CLEAR,
2602 				&dev->ep[UDC_EP0OUT_IX].regs->sts);
2603 		}
2604 
2605 	/* zero-byte data packet */
2606 	} else if (tmp == UDC_EPSTS_OUT_DATA) {
2607 		/* clear OUT bits in ep status */
2608 		writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
2609 
2610 		/* get setup data: only 0 packet */
2611 		if (use_dma) {
2612 			/* no req if 0 packet, just reactivate */
2613 			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
2614 				VDBG(dev, "ZLP\n");
2615 
2616 				/* set HOST READY */
2617 				dev->ep[UDC_EP0OUT_IX].td->status =
2618 					AMD_ADDBITS(
2619 					dev->ep[UDC_EP0OUT_IX].td->status,
2620 					UDC_DMA_OUT_STS_BS_HOST_READY,
2621 					UDC_DMA_OUT_STS_BS);
2622 				/* enable RDE */
2623 				udc_ep0_set_rde(dev);
2624 				ret_val = IRQ_HANDLED;
2625 
2626 			} else {
2627 				/* control write */
2628 				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2629 				/* re-program desc. pointer for possible ZLPs */
2630 				writel(dev->ep[UDC_EP0OUT_IX].td_phys,
2631 					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
2632 				/* enable RDE */
2633 				udc_ep0_set_rde(dev);
2634 			}
2635 		} else {
2636 
2637 			/* number of received bytes */
2638 			count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2639 			count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
2640 			/* OUT data in fifo mode does not work; force count to zero */
2641 			count = 0;
2642 
2643 			/* 0 packet or real data ? */
2644 			if (count != 0) {
2645 				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2646 			} else {
2647 				/* dummy read confirm */
2648 				readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
2649 				ret_val = IRQ_HANDLED;
2650 			}
2651 		}
2652 	}
2653 
2654 	/* check pending CNAKS */
2655 	if (cnak_pending) {
2656 		/* CNAK processing only when rxfifo is empty */
2657 		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2658 			udc_process_cnak_queue(dev);
2659 	}
2660 
2661 finished:
2662 	return ret_val;
2663 }
2664 
2665 /* Interrupt handler for Control IN traffic */
2666 static irqreturn_t udc_control_in_isr(struct udc *dev)
2667 {
2668 	irqreturn_t ret_val = IRQ_NONE;
2669 	u32 tmp;
2670 	struct udc_ep *ep;
2671 	struct udc_request *req;
2672 	unsigned len;
2673 
2674 	ep = &dev->ep[UDC_EP0IN_IX];
2675 
2676 	/* clear irq */
2677 	writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
2678 
2679 	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
2680 	/* DMA completion */
2681 	if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
2682 		VDBG(dev, "isr: TDC clear\n");
2683 		ret_val = IRQ_HANDLED;
2684 
2685 		/* clear TDC bit */
2686 		writel(AMD_BIT(UDC_EPSTS_TDC),
2687 				&dev->ep[UDC_EP0IN_IX].regs->sts);
2688 
2689 	/* status reg has IN bit set ? */
2690 	} else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
2691 		ret_val = IRQ_HANDLED;
2692 
2693 		if (ep->dma) {
2694 			/* clear IN bit */
2695 			writel(AMD_BIT(UDC_EPSTS_IN),
2696 				&dev->ep[UDC_EP0IN_IX].regs->sts);
2697 		}
2698 		if (dev->stall_ep0in) {
2699 			DBG(dev, "stall ep0in\n");
2700 			/* halt ep0in */
2701 			tmp = readl(&ep->regs->ctl);
2702 			tmp |= AMD_BIT(UDC_EPCTL_S);
2703 			writel(tmp, &ep->regs->ctl);
2704 		} else {
2705 			if (!list_empty(&ep->queue)) {
2706 				/* next request */
2707 				req = list_entry(ep->queue.next,
2708 						struct udc_request, queue);
2709 
2710 				if (ep->dma) {
2711 					/* write desc pointer */
2712 					writel(req->td_phys, &ep->regs->desptr);
2713 					/* set HOST READY */
2714 					req->td_data->status =
2715 						AMD_ADDBITS(
2716 						req->td_data->status,
2717 						UDC_DMA_STP_STS_BS_HOST_READY,
2718 						UDC_DMA_STP_STS_BS);
2719 
2720 					/* set poll demand bit */
2721 					tmp =
2722 					readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2723 					tmp |= AMD_BIT(UDC_EPCTL_P);
2724 					writel(tmp,
2725 					&dev->ep[UDC_EP0IN_IX].regs->ctl);
2726 
2727 					/* all bytes will be transferred */
2728 					req->req.actual = req->req.length;
2729 
2730 					/* complete req */
2731 					complete_req(ep, req, 0);
2732 
2733 				} else {
2734 					/* write fifo */
2735 					udc_txfifo_write(ep, &req->req);
2736 
2737 					/* length bytes transferred */
2738 					len = req->req.length - req->req.actual;
2739 					if (len > ep->ep.maxpacket)
2740 						len = ep->ep.maxpacket;
2741 
2742 					req->req.actual += len;
2743 					if (req->req.actual == req->req.length
2744 						|| (len != ep->ep.maxpacket)) {
2745 						/* complete req */
2746 						complete_req(ep, req, 0);
2747 					}
2748 				}
2749 
2750 			}
2751 		}
2752 		ep->halted = 0;
2753 		dev->stall_ep0in = 0;
2754 		if (!ep->dma) {
2755 			/* clear IN bit */
2756 			writel(AMD_BIT(UDC_EPSTS_IN),
2757 				&dev->ep[UDC_EP0IN_IX].regs->sts);
2758 		}
2759 	}
2760 
2761 	return ret_val;
2762 }
2763 
2764 
2765 /* Interrupt handler for global device events */
2766 static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
2767 __releases(dev->lock)
2768 __acquires(dev->lock)
2769 {
2770 	irqreturn_t ret_val = IRQ_NONE;
2771 	u32 tmp;
2772 	u32 cfg;
2773 	struct udc_ep *ep;
2774 	u16 i;
2775 	u8 udc_csr_epix;
2776 
2777 	/* SET_CONFIG irq ? */
2778 	if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
2779 		ret_val = IRQ_HANDLED;
2780 
2781 		/* read config value */
2782 		tmp = readl(&dev->regs->sts);
2783 		cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
2784 		DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
2785 		dev->cur_config = cfg;
2786 		dev->set_cfg_not_acked = 1;
2787 
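		/*
		 * The controller has already accepted SET_CONFIGURATION in
		 * hardware (hence the SC interrupt), so an equivalent request
		 * is synthesized here and forwarded to the gadget driver.
		 */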
2788 		/* make usb request for gadget driver */
2789 		memset(&setup_data, 0 , sizeof(union udc_setup_data));
2790 		setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
2791 		setup_data.request.wValue = cpu_to_le16(dev->cur_config);
2792 
2793 		/* program the NE registers */
2794 		for (i = 0; i < UDC_EP_NUM; i++) {
2795 			ep = &dev->ep[i];
2796 			if (ep->in) {
2797 
2798 				/* ep ix in UDC CSR register space */
2799 				udc_csr_epix = ep->num;
2800 
2801 
2802 			/* OUT ep */
2803 			} else {
2804 				/* ep ix in UDC CSR register space */
2805 				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2806 			}
2807 
2808 			tmp = readl(&dev->csr->ne[udc_csr_epix]);
2809 			/* ep cfg */
2810 			tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
2811 						UDC_CSR_NE_CFG);
2812 			/* write reg */
2813 			writel(tmp, &dev->csr->ne[udc_csr_epix]);
2814 
2815 			/* clear stall bits */
2816 			ep->halted = 0;
2817 			tmp = readl(&ep->regs->ctl);
2818 			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2819 			writel(tmp, &ep->regs->ctl);
2820 		}
2821 		/* call gadget zero with setup data received */
2822 		spin_unlock(&dev->lock);
2823 		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2824 		spin_lock(&dev->lock);
2825 
2826 	} /* SET_INTERFACE ? */
2827 	if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
2828 		ret_val = IRQ_HANDLED;
2829 
2830 		dev->set_cfg_not_acked = 1;
2831 		/* read interface and alt setting values */
2832 		tmp = readl(&dev->regs->sts);
2833 		dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
2834 		dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
2835 
2836 		/* make usb request for gadget driver */
2837 		memset(&setup_data, 0 , sizeof(union udc_setup_data));
2838 		setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
2839 		setup_data.request.bRequestType = USB_RECIP_INTERFACE;
2840 		setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
2841 		setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
2842 
2843 		DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
2844 				dev->cur_alt, dev->cur_intf);
2845 
2846 		/* program the NE registers */
2847 		for (i = 0; i < UDC_EP_NUM; i++) {
2848 			ep = &dev->ep[i];
2849 			if (ep->in) {
2850 
2851 				/* ep ix in UDC CSR register space */
2852 				udc_csr_epix = ep->num;
2853 
2854 
2855 			/* OUT ep */
2856 			} else {
2857 				/* ep ix in UDC CSR register space */
2858 				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2859 			}
2860 
2861 			/* UDC CSR reg */
2862 			/* set ep values */
2863 			tmp = readl(&dev->csr->ne[udc_csr_epix]);
2864 			/* ep interface */
2865 			tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
2866 						UDC_CSR_NE_INTF);
2867 			/* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
2868 			/* ep alt */
2869 			tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
2870 						UDC_CSR_NE_ALT);
2871 			/* write reg */
2872 			writel(tmp, &dev->csr->ne[udc_csr_epix]);
2873 
2874 			/* clear stall bits */
2875 			ep->halted = 0;
2876 			tmp = readl(&ep->regs->ctl);
2877 			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2878 			writel(tmp, &ep->regs->ctl);
2879 		}
2880 
2881 		/* call gadget zero with setup data received */
2882 		spin_unlock(&dev->lock);
2883 		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2884 		spin_lock(&dev->lock);
2885 
2886 	} /* USB reset */
2887 	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
2888 		DBG(dev, "USB Reset interrupt\n");
2889 		ret_val = IRQ_HANDLED;
2890 
2891 		/* allow soft reset when suspend occurs */
2892 		soft_reset_occured = 0;
2893 
2894 		dev->waiting_zlp_ack_ep0in = 0;
2895 		dev->set_cfg_not_acked = 0;
2896 
2897 		/* mask not needed interrupts */
2898 		udc_mask_unused_interrupts(dev);
2899 
2900 		/* call gadget to resume and reset configs etc. */
2901 		spin_unlock(&dev->lock);
2902 		if (dev->sys_suspended && dev->driver->resume) {
2903 			dev->driver->resume(&dev->gadget);
2904 			dev->sys_suspended = 0;
2905 		}
2906 		usb_gadget_udc_reset(&dev->gadget, dev->driver);
2907 		spin_lock(&dev->lock);
2908 
2909 		/* disable ep0 to empty req queue */
2910 		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2911 		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2912 
2913 		/* soft reset when rxfifo not empty */
2914 		tmp = readl(&dev->regs->sts);
2915 		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2916 				&& !soft_reset_after_usbreset_occured) {
2917 			udc_soft_reset(dev);
2918 			soft_reset_after_usbreset_occured++;
2919 		}
2920 
2921 		/*
2922 		 * DMA reset to kill potential old DMA hw hang,
2923 		 * POLL bit is already reset by ep_init() through
2924 		 * disconnect()
2925 		 */
2926 		DBG(dev, "DMA machine reset\n");
2927 		tmp = readl(&dev->regs->cfg);
2928 		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
2929 		writel(tmp, &dev->regs->cfg);
2930 
2931 		/* put into initial config */
2932 		udc_basic_init(dev);
2933 
2934 		/* enable device setup interrupts */
2935 		udc_enable_dev_setup_interrupts(dev);
2936 
2937 		/* enable suspend interrupt */
2938 		tmp = readl(&dev->regs->irqmsk);
2939 		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
2940 		writel(tmp, &dev->regs->irqmsk);
2941 
2942 	} /* USB suspend */
2943 	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
2944 		DBG(dev, "USB Suspend interrupt\n");
2945 		ret_val = IRQ_HANDLED;
2946 		if (dev->driver->suspend) {
2947 			spin_unlock(&dev->lock);
2948 			dev->sys_suspended = 1;
2949 			dev->driver->suspend(&dev->gadget);
2950 			spin_lock(&dev->lock);
2951 		}
2952 	} /* new speed ? */
2953 	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
2954 		DBG(dev, "ENUM interrupt\n");
2955 		ret_val = IRQ_HANDLED;
2956 		soft_reset_after_usbreset_occured = 0;
2957 
2958 		/* disable ep0 to empty req queue */
2959 		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2960 		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2961 
2962 		/* link up all endpoints */
2963 		udc_setup_endpoints(dev);
2964 		dev_info(dev->dev, "Connect: %s\n",
2965 			 usb_speed_string(dev->gadget.speed));
2966 
2967 		/* init ep 0 */
2968 		activate_control_endpoints(dev);
2969 
2970 		/* enable ep0 interrupts */
2971 		udc_enable_ep0_interrupts(dev);
2972 	}
2973 	/* session valid change interrupt */
2974 	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
2975 		DBG(dev, "USB SVC interrupt\n");
2976 		ret_val = IRQ_HANDLED;
2977 
2978 		/* check that session is not valid to detect disconnect */
2979 		tmp = readl(&dev->regs->sts);
2980 		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
2981 			/* disable suspend interrupt */
2982 			tmp = readl(&dev->regs->irqmsk);
2983 			tmp |= AMD_BIT(UDC_DEVINT_US);
2984 			writel(tmp, &dev->regs->irqmsk);
2985 			DBG(dev, "USB Disconnect (session valid low)\n");
2986 			/* cleanup on disconnect */
2987 			usb_disconnect(udc);
2988 		}
2989 
2990 	}
2991 
2992 	return ret_val;
2993 }
2994 
2995 /* Interrupt Service Routine, see Linux Kernel Doc for parameters */
2996 irqreturn_t udc_irq(int irq, void *pdev)
2997 {
2998 	struct udc *dev = pdev;
2999 	u32 reg;
3000 	u16 i;
3001 	u32 ep_irq;
3002 	irqreturn_t ret_val = IRQ_NONE;
3003 
3004 	spin_lock(&dev->lock);
3005 
3006 	/* check for ep irq */
3007 	reg = readl(&dev->regs->ep_irqsts);
3008 	if (reg) {
3009 		if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
3010 			ret_val |= udc_control_out_isr(dev);
3011 		if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
3012 			ret_val |= udc_control_in_isr(dev);
3013 
3014 		/*
3015 		 * data endpoint
3016 		 * iterate ep's
3017 		 */
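		/*
		 * The low bits of ep_irqsts belong to IN endpoints and the
		 * bits from UDC_EPINT_OUT_EP0 upwards to OUT endpoints,
		 * hence the i > UDC_EPIN_NUM test below.
		 */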
3018 		for (i = 1; i < UDC_EP_NUM; i++) {
3019 			ep_irq = 1 << i;
3020 			if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
3021 				continue;
3022 
3023 			/* clear irq status */
3024 			writel(ep_irq, &dev->regs->ep_irqsts);
3025 
3026 			/* irq for out ep ? */
3027 			if (i > UDC_EPIN_NUM)
3028 				ret_val |= udc_data_out_isr(dev, i);
3029 			else
3030 				ret_val |= udc_data_in_isr(dev, i);
3031 		}
3032 
3033 	}
3034 
3035 
3036 	/* check for dev irq */
3037 	reg = readl(&dev->regs->irqsts);
3038 	if (reg) {
3039 		/* clear irq */
3040 		writel(reg, &dev->regs->irqsts);
3041 		ret_val |= udc_dev_isr(dev, reg);
3042 	}
3043 
3044 
3045 	spin_unlock(&dev->lock);
3046 	return ret_val;
3047 }
3048 EXPORT_SYMBOL_GPL(udc_irq);
3049 
3050 /* Tears down device */
3051 void gadget_release(struct device *pdev)
3052 {
3053 	struct amd5536udc *dev = dev_get_drvdata(pdev);
3054 	kfree(dev);
3055 }
3056 EXPORT_SYMBOL_GPL(gadget_release);
3057 
3058 /* Cleanup on device remove */
3059 void udc_remove(struct udc *dev)
3060 {
3061 	/* remove timer */
3062 	stop_timer++;
3063 	if (timer_pending(&udc_timer))
3064 		wait_for_completion(&on_exit);
3065 	del_timer_sync(&udc_timer);
3066 	/* remove pollstall timer */
3067 	stop_pollstall_timer++;
3068 	if (timer_pending(&udc_pollstall_timer))
3069 		wait_for_completion(&on_pollstall_exit);
3070 	del_timer_sync(&udc_pollstall_timer);
3071 	udc = NULL;
3072 }
3073 EXPORT_SYMBOL_GPL(udc_remove);
3074 
3075 /* free all the dma pools */
3076 void free_dma_pools(struct udc *dev)
3077 {
3078 	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td,
3079 		      dev->ep[UDC_EP0OUT_IX].td_phys);
3080 	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
3081 		      dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3082 	dma_pool_destroy(dev->stp_requests);
3083 	dma_pool_destroy(dev->data_requests);
3084 }
3085 EXPORT_SYMBOL_GPL(free_dma_pools);
3086 
3087 /* create dma pools on init */
3088 int init_dma_pools(struct udc *dev)
3089 {
3090 	struct udc_stp_dma	*td_stp;
3091 	struct udc_data_dma	*td_data;
3092 	int retval;
3093 
3094 	/* consistent DMA mode setting ? */
3095 	if (use_dma_ppb) {
3096 		use_dma_bufferfill_mode = 0;
3097 	} else {
3098 		use_dma_ppb_du = 0;
3099 		use_dma_bufferfill_mode = 1;
3100 	}
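	/*
	 * Resulting combinations: use_dma_ppb selects packet-per-buffer mode
	 * (buffer fill forced off, with or without descriptor update per
	 * use_dma_ppb_du); otherwise buffer fill mode is used and the
	 * descriptor-update option is forced off.
	 */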
3101 
3102 	/* DMA setup */
3103 	dev->data_requests = dma_pool_create("data_requests", dev->dev,
3104 		sizeof(struct udc_data_dma), 0, 0);
3105 	if (!dev->data_requests) {
3106 		DBG(dev, "can't get request data pool\n");
3107 		return -ENOMEM;
3108 	}
3109 
3110 	/* EP0 in dma regs = dev control regs */
3111 	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
3112 
3113 	/* dma desc for setup data */
3114 	dev->stp_requests = dma_pool_create("setup requests", dev->dev,
3115 		sizeof(struct udc_stp_dma), 0, 0);
3116 	if (!dev->stp_requests) {
3117 		DBG(dev, "can't get stp request pool\n");
3118 		retval = -ENOMEM;
3119 		goto err_create_dma_pool;
3120 	}
3121 	/* setup */
3122 	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3123 				&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3124 	if (!td_stp) {
3125 		retval = -ENOMEM;
3126 		goto err_alloc_dma;
3127 	}
3128 	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
3129 
3130 	/* data: 0 packets !? */
3131 	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3132 				&dev->ep[UDC_EP0OUT_IX].td_phys);
3133 	if (!td_data) {
3134 		retval = -ENOMEM;
3135 		goto err_alloc_phys;
3136 	}
3137 	dev->ep[UDC_EP0OUT_IX].td = td_data;
3138 	return 0;
3139 
3140 err_alloc_phys:
3141 	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
3142 		      dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3143 err_alloc_dma:
3144 	dma_pool_destroy(dev->stp_requests);
3145 	dev->stp_requests = NULL;
3146 err_create_dma_pool:
3147 	dma_pool_destroy(dev->data_requests);
3148 	dev->data_requests = NULL;
3149 	return retval;
3150 }
3151 EXPORT_SYMBOL_GPL(init_dma_pools);
3152 
3153 /* general probe */
3154 int udc_probe(struct udc *dev)
3155 {
3156 	char		tmp[128];
3157 	u32		reg;
3158 	int		retval;
3159 
3160 	/* device struct setup */
3161 	dev->gadget.ops = &udc_ops;
3162 
3163 	dev_set_name(&dev->gadget.dev, "gadget");
3164 	dev->gadget.name = name;
3165 	dev->gadget.max_speed = USB_SPEED_HIGH;
3166 
3167 	/* init registers, interrupts, ... */
3168 	startup_registers(dev);
3169 
3170 	dev_info(dev->dev, "%s\n", mod_desc);
3171 
3172 	snprintf(tmp, sizeof(tmp), "%d", dev->irq);
3173 
3174 	/* Print this device info for AMD chips only */
3175 	if (dev->chiprev == UDC_HSA0_REV ||
3176 	    dev->chiprev == UDC_HSB1_REV) {
3177 		dev_info(dev->dev, "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
3178 			 tmp, dev->phys_addr, dev->chiprev,
3179 			 (dev->chiprev == UDC_HSA0_REV) ?
3180 			 "A0" : "B1");
3181 		strcpy(tmp, UDC_DRIVER_VERSION_STRING);
3182 		if (dev->chiprev == UDC_HSA0_REV) {
3183 			dev_err(dev->dev, "chip revision is A0; too old\n");
3184 			retval = -ENODEV;
3185 			goto finished;
3186 		}
3187 		dev_info(dev->dev,
3188 			 "driver version: %s(for Geode5536 B1)\n", tmp);
3189 	}
3190 
3191 	udc = dev;
3192 
3193 	retval = usb_add_gadget_udc_release(udc->dev, &dev->gadget,
3194 					    gadget_release);
3195 	if (retval)
3196 		goto finished;
3197 
3198 	/* timer init */
3199 	timer_setup(&udc_timer, udc_timer_function, 0);
3200 	timer_setup(&udc_pollstall_timer, udc_pollstall_timer_function, 0);
3201 
3202 	/* set SD */
3203 	reg = readl(&dev->regs->ctl);
3204 	reg |= AMD_BIT(UDC_DEVCTL_SD);
3205 	writel(reg, &dev->regs->ctl);
3206 
3207 	/* print dev register info */
3208 	print_regs(dev);
3209 
3210 	return 0;
3211 
3212 finished:
3213 	return retval;
3214 }
3215 EXPORT_SYMBOL_GPL(udc_probe);
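
/*
 * The symbols exported above are intended for bus glue drivers (PCI or
 * SoC platform) that own the device resources.  A rough, illustrative
 * sketch of a glue probe path -- not the actual glue code, and the
 * glue-local details are hypothetical:
 *
 *	dev->regs = <ioremapped register base>;
 *	retval = init_dma_pools(dev);
 *	retval = request_irq(dev->irq, udc_irq, IRQF_SHARED, name, dev);
 *	retval = udc_probe(dev);
 *
 * and on removal the glue calls udc_remove(dev), free_dma_pools(dev)
 * and frees the interrupt again.
 */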
3216 
3217 MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
3218 MODULE_AUTHOR("Thomas Dahlmann");
3219 MODULE_LICENSE("GPL");
3220