1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * bdc_core.c - BRCM BDC USB3.0 device controller core operations
4 *
5 * Copyright (C) 2014 Broadcom Corporation
6 *
7 * Author: Ashwini Pahuja
8 */
9 #include <linux/module.h>
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/platform_device.h>
14 #include <linux/interrupt.h>
15 #include <linux/ioport.h>
16 #include <linux/io.h>
17 #include <linux/list.h>
18 #include <linux/delay.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dmapool.h>
21 #include <linux/of.h>
22 #include <linux/phy/phy.h>
23 #include <linux/moduleparam.h>
24 #include <linux/usb/ch9.h>
25 #include <linux/usb/gadget.h>
26 #include <linux/clk.h>
27
28 #include "bdc.h"
29 #include "bdc_dbg.h"
30
31 /* Poll till controller status is not OIP */
poll_oip(struct bdc * bdc,int usec)32 static int poll_oip(struct bdc *bdc, int usec)
33 {
34 u32 status;
35 /* Poll till STS!= OIP */
36 while (usec) {
37 status = bdc_readl(bdc->regs, BDC_BDCSC);
38 if (BDC_CSTS(status) != BDC_OIP) {
39 dev_dbg(bdc->dev,
40 "poll_oip complete status=%d",
41 BDC_CSTS(status));
42 return 0;
43 }
44 udelay(10);
45 usec -= 10;
46 }
47 dev_err(bdc->dev, "Err: operation timedout BDCSC: 0x%08x\n", status);
48
49 return -ETIMEDOUT;
50 }
51
52 /* Stop the BDC controller */
bdc_stop(struct bdc * bdc)53 int bdc_stop(struct bdc *bdc)
54 {
55 int ret;
56 u32 temp;
57
58 dev_dbg(bdc->dev, "%s ()\n\n", __func__);
59 temp = bdc_readl(bdc->regs, BDC_BDCSC);
60 /* Check if BDC is already halted */
61 if (BDC_CSTS(temp) == BDC_HLT) {
62 dev_vdbg(bdc->dev, "BDC already halted\n");
63 return 0;
64 }
65 temp &= ~BDC_COP_MASK;
66 temp |= BDC_COS|BDC_COP_STP;
67 bdc_writel(bdc->regs, BDC_BDCSC, temp);
68
69 ret = poll_oip(bdc, BDC_COP_TIMEOUT);
70 if (ret)
71 dev_err(bdc->dev, "bdc stop operation failed");
72
73 return ret;
74 }
75
76 /* Issue a reset to BDC controller */
bdc_reset(struct bdc * bdc)77 int bdc_reset(struct bdc *bdc)
78 {
79 u32 temp;
80 int ret;
81
82 dev_dbg(bdc->dev, "%s ()\n", __func__);
83 /* First halt the controller */
84 ret = bdc_stop(bdc);
85 if (ret)
86 return ret;
87
88 temp = bdc_readl(bdc->regs, BDC_BDCSC);
89 temp &= ~BDC_COP_MASK;
90 temp |= BDC_COS|BDC_COP_RST;
91 bdc_writel(bdc->regs, BDC_BDCSC, temp);
92 ret = poll_oip(bdc, BDC_COP_TIMEOUT);
93 if (ret)
94 dev_err(bdc->dev, "bdc reset operation failed");
95
96 return ret;
97 }
98
99 /* Run the BDC controller */
bdc_run(struct bdc * bdc)100 int bdc_run(struct bdc *bdc)
101 {
102 u32 temp;
103 int ret;
104
105 dev_dbg(bdc->dev, "%s ()\n", __func__);
106 temp = bdc_readl(bdc->regs, BDC_BDCSC);
107 /* if BDC is already in running state then do not do anything */
108 if (BDC_CSTS(temp) == BDC_NOR) {
109 dev_warn(bdc->dev, "bdc is already in running state\n");
110 return 0;
111 }
112 temp &= ~BDC_COP_MASK;
113 temp |= BDC_COP_RUN;
114 temp |= BDC_COS;
115 bdc_writel(bdc->regs, BDC_BDCSC, temp);
116 ret = poll_oip(bdc, BDC_COP_TIMEOUT);
117 if (ret) {
118 dev_err(bdc->dev, "bdc run operation failed:%d", ret);
119 return ret;
120 }
121 temp = bdc_readl(bdc->regs, BDC_BDCSC);
122 if (BDC_CSTS(temp) != BDC_NOR) {
123 dev_err(bdc->dev, "bdc not in normal mode after RUN op :%d\n",
124 BDC_CSTS(temp));
125 return -ESHUTDOWN;
126 }
127
128 return 0;
129 }
130
131 /*
132 * Present the termination to the host, typically called from upstream port
133 * event with Vbus present =1
134 */
bdc_softconn(struct bdc * bdc)135 void bdc_softconn(struct bdc *bdc)
136 {
137 u32 uspc;
138
139 uspc = bdc_readl(bdc->regs, BDC_USPC);
140 uspc &= ~BDC_PST_MASK;
141 uspc |= BDC_LINK_STATE_RX_DET;
142 uspc |= BDC_SWS;
143 dev_dbg(bdc->dev, "%s () uspc=%08x\n", __func__, uspc);
144 bdc_writel(bdc->regs, BDC_USPC, uspc);
145 }
146
147 /* Remove the termination */
bdc_softdisconn(struct bdc * bdc)148 void bdc_softdisconn(struct bdc *bdc)
149 {
150 u32 uspc;
151
152 uspc = bdc_readl(bdc->regs, BDC_USPC);
153 uspc |= BDC_SDC;
154 uspc &= ~BDC_SCN;
155 dev_dbg(bdc->dev, "%s () uspc=%x\n", __func__, uspc);
156 bdc_writel(bdc->regs, BDC_USPC, uspc);
157 }
158
159 /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
scratchpad_setup(struct bdc * bdc)160 static int scratchpad_setup(struct bdc *bdc)
161 {
162 int sp_buff_size;
163 u32 low32;
164 u32 upp32;
165
166 sp_buff_size = BDC_SPB(bdc_readl(bdc->regs, BDC_BDCCFG0));
167 dev_dbg(bdc->dev, "%s() sp_buff_size=%d\n", __func__, sp_buff_size);
168 if (!sp_buff_size) {
169 dev_dbg(bdc->dev, "Scratchpad buffer not needed\n");
170 return 0;
171 }
172 /* Refer to BDC spec, Table 4 for description of SPB */
173 sp_buff_size = 1 << (sp_buff_size + 5);
174 dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size);
175 bdc->scratchpad.buff = dma_alloc_coherent(bdc->dev, sp_buff_size,
176 &bdc->scratchpad.sp_dma,
177 GFP_KERNEL);
178
179 if (!bdc->scratchpad.buff)
180 goto fail;
181
182 bdc->sp_buff_size = sp_buff_size;
183 bdc->scratchpad.size = sp_buff_size;
184 low32 = lower_32_bits(bdc->scratchpad.sp_dma);
185 upp32 = upper_32_bits(bdc->scratchpad.sp_dma);
186 cpu_to_le32s(&low32);
187 cpu_to_le32s(&upp32);
188 bdc_writel(bdc->regs, BDC_SPBBAL, low32);
189 bdc_writel(bdc->regs, BDC_SPBBAH, upp32);
190 return 0;
191
192 fail:
193 bdc->scratchpad.buff = NULL;
194
195 return -ENOMEM;
196 }
197
198 /* Allocate the status report ring */
setup_srr(struct bdc * bdc,int interrupter)199 static int setup_srr(struct bdc *bdc, int interrupter)
200 {
201 dev_dbg(bdc->dev, "%s() NUM_SR_ENTRIES:%d\n", __func__, NUM_SR_ENTRIES);
202 /* Reset the SRR */
203 bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST);
204 bdc->srr.dqp_index = 0;
205 /* allocate the status report descriptors */
206 bdc->srr.sr_bds = dma_alloc_coherent(bdc->dev,
207 NUM_SR_ENTRIES * sizeof(struct bdc_bd),
208 &bdc->srr.dma_addr, GFP_KERNEL);
209 if (!bdc->srr.sr_bds)
210 return -ENOMEM;
211
212 return 0;
213 }
214
/* Initialize the HW regs and internal data structures */
static void bdc_mem_init(struct bdc *bdc, bool reinit)
{
	u8 size = 0;
	u32 usb2_pm;
	u32 low32;
	u32 upp32;
	u32 temp;

	dev_dbg(bdc->dev, "%s ()\n", __func__);
	/* Reset the driver's software state: ep0 FSM, address, SRR indices */
	bdc->ep0_state = WAIT_FOR_SETUP;
	bdc->dev_addr = 0;
	bdc->srr.eqp_index = 0;
	bdc->srr.dqp_index = 0;
	bdc->zlp_needed = false;
	bdc->delayed_status = false;

	/*
	 * NOTE(review): sp_dma (dma_addr_t) is implicitly truncated to the
	 * 32-bit SPBBAL register here and SPBBAH is not re-written, whereas
	 * scratchpad_setup() programs both halves with LE conversion —
	 * confirm this is intentional for >32-bit DMA addresses.
	 */
	bdc_writel(bdc->regs, BDC_SPBBAL, bdc->scratchpad.sp_dma);
	/* Init the SRR */
	temp = BDC_SRR_RWS | BDC_SRR_RST;
	/* Reset the SRR */
	bdc_writel(bdc->regs, BDC_SRRINT(0), temp);
	dev_dbg(bdc->dev, "bdc->srr.sr_bds =%p\n", bdc->srr.sr_bds);
	/* Ring size is encoded in the low bits of the SRR base address */
	temp = lower_32_bits(bdc->srr.dma_addr);
	size = fls(NUM_SR_ENTRIES) - 2;
	temp |= size;
	dev_dbg(bdc->dev, "SRRBAL[0]=%08x NUM_SR_ENTRIES:%d size:%d\n",
		temp, NUM_SR_ENTRIES, size);

	low32 = lower_32_bits(temp);
	upp32 = upper_32_bits(bdc->srr.dma_addr);
	cpu_to_le32s(&low32);
	cpu_to_le32s(&upp32);

	/* Write the dma addresses into regs*/
	bdc_writel(bdc->regs, BDC_SRRBAL(0), low32);
	bdc_writel(bdc->regs, BDC_SRRBAH(0), upp32);

	/* Enable SRR interrupts and take the ring out of reset */
	temp = bdc_readl(bdc->regs, BDC_SRRINT(0));
	temp |= BDC_SRR_IE;
	temp &= ~(BDC_SRR_RST | BDC_SRR_RWS);
	bdc_writel(bdc->regs, BDC_SRRINT(0), temp);

	/* Set the Interrupt Coalescence ~500 usec */
	temp = bdc_readl(bdc->regs, BDC_INTCTLS(0));
	temp &= ~0xffff;
	temp |= INT_CLS;
	bdc_writel(bdc->regs, BDC_INTCTLS(0), temp);

	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
	/* Enable hardware LPM Enable */
	usb2_pm |= BDC_HLE;
	bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);

	/* readback for debug */
	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	dev_dbg(bdc->dev, "usb2_pm=%08x\n", usb2_pm);

	/* Disable any unwanted SR's on SRR */
	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	/* We don't want Microframe counter wrap SR */
	temp |= BDC_MASK_MCW;
	bdc_writel(bdc->regs, BDC_BDCSC, temp);

	/*
	 * In some error cases, driver has to reset the entire BDC controller
	 * in that case reinit is passed as 1
	 */
	if (reinit) {
		/* Enable interrupts */
		temp = bdc_readl(bdc->regs, BDC_BDCSC);
		temp |= BDC_GIE;
		bdc_writel(bdc->regs, BDC_BDCSC, temp);
		/* Init scratchpad to 0 */
		memset(bdc->scratchpad.buff, 0, bdc->sp_buff_size);
		/* Initialize SRR to 0 */
		memset(bdc->srr.sr_bds, 0,
		       NUM_SR_ENTRIES * sizeof(struct bdc_bd));
	} else {
		/* One time initiaization only */
		/* Enable status report function pointers */
		bdc->sr_handler[0] = bdc_sr_xsf;
		bdc->sr_handler[1] = bdc_sr_uspc;

		/* EP0 status report function pointers */
		bdc->sr_xsf_ep0[0] = bdc_xsf_ep0_setup_recv;
		bdc->sr_xsf_ep0[1] = bdc_xsf_ep0_data_start;
		bdc->sr_xsf_ep0[2] = bdc_xsf_ep0_status_start;
	}
}
306
307 /* Free the dynamic memory */
bdc_mem_free(struct bdc * bdc)308 static void bdc_mem_free(struct bdc *bdc)
309 {
310 dev_dbg(bdc->dev, "%s\n", __func__);
311 /* Free SRR */
312 if (bdc->srr.sr_bds)
313 dma_free_coherent(bdc->dev,
314 NUM_SR_ENTRIES * sizeof(struct bdc_bd),
315 bdc->srr.sr_bds, bdc->srr.dma_addr);
316
317 /* Free scratchpad */
318 if (bdc->scratchpad.buff)
319 dma_free_coherent(bdc->dev, bdc->sp_buff_size,
320 bdc->scratchpad.buff, bdc->scratchpad.sp_dma);
321
322 /* Destroy the dma pools */
323 dma_pool_destroy(bdc->bd_table_pool);
324
325 /* Free the bdc_ep array */
326 kfree(bdc->bdc_ep_array);
327
328 bdc->srr.sr_bds = NULL;
329 bdc->scratchpad.buff = NULL;
330 bdc->bd_table_pool = NULL;
331 bdc->bdc_ep_array = NULL;
332 }
333
334 /*
335 * bdc reinit gives a controller reset and reinitialize the registers,
336 * called from disconnect/bus reset scenario's, to ensure proper HW cleanup
337 */
bdc_reinit(struct bdc * bdc)338 int bdc_reinit(struct bdc *bdc)
339 {
340 int ret;
341
342 dev_dbg(bdc->dev, "%s\n", __func__);
343 ret = bdc_stop(bdc);
344 if (ret)
345 goto out;
346
347 ret = bdc_reset(bdc);
348 if (ret)
349 goto out;
350
351 /* the reinit flag is 1 */
352 bdc_mem_init(bdc, true);
353 ret = bdc_run(bdc);
354 out:
355 bdc->reinit = false;
356
357 return ret;
358 }
359
360 /* Allocate all the dyanmic memory */
bdc_mem_alloc(struct bdc * bdc)361 static int bdc_mem_alloc(struct bdc *bdc)
362 {
363 u32 page_size;
364 unsigned int num_ieps, num_oeps;
365
366 dev_dbg(bdc->dev,
367 "%s() NUM_BDS_PER_TABLE:%d\n", __func__,
368 NUM_BDS_PER_TABLE);
369 page_size = BDC_PGS(bdc_readl(bdc->regs, BDC_BDCCFG0));
370 /* page size is 2^pgs KB */
371 page_size = 1 << page_size;
372 /* KB */
373 page_size <<= 10;
374 dev_dbg(bdc->dev, "page_size=%d\n", page_size);
375
376 /* Create a pool of bd tables */
377 bdc->bd_table_pool =
378 dma_pool_create("BDC BD tables", bdc->dev, NUM_BDS_PER_TABLE * 16,
379 16, page_size);
380
381 if (!bdc->bd_table_pool)
382 goto fail;
383
384 if (scratchpad_setup(bdc))
385 goto fail;
386
387 /* read from regs */
388 num_ieps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNIC));
389 num_oeps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNOC));
390 /* +2: 1 for ep0 and the other is rsvd i.e. bdc_ep[0] is rsvd */
391 bdc->num_eps = num_ieps + num_oeps + 2;
392 dev_dbg(bdc->dev,
393 "ieps:%d eops:%d num_eps:%d\n",
394 num_ieps, num_oeps, bdc->num_eps);
395 /* allocate array of ep pointers */
396 bdc->bdc_ep_array = kcalloc(bdc->num_eps, sizeof(struct bdc_ep *),
397 GFP_KERNEL);
398 if (!bdc->bdc_ep_array)
399 goto fail;
400
401 dev_dbg(bdc->dev, "Allocating sr report0\n");
402 if (setup_srr(bdc, 0))
403 goto fail;
404
405 return 0;
406 fail:
407 dev_warn(bdc->dev, "Couldn't initialize memory\n");
408 bdc_mem_free(bdc);
409
410 return -ENOMEM;
411 }
412
413 /* opposite to bdc_hw_init */
bdc_hw_exit(struct bdc * bdc)414 static void bdc_hw_exit(struct bdc *bdc)
415 {
416 dev_dbg(bdc->dev, "%s ()\n", __func__);
417 bdc_mem_free(bdc);
418 }
419
420 /* Initialize the bdc HW and memory */
bdc_hw_init(struct bdc * bdc)421 static int bdc_hw_init(struct bdc *bdc)
422 {
423 int ret;
424
425 dev_dbg(bdc->dev, "%s ()\n", __func__);
426 ret = bdc_reset(bdc);
427 if (ret) {
428 dev_err(bdc->dev, "err resetting bdc abort bdc init%d\n", ret);
429 return ret;
430 }
431 ret = bdc_mem_alloc(bdc);
432 if (ret) {
433 dev_err(bdc->dev, "Mem alloc failed, aborting\n");
434 return -ENOMEM;
435 }
436 bdc_mem_init(bdc, 0);
437 bdc_dbg_regs(bdc);
438 dev_dbg(bdc->dev, "HW Init done\n");
439
440 return 0;
441 }
442
bdc_phy_init(struct bdc * bdc)443 static int bdc_phy_init(struct bdc *bdc)
444 {
445 int phy_num;
446 int ret;
447
448 for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
449 ret = phy_init(bdc->phys[phy_num]);
450 if (ret)
451 goto err_exit_phy;
452 ret = phy_power_on(bdc->phys[phy_num]);
453 if (ret) {
454 phy_exit(bdc->phys[phy_num]);
455 goto err_exit_phy;
456 }
457 }
458
459 return 0;
460
461 err_exit_phy:
462 while (--phy_num >= 0) {
463 phy_power_off(bdc->phys[phy_num]);
464 phy_exit(bdc->phys[phy_num]);
465 }
466
467 return ret;
468 }
469
bdc_phy_exit(struct bdc * bdc)470 static void bdc_phy_exit(struct bdc *bdc)
471 {
472 int phy_num;
473
474 for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
475 phy_power_off(bdc->phys[phy_num]);
476 phy_exit(bdc->phys[phy_num]);
477 }
478 }
479
bdc_probe(struct platform_device * pdev)480 static int bdc_probe(struct platform_device *pdev)
481 {
482 struct bdc *bdc;
483 struct resource *res;
484 int ret = -ENOMEM;
485 int irq;
486 u32 temp;
487 struct device *dev = &pdev->dev;
488 struct clk *clk;
489 int phy_num;
490
491 dev_dbg(dev, "%s()\n", __func__);
492
493 clk = devm_clk_get(dev, "sw_usbd");
494 if (IS_ERR(clk)) {
495 dev_info(dev, "Clock not found in Device Tree\n");
496 clk = NULL;
497 }
498
499 ret = clk_prepare_enable(clk);
500 if (ret) {
501 dev_err(dev, "could not enable clock\n");
502 return ret;
503 }
504
505 bdc = devm_kzalloc(dev, sizeof(*bdc), GFP_KERNEL);
506 if (!bdc)
507 return -ENOMEM;
508
509 bdc->clk = clk;
510
511 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
512 bdc->regs = devm_ioremap_resource(dev, res);
513 if (IS_ERR(bdc->regs)) {
514 dev_err(dev, "ioremap error\n");
515 return -ENOMEM;
516 }
517 irq = platform_get_irq(pdev, 0);
518 if (irq < 0)
519 return irq;
520 spin_lock_init(&bdc->lock);
521 platform_set_drvdata(pdev, bdc);
522 bdc->irq = irq;
523 bdc->dev = dev;
524 dev_dbg(dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq);
525
526 bdc->num_phys = of_count_phandle_with_args(dev->of_node,
527 "phys", "#phy-cells");
528 if (bdc->num_phys > 0) {
529 bdc->phys = devm_kcalloc(dev, bdc->num_phys,
530 sizeof(struct phy *), GFP_KERNEL);
531 if (!bdc->phys)
532 return -ENOMEM;
533 } else {
534 bdc->num_phys = 0;
535 }
536 dev_info(dev, "Using %d phy(s)\n", bdc->num_phys);
537
538 for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
539 bdc->phys[phy_num] = devm_of_phy_get_by_index(
540 dev, dev->of_node, phy_num);
541 if (IS_ERR(bdc->phys[phy_num])) {
542 ret = PTR_ERR(bdc->phys[phy_num]);
543 dev_err(bdc->dev,
544 "BDC phy specified but not found:%d\n", ret);
545 return ret;
546 }
547 }
548
549 ret = bdc_phy_init(bdc);
550 if (ret) {
551 dev_err(bdc->dev, "BDC phy init failure:%d\n", ret);
552 return ret;
553 }
554
555 temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
556 if ((temp & BDC_P64) &&
557 !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
558 dev_dbg(dev, "Using 64-bit address\n");
559 } else {
560 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
561 if (ret) {
562 dev_err(dev,
563 "No suitable DMA config available, abort\n");
564 return -ENOTSUPP;
565 }
566 dev_dbg(dev, "Using 32-bit address\n");
567 }
568 ret = bdc_hw_init(bdc);
569 if (ret) {
570 dev_err(dev, "BDC init failure:%d\n", ret);
571 goto phycleanup;
572 }
573 ret = bdc_udc_init(bdc);
574 if (ret) {
575 dev_err(dev, "BDC Gadget init failure:%d\n", ret);
576 goto cleanup;
577 }
578 return 0;
579
580 cleanup:
581 bdc_hw_exit(bdc);
582 phycleanup:
583 bdc_phy_exit(bdc);
584 return ret;
585 }
586
bdc_remove(struct platform_device * pdev)587 static int bdc_remove(struct platform_device *pdev)
588 {
589 struct bdc *bdc;
590
591 bdc = platform_get_drvdata(pdev);
592 dev_dbg(bdc->dev, "%s ()\n", __func__);
593 bdc_udc_exit(bdc);
594 bdc_hw_exit(bdc);
595 bdc_phy_exit(bdc);
596 clk_disable_unprepare(bdc->clk);
597 return 0;
598 }
599
600 #ifdef CONFIG_PM_SLEEP
bdc_suspend(struct device * dev)601 static int bdc_suspend(struct device *dev)
602 {
603 struct bdc *bdc = dev_get_drvdata(dev);
604
605 clk_disable_unprepare(bdc->clk);
606 return 0;
607 }
608
bdc_resume(struct device * dev)609 static int bdc_resume(struct device *dev)
610 {
611 struct bdc *bdc = dev_get_drvdata(dev);
612 int ret;
613
614 ret = clk_prepare_enable(bdc->clk);
615 if (ret) {
616 dev_err(bdc->dev, "err enabling the clock\n");
617 return ret;
618 }
619 ret = bdc_reinit(bdc);
620 if (ret) {
621 dev_err(bdc->dev, "err in bdc reinit\n");
622 return ret;
623 }
624
625 return 0;
626 }
627
628 #endif /* CONFIG_PM_SLEEP */
629
/* PM callbacks; bdc_suspend/bdc_resume are compiled only under CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(bdc_pm_ops, bdc_suspend,
		bdc_resume);

/*
 * Device-tree match table.
 * NOTE(review): there is no MODULE_DEVICE_TABLE(of, bdc_of_match) here, so
 * module autoloading from DT may not work — confirm whether that is intended.
 */
static const struct of_device_id bdc_of_match[] = {
	{ .compatible = "brcm,bdc-v0.16" },
	{ .compatible = "brcm,bdc" },
	{ /* sentinel */ }
};

static struct platform_driver bdc_driver = {
	.driver		= {
		.name	= BRCM_BDC_NAME,
		.pm = &bdc_pm_ops,
		.of_match_table	= bdc_of_match,
	},
	.probe		= bdc_probe,
	.remove		= bdc_remove,
};

module_platform_driver(bdc_driver);
MODULE_AUTHOR("Ashwini Pahuja <ashwini.linux@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(BRCM_BDC_DESC);
653