1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * bdc_core.c - BRCM BDC USB3.0 device controller core operations
4 *
5 * Copyright (C) 2014 Broadcom Corporation
6 *
7 * Author: Ashwini Pahuja
8 */
9 #include <linux/module.h>
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/platform_device.h>
14 #include <linux/interrupt.h>
15 #include <linux/ioport.h>
16 #include <linux/io.h>
17 #include <linux/list.h>
18 #include <linux/delay.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dmapool.h>
21 #include <linux/of.h>
22 #include <linux/phy/phy.h>
23 #include <linux/moduleparam.h>
24 #include <linux/usb/ch9.h>
25 #include <linux/usb/gadget.h>
26 #include <linux/clk.h>
27
28 #include "bdc.h"
29 #include "bdc_dbg.h"
30
31 /* Poll till controller status is not OIP */
poll_oip(struct bdc * bdc,int usec)32 static int poll_oip(struct bdc *bdc, int usec)
33 {
34 u32 status;
35 /* Poll till STS!= OIP */
36 while (usec) {
37 status = bdc_readl(bdc->regs, BDC_BDCSC);
38 if (BDC_CSTS(status) != BDC_OIP) {
39 dev_dbg(bdc->dev,
40 "poll_oip complete status=%d",
41 BDC_CSTS(status));
42 return 0;
43 }
44 udelay(10);
45 usec -= 10;
46 }
47 dev_err(bdc->dev, "Err: operation timedout BDCSC: 0x%08x\n", status);
48
49 return -ETIMEDOUT;
50 }
51
52 /* Stop the BDC controller */
bdc_stop(struct bdc * bdc)53 int bdc_stop(struct bdc *bdc)
54 {
55 int ret;
56 u32 temp;
57
58 dev_dbg(bdc->dev, "%s ()\n\n", __func__);
59 temp = bdc_readl(bdc->regs, BDC_BDCSC);
60 /* Check if BDC is already halted */
61 if (BDC_CSTS(temp) == BDC_HLT) {
62 dev_vdbg(bdc->dev, "BDC already halted\n");
63 return 0;
64 }
65 temp &= ~BDC_COP_MASK;
66 temp |= BDC_COS|BDC_COP_STP;
67 bdc_writel(bdc->regs, BDC_BDCSC, temp);
68
69 ret = poll_oip(bdc, BDC_COP_TIMEOUT);
70 if (ret)
71 dev_err(bdc->dev, "bdc stop operation failed");
72
73 return ret;
74 }
75
76 /* Issue a reset to BDC controller */
bdc_reset(struct bdc * bdc)77 int bdc_reset(struct bdc *bdc)
78 {
79 u32 temp;
80 int ret;
81
82 dev_dbg(bdc->dev, "%s ()\n", __func__);
83 /* First halt the controller */
84 ret = bdc_stop(bdc);
85 if (ret)
86 return ret;
87
88 temp = bdc_readl(bdc->regs, BDC_BDCSC);
89 temp &= ~BDC_COP_MASK;
90 temp |= BDC_COS|BDC_COP_RST;
91 bdc_writel(bdc->regs, BDC_BDCSC, temp);
92 ret = poll_oip(bdc, BDC_COP_TIMEOUT);
93 if (ret)
94 dev_err(bdc->dev, "bdc reset operation failed");
95
96 return ret;
97 }
98
99 /* Run the BDC controller */
bdc_run(struct bdc * bdc)100 int bdc_run(struct bdc *bdc)
101 {
102 u32 temp;
103 int ret;
104
105 dev_dbg(bdc->dev, "%s ()\n", __func__);
106 temp = bdc_readl(bdc->regs, BDC_BDCSC);
107 /* if BDC is already in running state then do not do anything */
108 if (BDC_CSTS(temp) == BDC_NOR) {
109 dev_warn(bdc->dev, "bdc is already in running state\n");
110 return 0;
111 }
112 temp &= ~BDC_COP_MASK;
113 temp |= BDC_COP_RUN;
114 temp |= BDC_COS;
115 bdc_writel(bdc->regs, BDC_BDCSC, temp);
116 ret = poll_oip(bdc, BDC_COP_TIMEOUT);
117 if (ret) {
118 dev_err(bdc->dev, "bdc run operation failed:%d", ret);
119 return ret;
120 }
121 temp = bdc_readl(bdc->regs, BDC_BDCSC);
122 if (BDC_CSTS(temp) != BDC_NOR) {
123 dev_err(bdc->dev, "bdc not in normal mode after RUN op :%d\n",
124 BDC_CSTS(temp));
125 return -ESHUTDOWN;
126 }
127
128 return 0;
129 }
130
131 /*
132 * Present the termination to the host, typically called from upstream port
133 * event with Vbus present =1
134 */
bdc_softconn(struct bdc * bdc)135 void bdc_softconn(struct bdc *bdc)
136 {
137 u32 uspc;
138
139 uspc = bdc_readl(bdc->regs, BDC_USPC);
140 uspc &= ~BDC_PST_MASK;
141 uspc |= BDC_LINK_STATE_RX_DET;
142 uspc |= BDC_SWS;
143 dev_dbg(bdc->dev, "%s () uspc=%08x\n", __func__, uspc);
144 bdc_writel(bdc->regs, BDC_USPC, uspc);
145 }
146
147 /* Remove the termination */
bdc_softdisconn(struct bdc * bdc)148 void bdc_softdisconn(struct bdc *bdc)
149 {
150 u32 uspc;
151
152 uspc = bdc_readl(bdc->regs, BDC_USPC);
153 uspc |= BDC_SDC;
154 uspc &= ~BDC_SCN;
155 dev_dbg(bdc->dev, "%s () uspc=%x\n", __func__, uspc);
156 bdc_writel(bdc->regs, BDC_USPC, uspc);
157 }
158
159 /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
scratchpad_setup(struct bdc * bdc)160 static int scratchpad_setup(struct bdc *bdc)
161 {
162 int sp_buff_size;
163 u32 low32;
164 u32 upp32;
165
166 sp_buff_size = BDC_SPB(bdc_readl(bdc->regs, BDC_BDCCFG0));
167 dev_dbg(bdc->dev, "%s() sp_buff_size=%d\n", __func__, sp_buff_size);
168 if (!sp_buff_size) {
169 dev_dbg(bdc->dev, "Scratchpad buffer not needed\n");
170 return 0;
171 }
172 /* Refer to BDC spec, Table 4 for description of SPB */
173 sp_buff_size = 1 << (sp_buff_size + 5);
174 dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size);
175 bdc->scratchpad.buff = dma_zalloc_coherent(bdc->dev, sp_buff_size,
176 &bdc->scratchpad.sp_dma, GFP_KERNEL);
177
178 if (!bdc->scratchpad.buff)
179 goto fail;
180
181 bdc->sp_buff_size = sp_buff_size;
182 bdc->scratchpad.size = sp_buff_size;
183 low32 = lower_32_bits(bdc->scratchpad.sp_dma);
184 upp32 = upper_32_bits(bdc->scratchpad.sp_dma);
185 cpu_to_le32s(&low32);
186 cpu_to_le32s(&upp32);
187 bdc_writel(bdc->regs, BDC_SPBBAL, low32);
188 bdc_writel(bdc->regs, BDC_SPBBAH, upp32);
189 return 0;
190
191 fail:
192 bdc->scratchpad.buff = NULL;
193
194 return -ENOMEM;
195 }
196
197 /* Allocate the status report ring */
setup_srr(struct bdc * bdc,int interrupter)198 static int setup_srr(struct bdc *bdc, int interrupter)
199 {
200 dev_dbg(bdc->dev, "%s() NUM_SR_ENTRIES:%d\n", __func__, NUM_SR_ENTRIES);
201 /* Reset the SRR */
202 bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST);
203 bdc->srr.dqp_index = 0;
204 /* allocate the status report descriptors */
205 bdc->srr.sr_bds = dma_zalloc_coherent(
206 bdc->dev,
207 NUM_SR_ENTRIES * sizeof(struct bdc_bd),
208 &bdc->srr.dma_addr,
209 GFP_KERNEL);
210 if (!bdc->srr.sr_bds)
211 return -ENOMEM;
212
213 return 0;
214 }
215
/*
 * Initialize the HW registers and internal data structures.
 *
 * @bdc:    controller context
 * @reinit: true when called after a full controller reset from an error /
 *          disconnect path (bdc_reinit); false for the one-time init from
 *          bdc_hw_init. The register programming is identical either way;
 *          only the tail of the function differs (zero existing buffers
 *          vs. install the status-report handler tables).
 */
static void bdc_mem_init(struct bdc *bdc, bool reinit)
{
	u8 size = 0;
	u32 usb2_pm;
	u32 low32;
	u32 upp32;
	u32 temp;

	dev_dbg(bdc->dev, "%s ()\n", __func__);
	/* Reset the driver-side device/ep0 state machine */
	bdc->ep0_state = WAIT_FOR_SETUP;
	bdc->dev_addr = 0;
	bdc->srr.eqp_index = 0;
	bdc->srr.dqp_index = 0;
	bdc->zlp_needed = false;
	bdc->delayed_status = false;

	/*
	 * NOTE(review): sp_dma is implicitly truncated to 32 bits here and
	 * only SPBBAL is written, while scratchpad_setup() programs both
	 * SPBBAL and SPBBAH — confirm whether SPBBAH also needs rewriting
	 * here for 64-bit scratchpad addresses.
	 */
	bdc_writel(bdc->regs, BDC_SPBBAL, bdc->scratchpad.sp_dma);
	/* Init the SRR */
	temp = BDC_SRR_RWS | BDC_SRR_RST;
	/* Reset the SRR */
	bdc_writel(bdc->regs, BDC_SRRINT(0), temp);
	dev_dbg(bdc->dev, "bdc->srr.sr_bds =%p\n", bdc->srr.sr_bds);
	/*
	 * The ring size appears to be encoded in the low bits of the base
	 * address: for a power-of-two NUM_SR_ENTRIES, fls(n) - 2 is
	 * log2(n) - 1 — TODO confirm against the BDC register spec.
	 */
	temp = lower_32_bits(bdc->srr.dma_addr);
	size = fls(NUM_SR_ENTRIES) - 2;
	temp |= size;
	dev_dbg(bdc->dev, "SRRBAL[0]=%08x NUM_SR_ENTRIES:%d size:%d\n",
		temp, NUM_SR_ENTRIES, size);

	/* temp is already u32, so lower_32_bits() is a no-op kept for symmetry */
	low32 = lower_32_bits(temp);
	upp32 = upper_32_bits(bdc->srr.dma_addr);
	cpu_to_le32s(&low32);
	cpu_to_le32s(&upp32);

	/* Write the dma addresses into regs*/
	bdc_writel(bdc->regs, BDC_SRRBAL(0), low32);
	bdc_writel(bdc->regs, BDC_SRRBAH(0), upp32);

	/* Enable the SRR interrupt and release the ring from reset */
	temp = bdc_readl(bdc->regs, BDC_SRRINT(0));
	temp |= BDC_SRR_IE;
	temp &= ~(BDC_SRR_RST | BDC_SRR_RWS);
	bdc_writel(bdc->regs, BDC_SRRINT(0), temp);

	/* Set the Interrupt Coalescence ~500 usec */
	temp = bdc_readl(bdc->regs, BDC_INTCTLS(0));
	temp &= ~0xffff;
	temp |= INT_CLS;
	bdc_writel(bdc->regs, BDC_INTCTLS(0), temp);

	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
	/* Enable hardware LPM Enable */
	usb2_pm |= BDC_HLE;
	bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);

	/* readback for debug */
	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	dev_dbg(bdc->dev, "usb2_pm=%08x\n", usb2_pm);

	/* Disable any unwanted SR's on SRR */
	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	/* We don't want Microframe counter wrap SR */
	temp |= BDC_MASK_MCW;
	bdc_writel(bdc->regs, BDC_BDCSC, temp);

	/*
	 * In some error cases, driver has to reset the entire BDC controller
	 * in that case reinit is passed as 1
	 */
	if (reinit) {
		/* Enable interrupts */
		temp = bdc_readl(bdc->regs, BDC_BDCSC);
		temp |= BDC_GIE;
		bdc_writel(bdc->regs, BDC_BDCSC, temp);
		/* Init scratchpad to 0 */
		memset(bdc->scratchpad.buff, 0, bdc->sp_buff_size);
		/* Initialize SRR to 0 */
		memset(bdc->srr.sr_bds, 0,
		       NUM_SR_ENTRIES * sizeof(struct bdc_bd));
	} else {
		/* One time initiaization only */
		/* Enable status report function pointers */
		bdc->sr_handler[0] = bdc_sr_xsf;
		bdc->sr_handler[1] = bdc_sr_uspc;

		/* EP0 status report function pointers */
		bdc->sr_xsf_ep0[0] = bdc_xsf_ep0_setup_recv;
		bdc->sr_xsf_ep0[1] = bdc_xsf_ep0_data_start;
		bdc->sr_xsf_ep0[2] = bdc_xsf_ep0_status_start;
	}
}
307
308 /* Free the dynamic memory */
bdc_mem_free(struct bdc * bdc)309 static void bdc_mem_free(struct bdc *bdc)
310 {
311 dev_dbg(bdc->dev, "%s\n", __func__);
312 /* Free SRR */
313 if (bdc->srr.sr_bds)
314 dma_free_coherent(bdc->dev,
315 NUM_SR_ENTRIES * sizeof(struct bdc_bd),
316 bdc->srr.sr_bds, bdc->srr.dma_addr);
317
318 /* Free scratchpad */
319 if (bdc->scratchpad.buff)
320 dma_free_coherent(bdc->dev, bdc->sp_buff_size,
321 bdc->scratchpad.buff, bdc->scratchpad.sp_dma);
322
323 /* Destroy the dma pools */
324 dma_pool_destroy(bdc->bd_table_pool);
325
326 /* Free the bdc_ep array */
327 kfree(bdc->bdc_ep_array);
328
329 bdc->srr.sr_bds = NULL;
330 bdc->scratchpad.buff = NULL;
331 bdc->bd_table_pool = NULL;
332 bdc->bdc_ep_array = NULL;
333 }
334
335 /*
336 * bdc reinit gives a controller reset and reinitialize the registers,
337 * called from disconnect/bus reset scenario's, to ensure proper HW cleanup
338 */
bdc_reinit(struct bdc * bdc)339 int bdc_reinit(struct bdc *bdc)
340 {
341 int ret;
342
343 dev_dbg(bdc->dev, "%s\n", __func__);
344 ret = bdc_stop(bdc);
345 if (ret)
346 goto out;
347
348 ret = bdc_reset(bdc);
349 if (ret)
350 goto out;
351
352 /* the reinit flag is 1 */
353 bdc_mem_init(bdc, true);
354 ret = bdc_run(bdc);
355 out:
356 bdc->reinit = false;
357
358 return ret;
359 }
360
361 /* Allocate all the dyanmic memory */
bdc_mem_alloc(struct bdc * bdc)362 static int bdc_mem_alloc(struct bdc *bdc)
363 {
364 u32 page_size;
365 unsigned int num_ieps, num_oeps;
366
367 dev_dbg(bdc->dev,
368 "%s() NUM_BDS_PER_TABLE:%d\n", __func__,
369 NUM_BDS_PER_TABLE);
370 page_size = BDC_PGS(bdc_readl(bdc->regs, BDC_BDCCFG0));
371 /* page size is 2^pgs KB */
372 page_size = 1 << page_size;
373 /* KB */
374 page_size <<= 10;
375 dev_dbg(bdc->dev, "page_size=%d\n", page_size);
376
377 /* Create a pool of bd tables */
378 bdc->bd_table_pool =
379 dma_pool_create("BDC BD tables", bdc->dev, NUM_BDS_PER_TABLE * 16,
380 16, page_size);
381
382 if (!bdc->bd_table_pool)
383 goto fail;
384
385 if (scratchpad_setup(bdc))
386 goto fail;
387
388 /* read from regs */
389 num_ieps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNIC));
390 num_oeps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNOC));
391 /* +2: 1 for ep0 and the other is rsvd i.e. bdc_ep[0] is rsvd */
392 bdc->num_eps = num_ieps + num_oeps + 2;
393 dev_dbg(bdc->dev,
394 "ieps:%d eops:%d num_eps:%d\n",
395 num_ieps, num_oeps, bdc->num_eps);
396 /* allocate array of ep pointers */
397 bdc->bdc_ep_array = kcalloc(bdc->num_eps, sizeof(struct bdc_ep *),
398 GFP_KERNEL);
399 if (!bdc->bdc_ep_array)
400 goto fail;
401
402 dev_dbg(bdc->dev, "Allocating sr report0\n");
403 if (setup_srr(bdc, 0))
404 goto fail;
405
406 return 0;
407 fail:
408 dev_warn(bdc->dev, "Couldn't initialize memory\n");
409 bdc_mem_free(bdc);
410
411 return -ENOMEM;
412 }
413
/* opposite to bdc_hw_init */
static void bdc_hw_exit(struct bdc *bdc)
{
	dev_dbg(bdc->dev, "%s ()\n", __func__);
	/* Releases the SRR, scratchpad, bd-table pool and the ep array */
	bdc_mem_free(bdc);
}
420
421 /* Initialize the bdc HW and memory */
bdc_hw_init(struct bdc * bdc)422 static int bdc_hw_init(struct bdc *bdc)
423 {
424 int ret;
425
426 dev_dbg(bdc->dev, "%s ()\n", __func__);
427 ret = bdc_reset(bdc);
428 if (ret) {
429 dev_err(bdc->dev, "err resetting bdc abort bdc init%d\n", ret);
430 return ret;
431 }
432 ret = bdc_mem_alloc(bdc);
433 if (ret) {
434 dev_err(bdc->dev, "Mem alloc failed, aborting\n");
435 return -ENOMEM;
436 }
437 bdc_mem_init(bdc, 0);
438 bdc_dbg_regs(bdc);
439 dev_dbg(bdc->dev, "HW Init done\n");
440
441 return 0;
442 }
443
bdc_phy_init(struct bdc * bdc)444 static int bdc_phy_init(struct bdc *bdc)
445 {
446 int phy_num;
447 int ret;
448
449 for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
450 ret = phy_init(bdc->phys[phy_num]);
451 if (ret)
452 goto err_exit_phy;
453 ret = phy_power_on(bdc->phys[phy_num]);
454 if (ret) {
455 phy_exit(bdc->phys[phy_num]);
456 goto err_exit_phy;
457 }
458 }
459
460 return 0;
461
462 err_exit_phy:
463 while (--phy_num >= 0) {
464 phy_power_off(bdc->phys[phy_num]);
465 phy_exit(bdc->phys[phy_num]);
466 }
467
468 return ret;
469 }
470
bdc_phy_exit(struct bdc * bdc)471 static void bdc_phy_exit(struct bdc *bdc)
472 {
473 int phy_num;
474
475 for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
476 phy_power_off(bdc->phys[phy_num]);
477 phy_exit(bdc->phys[phy_num]);
478 }
479 }
480
bdc_probe(struct platform_device * pdev)481 static int bdc_probe(struct platform_device *pdev)
482 {
483 struct bdc *bdc;
484 struct resource *res;
485 int ret = -ENOMEM;
486 int irq;
487 u32 temp;
488 struct device *dev = &pdev->dev;
489 struct clk *clk;
490 int phy_num;
491
492 dev_dbg(dev, "%s()\n", __func__);
493
494 clk = devm_clk_get(dev, "sw_usbd");
495 if (IS_ERR(clk)) {
496 dev_info(dev, "Clock not found in Device Tree\n");
497 clk = NULL;
498 }
499
500 ret = clk_prepare_enable(clk);
501 if (ret) {
502 dev_err(dev, "could not enable clock\n");
503 return ret;
504 }
505
506 bdc = devm_kzalloc(dev, sizeof(*bdc), GFP_KERNEL);
507 if (!bdc)
508 return -ENOMEM;
509
510 bdc->clk = clk;
511
512 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
513 bdc->regs = devm_ioremap_resource(dev, res);
514 if (IS_ERR(bdc->regs)) {
515 dev_err(dev, "ioremap error\n");
516 return -ENOMEM;
517 }
518 irq = platform_get_irq(pdev, 0);
519 if (irq < 0) {
520 dev_err(dev, "platform_get_irq failed:%d\n", irq);
521 return irq;
522 }
523 spin_lock_init(&bdc->lock);
524 platform_set_drvdata(pdev, bdc);
525 bdc->irq = irq;
526 bdc->dev = dev;
527 dev_dbg(dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq);
528
529 bdc->num_phys = of_count_phandle_with_args(dev->of_node,
530 "phys", "#phy-cells");
531 if (bdc->num_phys > 0) {
532 bdc->phys = devm_kcalloc(dev, bdc->num_phys,
533 sizeof(struct phy *), GFP_KERNEL);
534 if (!bdc->phys)
535 return -ENOMEM;
536 } else {
537 bdc->num_phys = 0;
538 }
539 dev_info(dev, "Using %d phy(s)\n", bdc->num_phys);
540
541 for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
542 bdc->phys[phy_num] = devm_of_phy_get_by_index(
543 dev, dev->of_node, phy_num);
544 if (IS_ERR(bdc->phys[phy_num])) {
545 ret = PTR_ERR(bdc->phys[phy_num]);
546 dev_err(bdc->dev,
547 "BDC phy specified but not found:%d\n", ret);
548 return ret;
549 }
550 }
551
552 ret = bdc_phy_init(bdc);
553 if (ret) {
554 dev_err(bdc->dev, "BDC phy init failure:%d\n", ret);
555 return ret;
556 }
557
558 temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
559 if ((temp & BDC_P64) &&
560 !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
561 dev_dbg(dev, "Using 64-bit address\n");
562 } else {
563 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
564 if (ret) {
565 dev_err(dev,
566 "No suitable DMA config available, abort\n");
567 return -ENOTSUPP;
568 }
569 dev_dbg(dev, "Using 32-bit address\n");
570 }
571 ret = bdc_hw_init(bdc);
572 if (ret) {
573 dev_err(dev, "BDC init failure:%d\n", ret);
574 goto phycleanup;
575 }
576 ret = bdc_udc_init(bdc);
577 if (ret) {
578 dev_err(dev, "BDC Gadget init failure:%d\n", ret);
579 goto cleanup;
580 }
581 return 0;
582
583 cleanup:
584 bdc_hw_exit(bdc);
585 phycleanup:
586 bdc_phy_exit(bdc);
587 return ret;
588 }
589
bdc_remove(struct platform_device * pdev)590 static int bdc_remove(struct platform_device *pdev)
591 {
592 struct bdc *bdc;
593
594 bdc = platform_get_drvdata(pdev);
595 dev_dbg(bdc->dev, "%s ()\n", __func__);
596 bdc_udc_exit(bdc);
597 bdc_hw_exit(bdc);
598 bdc_phy_exit(bdc);
599 clk_disable_unprepare(bdc->clk);
600 return 0;
601 }
602
603 #ifdef CONFIG_PM_SLEEP
bdc_suspend(struct device * dev)604 static int bdc_suspend(struct device *dev)
605 {
606 struct bdc *bdc = dev_get_drvdata(dev);
607
608 clk_disable_unprepare(bdc->clk);
609 return 0;
610 }
611
bdc_resume(struct device * dev)612 static int bdc_resume(struct device *dev)
613 {
614 struct bdc *bdc = dev_get_drvdata(dev);
615 int ret;
616
617 ret = clk_prepare_enable(bdc->clk);
618 if (ret) {
619 dev_err(bdc->dev, "err enabling the clock\n");
620 return ret;
621 }
622 ret = bdc_reinit(bdc);
623 if (ret) {
624 dev_err(bdc->dev, "err in bdc reinit\n");
625 return ret;
626 }
627
628 return 0;
629 }
630
631 #endif /* CONFIG_PM_SLEEP */
632
/* PM callbacks: system suspend/resume only (no-ops unless CONFIG_PM_SLEEP) */
static SIMPLE_DEV_PM_OPS(bdc_pm_ops, bdc_suspend,
	bdc_resume);

/* Device-tree compatible strings this driver binds to */
static const struct of_device_id bdc_of_match[] = {
	{ .compatible = "brcm,bdc-v0.16" },
	{ .compatible = "brcm,bdc" },
	{ /* sentinel */ }
};
/*
 * NOTE(review): there is no MODULE_DEVICE_TABLE(of, bdc_of_match), so the
 * module will not be auto-loaded on an OF match — confirm whether that is
 * intentional.
 */

static struct platform_driver bdc_driver = {
	.driver		= {
		.name	= BRCM_BDC_NAME,
		.pm = &bdc_pm_ops,
		.of_match_table	= bdc_of_match,
	},
	.probe	= bdc_probe,
	.remove	= bdc_remove,
};

module_platform_driver(bdc_driver);
MODULE_AUTHOR("Ashwini Pahuja <ashwini.linux@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(BRCM_BDC_DESC);
656