xref: /linux/drivers/usb/gadget/udc/bdc/bdc_core.c (revision 9a6b55ac)
// SPDX-License-Identifier: GPL-2.0+
/*
 * bdc_core.c - BRCM BDC USB3.0 device controller core operations
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * Author: Ashwini Pahuja
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/moduleparam.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/clk.h>

#include "bdc.h"
#include "bdc_dbg.h"

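/*
 * Controller operations (stop/reset/run) are requested by writing an
 * operation code into the BDCSC register and setting BDC_COS; while the
 * command executes, the status field reads OIP (operation in progress),
 * so callers poll until it changes.
 */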
/* Poll till controller status is not OIP */
static int poll_oip(struct bdc *bdc, int usec)
{
	u32 status;
	/* Poll till STS != OIP */
	while (usec) {
		status = bdc_readl(bdc->regs, BDC_BDCSC);
		if (BDC_CSTS(status) != BDC_OIP) {
			dev_dbg(bdc->dev,
				"poll_oip complete status=%d\n",
				BDC_CSTS(status));
			return 0;
		}
		udelay(10);
		usec -= 10;
	}
	dev_err(bdc->dev, "Err: operation timed out BDCSC: 0x%08x\n", status);

	return -ETIMEDOUT;
}

/* Stop the BDC controller */
int bdc_stop(struct bdc *bdc)
{
	int ret;
	u32 temp;

	dev_dbg(bdc->dev, "%s ()\n\n", __func__);
	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	/* Check if BDC is already halted */
	if (BDC_CSTS(temp) == BDC_HLT) {
		dev_vdbg(bdc->dev, "BDC already halted\n");
		return 0;
	}
	temp &= ~BDC_COP_MASK;
	temp |= BDC_COS|BDC_COP_STP;
	bdc_writel(bdc->regs, BDC_BDCSC, temp);

	ret = poll_oip(bdc, BDC_COP_TIMEOUT);
	if (ret)
		dev_err(bdc->dev, "bdc stop operation failed\n");

	return ret;
}

/* Issue a reset to BDC controller */
int bdc_reset(struct bdc *bdc)
{
	u32 temp;
	int ret;

	dev_dbg(bdc->dev, "%s ()\n", __func__);
	/* First halt the controller */
	ret = bdc_stop(bdc);
	if (ret)
		return ret;

	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	temp &= ~BDC_COP_MASK;
	temp |= BDC_COS|BDC_COP_RST;
	bdc_writel(bdc->regs, BDC_BDCSC, temp);
	ret = poll_oip(bdc, BDC_COP_TIMEOUT);
	if (ret)
		dev_err(bdc->dev, "bdc reset operation failed\n");

	return ret;
}

/* Run the BDC controller */
int bdc_run(struct bdc *bdc)
{
	u32 temp;
	int ret;

	dev_dbg(bdc->dev, "%s ()\n", __func__);
	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	/* if BDC is already in running state then do not do anything */
	if (BDC_CSTS(temp) == BDC_NOR) {
		dev_warn(bdc->dev, "bdc is already in running state\n");
		return 0;
	}
	temp &= ~BDC_COP_MASK;
	temp |= BDC_COP_RUN;
	temp |= BDC_COS;
	bdc_writel(bdc->regs, BDC_BDCSC, temp);
	ret = poll_oip(bdc, BDC_COP_TIMEOUT);
	if (ret) {
		dev_err(bdc->dev, "bdc run operation failed:%d\n", ret);
		return ret;
	}
	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	if (BDC_CSTS(temp) != BDC_NOR) {
		dev_err(bdc->dev, "bdc not in normal mode after RUN op: %d\n",
								BDC_CSTS(temp));
		return -ESHUTDOWN;
	}

	return 0;
}

/*
 * Present the termination to the host, typically called from an upstream
 * port event with Vbus present = 1
 */
void bdc_softconn(struct bdc *bdc)
{
	u32 uspc;

	uspc = bdc_readl(bdc->regs, BDC_USPC);
	uspc &= ~BDC_PST_MASK;
	uspc |= BDC_LINK_STATE_RX_DET;
	uspc |= BDC_SWS;
	dev_dbg(bdc->dev, "%s () uspc=%08x\n", __func__, uspc);
	bdc_writel(bdc->regs, BDC_USPC, uspc);
}

/* Remove the termination */
void bdc_softdisconn(struct bdc *bdc)
{
	u32 uspc;

	uspc = bdc_readl(bdc->regs, BDC_USPC);
	uspc |= BDC_SDC;
	uspc &= ~BDC_SCN;
	dev_dbg(bdc->dev, "%s () uspc=%x\n", __func__, uspc);
	bdc_writel(bdc->regs, BDC_USPC, uspc);
}

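/*
 * The scratchpad is memory set aside for the controller's own use: the
 * driver only allocates it and programs its DMA address into SPBBAL/SPBBAH;
 * the hardware manages its contents.
 */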
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_setup(struct bdc *bdc)
{
	int sp_buff_size;
	u32 low32;
	u32 upp32;

	sp_buff_size = BDC_SPB(bdc_readl(bdc->regs, BDC_BDCCFG0));
	dev_dbg(bdc->dev, "%s() sp_buff_size=%d\n", __func__, sp_buff_size);
	if (!sp_buff_size) {
		dev_dbg(bdc->dev, "Scratchpad buffer not needed\n");
		return 0;
	}
	/* Refer to BDC spec, Table 4 for description of SPB */
	/* An SPB value of n means a 2^(n+5) byte scratchpad */
	sp_buff_size = 1 << (sp_buff_size + 5);
	dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size);
	bdc->scratchpad.buff = dma_alloc_coherent(bdc->dev, sp_buff_size,
						  &bdc->scratchpad.sp_dma,
						  GFP_KERNEL);

	if (!bdc->scratchpad.buff)
		goto fail;

	bdc->sp_buff_size = sp_buff_size;
	bdc->scratchpad.size = sp_buff_size;
	low32 = lower_32_bits(bdc->scratchpad.sp_dma);
	upp32 = upper_32_bits(bdc->scratchpad.sp_dma);
	cpu_to_le32s(&low32);
	cpu_to_le32s(&upp32);
	bdc_writel(bdc->regs, BDC_SPBBAL, low32);
	bdc_writel(bdc->regs, BDC_SPBBAH, upp32);
	return 0;

fail:
	bdc->scratchpad.buff = NULL;

	return -ENOMEM;
}

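/*
 * The status report ring (SRR) is where the controller posts status
 * reports (transfer and port events); eqp_index/dqp_index track the
 * enqueue and dequeue positions within its NUM_SR_ENTRIES entries.
 */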
/* Allocate the status report ring */
static int setup_srr(struct bdc *bdc, int interrupter)
{
	dev_dbg(bdc->dev, "%s() NUM_SR_ENTRIES:%d\n", __func__, NUM_SR_ENTRIES);
	/* Reset the SRR */
	bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST);
	bdc->srr.dqp_index = 0;
	/* allocate the status report descriptors */
	bdc->srr.sr_bds = dma_alloc_coherent(bdc->dev,
					     NUM_SR_ENTRIES * sizeof(struct bdc_bd),
					     &bdc->srr.dma_addr, GFP_KERNEL);
	if (!bdc->srr.sr_bds)
		return -ENOMEM;

	return 0;
}

/* Initialize the HW regs and internal data structures */
static void bdc_mem_init(struct bdc *bdc, bool reinit)
{
	u8 size = 0;
	u32 usb2_pm;
	u32 low32;
	u32 upp32;
	u32 temp;

	dev_dbg(bdc->dev, "%s ()\n", __func__);
	bdc->ep0_state = WAIT_FOR_SETUP;
	bdc->dev_addr = 0;
	bdc->srr.eqp_index = 0;
	bdc->srr.dqp_index = 0;
	bdc->zlp_needed = false;
	bdc->delayed_status = false;

	bdc_writel(bdc->regs, BDC_SPBBAL, bdc->scratchpad.sp_dma);
	/* Init the SRR */
	temp = BDC_SRR_RWS | BDC_SRR_RST;
	/* Reset the SRR */
	bdc_writel(bdc->regs, BDC_SRRINT(0), temp);
	dev_dbg(bdc->dev, "bdc->srr.sr_bds =%p\n", bdc->srr.sr_bds);
	temp = lower_32_bits(bdc->srr.dma_addr);
	/* SRRBAL low bits encode the ring size: fls(n) - 2 == log2(n) - 1 */
	size = fls(NUM_SR_ENTRIES) - 2;
	temp |= size;
	dev_dbg(bdc->dev, "SRRBAL[0]=%08x NUM_SR_ENTRIES:%d size:%d\n",
						temp, NUM_SR_ENTRIES, size);

	low32 = lower_32_bits(temp);
	upp32 = upper_32_bits(bdc->srr.dma_addr);
	cpu_to_le32s(&low32);
	cpu_to_le32s(&upp32);

	/* Write the dma addresses into regs */
	bdc_writel(bdc->regs, BDC_SRRBAL(0), low32);
	bdc_writel(bdc->regs, BDC_SRRBAH(0), upp32);

	temp = bdc_readl(bdc->regs, BDC_SRRINT(0));
	temp |= BDC_SRR_IE;
	temp &= ~(BDC_SRR_RST | BDC_SRR_RWS);
	bdc_writel(bdc->regs, BDC_SRRINT(0), temp);

	/* Set the Interrupt Coalescence to ~500 usec */
	temp = bdc_readl(bdc->regs, BDC_INTCTLS(0));
	temp &= ~0xffff;
	temp |= INT_CLS;
	bdc_writel(bdc->regs, BDC_INTCTLS(0), temp);

	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	dev_dbg(bdc->dev, "usb2_pm=%08x\n", usb2_pm);
	/* Enable hardware LPM */
	usb2_pm |= BDC_HLE;
	bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);

	/* readback for debug */
	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	dev_dbg(bdc->dev, "usb2_pm=%08x\n", usb2_pm);

	/* Disable any unwanted SR's on SRR */
	temp = bdc_readl(bdc->regs, BDC_BDCSC);
	/* We don't want Microframe counter wrap SR */
	temp |= BDC_MASK_MCW;
	bdc_writel(bdc->regs, BDC_BDCSC, temp);

	/*
	 * In some error cases the driver has to reset the entire BDC
	 * controller; in that case reinit is passed as true
	 */
	if (reinit) {
		/* Enable interrupts */
		temp = bdc_readl(bdc->regs, BDC_BDCSC);
		temp |= BDC_GIE;
		bdc_writel(bdc->regs, BDC_BDCSC, temp);
		/* Init scratchpad to 0 */
		memset(bdc->scratchpad.buff, 0, bdc->sp_buff_size);
		/* Initialize SRR to 0 */
		memset(bdc->srr.sr_bds, 0,
					NUM_SR_ENTRIES * sizeof(struct bdc_bd));
	} else {
		/* One-time initialization only */
		/* Enable status report function pointers */
		bdc->sr_handler[0] = bdc_sr_xsf;
		bdc->sr_handler[1] = bdc_sr_uspc;

		/* EP0 status report function pointers */
		bdc->sr_xsf_ep0[0] = bdc_xsf_ep0_setup_recv;
		bdc->sr_xsf_ep0[1] = bdc_xsf_ep0_data_start;
		bdc->sr_xsf_ep0[2] = bdc_xsf_ep0_status_start;
	}
}

/* Free the dynamic memory */
static void bdc_mem_free(struct bdc *bdc)
{
	dev_dbg(bdc->dev, "%s\n", __func__);
	/* Free SRR */
	if (bdc->srr.sr_bds)
		dma_free_coherent(bdc->dev,
					NUM_SR_ENTRIES * sizeof(struct bdc_bd),
					bdc->srr.sr_bds, bdc->srr.dma_addr);

	/* Free scratchpad */
	if (bdc->scratchpad.buff)
		dma_free_coherent(bdc->dev, bdc->sp_buff_size,
				bdc->scratchpad.buff, bdc->scratchpad.sp_dma);

	/* Destroy the dma pools */
	dma_pool_destroy(bdc->bd_table_pool);

	/* Free the bdc_ep array */
	kfree(bdc->bdc_ep_array);

	bdc->srr.sr_bds = NULL;
	bdc->scratchpad.buff = NULL;
	bdc->bd_table_pool = NULL;
	bdc->bdc_ep_array = NULL;
}

/*
 * bdc_reinit resets the controller and reinitializes the registers; it is
 * called from disconnect/bus-reset scenarios to ensure proper HW cleanup
 */
int bdc_reinit(struct bdc *bdc)
{
	int ret;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ret = bdc_stop(bdc);
	if (ret)
		goto out;

	ret = bdc_reset(bdc);
	if (ret)
		goto out;

	/* pass reinit as true */
	bdc_mem_init(bdc, true);
	ret = bdc_run(bdc);
out:
	bdc->reinit = false;

	return ret;
}

/* Allocate all the dynamic memory */
static int bdc_mem_alloc(struct bdc *bdc)
{
	u32 page_size;
	unsigned int num_ieps, num_oeps;

	dev_dbg(bdc->dev,
		"%s() NUM_BDS_PER_TABLE:%d\n", __func__,
		NUM_BDS_PER_TABLE);
	page_size = BDC_PGS(bdc_readl(bdc->regs, BDC_BDCCFG0));
	/* page size is 2^pgs KB */
	page_size = 1 << page_size;
	/* KB */
	page_size <<= 10;
	dev_dbg(bdc->dev, "page_size=%d\n", page_size);

	/* Create a pool of bd tables; each BD is 16 bytes */
	bdc->bd_table_pool =
	    dma_pool_create("BDC BD tables", bdc->dev, NUM_BDS_PER_TABLE * 16,
								16, page_size);

	if (!bdc->bd_table_pool)
		goto fail;

	if (scratchpad_setup(bdc))
		goto fail;

	/* read the number of IN and OUT endpoints from regs */
	num_ieps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNIC));
	num_oeps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNOC));
	/* +2: 1 for ep0 and the other is rsvd, i.e. bdc_ep[0] is rsvd */
	bdc->num_eps = num_ieps + num_oeps + 2;
	dev_dbg(bdc->dev,
		"ieps:%d oeps:%d num_eps:%d\n",
		num_ieps, num_oeps, bdc->num_eps);
	/* allocate array of ep pointers */
	bdc->bdc_ep_array = kcalloc(bdc->num_eps, sizeof(struct bdc_ep *),
								GFP_KERNEL);
	if (!bdc->bdc_ep_array)
		goto fail;

	dev_dbg(bdc->dev, "Allocating sr report0\n");
	if (setup_srr(bdc, 0))
		goto fail;

	return 0;
fail:
	dev_warn(bdc->dev, "Couldn't initialize memory\n");
	bdc_mem_free(bdc);

	return -ENOMEM;
}

/* Opposite of bdc_hw_init */
static void bdc_hw_exit(struct bdc *bdc)
{
	dev_dbg(bdc->dev, "%s ()\n", __func__);
	bdc_mem_free(bdc);
}

/* Initialize the bdc HW and memory */
static int bdc_hw_init(struct bdc *bdc)
{
	int ret;

	dev_dbg(bdc->dev, "%s ()\n", __func__);
	ret = bdc_reset(bdc);
	if (ret) {
		dev_err(bdc->dev, "err resetting bdc, aborting bdc init: %d\n",
			ret);
		return ret;
	}
	ret = bdc_mem_alloc(bdc);
	if (ret) {
		dev_err(bdc->dev, "Mem alloc failed, aborting\n");
		return -ENOMEM;
	}
	bdc_mem_init(bdc, false);
	bdc_dbg_regs(bdc);
	dev_dbg(bdc->dev, "HW Init done\n");

	return 0;
}

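/*
 * Initialize and power on every PHY described by the "phys" DT property;
 * on failure, power off and exit the PHYs that were already brought up.
 */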
static int bdc_phy_init(struct bdc *bdc)
{
	int phy_num;
	int ret;

	for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
		ret = phy_init(bdc->phys[phy_num]);
		if (ret)
			goto err_exit_phy;
		ret = phy_power_on(bdc->phys[phy_num]);
		if (ret) {
			phy_exit(bdc->phys[phy_num]);
			goto err_exit_phy;
		}
	}

	return 0;

err_exit_phy:
	while (--phy_num >= 0) {
		phy_power_off(bdc->phys[phy_num]);
		phy_exit(bdc->phys[phy_num]);
	}

	return ret;
}

static void bdc_phy_exit(struct bdc *bdc)
{
	int phy_num;

	for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
		phy_power_off(bdc->phys[phy_num]);
		phy_exit(bdc->phys[phy_num]);
	}
}

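/*
 * Probe order: optional "sw_usbd" clock, register space and IRQ, PHYs,
 * DMA mask selection, controller/memory init, then UDC registration.
 */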
static int bdc_probe(struct platform_device *pdev)
{
	struct bdc *bdc;
	int ret;
	int irq;
	u32 temp;
	struct device *dev = &pdev->dev;
	struct clk *clk;
	int phy_num;

	dev_dbg(dev, "%s()\n", __func__);

	/* The sw_usbd clock is optional; clk_prepare_enable(NULL) is a no-op */
	clk = devm_clk_get(dev, "sw_usbd");
	if (IS_ERR(clk)) {
		dev_info(dev, "Clock not found in Device Tree\n");
		clk = NULL;
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		dev_err(dev, "could not enable clock\n");
		return ret;
	}

	bdc = devm_kzalloc(dev, sizeof(*bdc), GFP_KERNEL);
	if (!bdc) {
		ret = -ENOMEM;
		goto disable_clk;
	}

	bdc->clk = clk;

	bdc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(bdc->regs)) {
		dev_err(dev, "ioremap error\n");
		ret = PTR_ERR(bdc->regs);
		goto disable_clk;
	}
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto disable_clk;
	}
	spin_lock_init(&bdc->lock);
	platform_set_drvdata(pdev, bdc);
	bdc->irq = irq;
	bdc->dev = dev;
	dev_dbg(dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq);

	bdc->num_phys = of_count_phandle_with_args(dev->of_node,
						"phys", "#phy-cells");
	if (bdc->num_phys > 0) {
		bdc->phys = devm_kcalloc(dev, bdc->num_phys,
					sizeof(struct phy *), GFP_KERNEL);
		if (!bdc->phys) {
			ret = -ENOMEM;
			goto disable_clk;
		}
	} else {
		bdc->num_phys = 0;
	}
	dev_info(dev, "Using %d phy(s)\n", bdc->num_phys);

	for (phy_num = 0; phy_num < bdc->num_phys; phy_num++) {
		bdc->phys[phy_num] = devm_of_phy_get_by_index(
			dev, dev->of_node, phy_num);
		if (IS_ERR(bdc->phys[phy_num])) {
			ret = PTR_ERR(bdc->phys[phy_num]);
			dev_err(bdc->dev,
				"BDC phy specified but not found:%d\n", ret);
			goto disable_clk;
		}
	}

	ret = bdc_phy_init(bdc);
	if (ret) {
		dev_err(bdc->dev, "BDC phy init failure:%d\n", ret);
		goto disable_clk;
	}

	/* BDCCAP1.P64 advertises 64-bit addressing; else fall back to 32-bit */
	temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
	if ((temp & BDC_P64) &&
			!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
		dev_dbg(dev, "Using 64-bit address\n");
	} else {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev,
				"No suitable DMA config available, abort\n");
			ret = -ENOTSUPP;
			goto phycleanup;
		}
		dev_dbg(dev, "Using 32-bit address\n");
	}
	ret = bdc_hw_init(bdc);
	if (ret) {
		dev_err(dev, "BDC init failure:%d\n", ret);
		goto phycleanup;
	}
	ret = bdc_udc_init(bdc);
	if (ret) {
		dev_err(dev, "BDC Gadget init failure:%d\n", ret);
		goto cleanup;
	}
	return 0;

cleanup:
	bdc_hw_exit(bdc);
phycleanup:
	bdc_phy_exit(bdc);
disable_clk:
	clk_disable_unprepare(clk);
	return ret;
}

static int bdc_remove(struct platform_device *pdev)
{
	struct bdc *bdc;

	bdc = platform_get_drvdata(pdev);
	dev_dbg(bdc->dev, "%s ()\n", __func__);
	bdc_udc_exit(bdc);
	bdc_hw_exit(bdc);
	bdc_phy_exit(bdc);
	clk_disable_unprepare(bdc->clk);
	return 0;
}

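/*
 * System sleep simply gates the clock; resume re-enables it and runs
 * bdc_reinit() to bring the controller back up from a clean state.
 */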
#ifdef CONFIG_PM_SLEEP
static int bdc_suspend(struct device *dev)
{
	struct bdc *bdc = dev_get_drvdata(dev);

	clk_disable_unprepare(bdc->clk);
	return 0;
}

static int bdc_resume(struct device *dev)
{
	struct bdc *bdc = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(bdc->clk);
	if (ret) {
		dev_err(bdc->dev, "err enabling the clock\n");
		return ret;
	}
	ret = bdc_reinit(bdc);
	if (ret) {
		dev_err(bdc->dev, "err in bdc reinit\n");
		return ret;
	}

	return 0;
}

#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bdc_pm_ops, bdc_suspend, bdc_resume);

static const struct of_device_id bdc_of_match[] = {
	{ .compatible = "brcm,bdc-v0.16" },
	{ .compatible = "brcm,bdc" },
	{ /* sentinel */ }
};

static struct platform_driver bdc_driver = {
	.driver		= {
		.name	= BRCM_BDC_NAME,
		.pm = &bdc_pm_ops,
		.of_match_table	= bdc_of_match,
	},
	.probe		= bdc_probe,
	.remove		= bdc_remove,
};

module_platform_driver(bdc_driver);
MODULE_AUTHOR("Ashwini Pahuja <ashwini.linux@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(BRCM_BDC_DESC);