1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (c) 2010 Broadcom Corporation
4  */
5 /* ****************** SDIO CARD Interface Functions **************************/
6 
7 #include <linux/types.h>
8 #include <linux/netdevice.h>
9 #include <linux/pci.h>
10 #include <linux/pci_ids.h>
11 #include <linux/sched.h>
12 #include <linux/completion.h>
13 #include <linux/interrupt.h>
14 #include <linux/scatterlist.h>
15 #include <linux/mmc/sdio.h>
16 #include <linux/mmc/core.h>
17 #include <linux/mmc/sdio_func.h>
18 #include <linux/mmc/card.h>
19 #include <linux/mmc/host.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/suspend.h>
22 #include <linux/errno.h>
23 #include <linux/module.h>
24 #include <linux/acpi.h>
25 #include <net/cfg80211.h>
26 
27 #include <defs.h>
28 #include <brcm_hw_ids.h>
29 #include <brcmu_utils.h>
30 #include <brcmu_wifi.h>
31 #include <chipcommon.h>
32 #include <soc.h>
33 #include "chip.h"
34 #include "bus.h"
35 #include "debug.h"
36 #include "sdio.h"
37 #include "core.h"
38 #include "common.h"
39 
40 #define SDIOH_API_ACCESS_RETRY_LIMIT	2
41 
42 #define DMA_ALIGN_MASK	0x03
43 
44 #define SDIO_FUNC1_BLOCKSIZE		64
45 #define SDIO_FUNC2_BLOCKSIZE		512
46 #define SDIO_4359_FUNC2_BLOCKSIZE	256
47 /* Maximum milliseconds to wait for F2 to come up */
48 #define SDIO_WAIT_F2RDY	3000
49 
50 #define BRCMF_DEFAULT_RXGLOM_SIZE	32  /* max rx frames in glom chain */
51 
52 struct brcmf_sdiod_freezer {
53 	atomic_t freezing;
54 	atomic_t thread_count;
55 	u32 frozen_count;
56 	wait_queue_head_t thread_freeze;
57 	struct completion resumed;
58 };
59 
60 static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
61 {
62 	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
63 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
64 
65 	brcmf_dbg(INTR, "OOB intr triggered\n");
66 
	/* The out-of-band interrupt is level triggered and will not be
	 * cleared until the DPC has serviced the dongle, so mask it here.
	 */
70 	if (sdiodev->irq_en) {
71 		disable_irq_nosync(irq);
72 		sdiodev->irq_en = false;
73 	}
74 
75 	brcmf_sdio_isr(sdiodev->bus);
76 
77 	return IRQ_HANDLED;
78 }
79 
80 static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
81 {
82 	struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
83 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
84 
85 	brcmf_dbg(INTR, "IB intr triggered\n");
86 
87 	brcmf_sdio_isr(sdiodev->bus);
88 }
89 
90 /* dummy handler for SDIO function 2 interrupt */
91 static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
92 {
93 }
94 
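/*
 * Register the dongle interrupt handler. Two schemes are supported: a
 * platform-provided out-of-band GPIO interrupt, routed out of the chip
 * through the vendor SEPINT register in function 0, or the regular in-band
 * SDIO interrupt claimed on functions 1 and 2. The OOB interrupt is level
 * triggered, so its handler masks it until the DPC has serviced the dongle.
 */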
95 int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
96 {
97 	struct brcmfmac_sdio_pd *pdata;
98 	int ret = 0;
99 	u8 data;
100 	u32 addr, gpiocontrol;
101 
102 	pdata = &sdiodev->settings->bus.sdio;
103 	if (pdata->oob_irq_supported) {
104 		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
105 			  pdata->oob_irq_nr);
106 		spin_lock_init(&sdiodev->irq_en_lock);
107 		sdiodev->irq_en = true;
108 
109 		ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
110 				  pdata->oob_irq_flags, "brcmf_oob_intr",
111 				  &sdiodev->func1->dev);
112 		if (ret != 0) {
113 			brcmf_err("request_irq failed %d\n", ret);
114 			return ret;
115 		}
116 		sdiodev->oob_irq_requested = true;
117 
118 		ret = enable_irq_wake(pdata->oob_irq_nr);
119 		if (ret != 0) {
120 			brcmf_err("enable_irq_wake failed %d\n", ret);
121 			return ret;
122 		}
123 		disable_irq_wake(pdata->oob_irq_nr);
124 
125 		sdio_claim_host(sdiodev->func1);
126 
127 		if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
128 			/* assign GPIO to SDIO core */
129 			addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
130 			gpiocontrol = brcmf_sdiod_readl(sdiodev, addr, &ret);
131 			gpiocontrol |= 0x2;
132 			brcmf_sdiod_writel(sdiodev, addr, gpiocontrol, &ret);
133 
134 			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_SELECT,
135 					   0xf, &ret);
136 			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
137 			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
138 		}
139 
140 		/* must configure SDIO_CCCR_IENx to enable irq */
141 		data = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_IENx, &ret);
142 		data |= SDIO_CCCR_IEN_FUNC1 | SDIO_CCCR_IEN_FUNC2 |
143 			SDIO_CCCR_IEN_FUNC0;
144 		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, data, &ret);
145 
146 		/* redirect, configure and enable io for interrupt signal */
147 		data = SDIO_CCCR_BRCM_SEPINT_MASK | SDIO_CCCR_BRCM_SEPINT_OE;
148 		if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
149 			data |= SDIO_CCCR_BRCM_SEPINT_ACT_HI;
150 		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT,
151 				     data, &ret);
152 		sdio_release_host(sdiodev->func1);
153 	} else {
154 		brcmf_dbg(SDIO, "Entering\n");
155 		sdio_claim_host(sdiodev->func1);
156 		sdio_claim_irq(sdiodev->func1, brcmf_sdiod_ib_irqhandler);
157 		sdio_claim_irq(sdiodev->func2, brcmf_sdiod_dummy_irqhandler);
158 		sdio_release_host(sdiodev->func1);
159 		sdiodev->sd_irq_requested = true;
160 	}
161 
162 	return 0;
163 }
164 
165 void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
166 {
168 	brcmf_dbg(SDIO, "Entering oob=%d sd=%d\n",
169 		  sdiodev->oob_irq_requested,
170 		  sdiodev->sd_irq_requested);
171 
172 	if (sdiodev->oob_irq_requested) {
173 		struct brcmfmac_sdio_pd *pdata;
174 
175 		pdata = &sdiodev->settings->bus.sdio;
176 		sdio_claim_host(sdiodev->func1);
177 		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
178 		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
179 		sdio_release_host(sdiodev->func1);
180 
181 		sdiodev->oob_irq_requested = false;
182 		free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev);
183 		sdiodev->irq_en = false;
185 	}
186 
187 	if (sdiodev->sd_irq_requested) {
188 		sdio_claim_host(sdiodev->func1);
189 		sdio_release_irq(sdiodev->func2);
190 		sdio_release_irq(sdiodev->func1);
191 		sdio_release_host(sdiodev->func1);
192 		sdiodev->sd_irq_requested = false;
193 	}
194 }
195 
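/*
 * Track the bus state and propagate changes to the common bus layer: once
 * the device is declared NOMEDIUM (card gone) no further transitions are
 * accepted; leaving DATA marks the bus down, and entering DATA from DOWN
 * marks it up.
 */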
196 void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
197 			      enum brcmf_sdiod_state state)
198 {
199 	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
200 	    state == sdiodev->state)
201 		return;
202 
203 	brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
204 	switch (sdiodev->state) {
205 	case BRCMF_SDIOD_DATA:
206 		/* any other state means bus interface is down */
207 		brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
208 		break;
209 	case BRCMF_SDIOD_DOWN:
210 		/* transition from DOWN to DATA means bus interface is up */
211 		if (state == BRCMF_SDIOD_DATA)
212 			brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
213 		break;
214 	default:
215 		break;
216 	}
217 	sdiodev->state = state;
218 }
219 
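/*
 * Point the function 1 backplane window at the 32 KiB region containing
 * the given address by programming the SBADDRLOW/MID/HIGH registers. The
 * current window base is cached in sdiodev->sbwad so the registers are
 * only rewritten when the window actually moves.
 */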
220 static int brcmf_sdiod_set_backplane_window(struct brcmf_sdio_dev *sdiodev,
221 					    u32 addr)
222 {
223 	u32 v, bar0 = addr & SBSDIO_SBWINDOW_MASK;
224 	int err = 0, i;
225 
226 	if (bar0 == sdiodev->sbwad)
227 		return 0;
228 
229 	v = bar0 >> 8;
230 
231 	for (i = 0 ; i < 3 && !err ; i++, v >>= 8)
232 		brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SBADDRLOW + i,
233 				   v & 0xff, &err);
234 
235 	if (!err)
236 		sdiodev->sbwad = bar0;
237 
238 	return err;
239 }
240 
241 u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
242 {
243 	u32 data = 0;
244 	int retval;
245 
246 	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
247 	if (retval)
248 		goto out;
249 
250 	addr &= SBSDIO_SB_OFT_ADDR_MASK;
251 	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
252 
253 	data = sdio_readl(sdiodev->func1, addr, &retval);
254 
255 out:
256 	if (ret)
257 		*ret = retval;
258 
259 	return data;
260 }
261 
262 void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr,
263 			u32 data, int *ret)
264 {
265 	int retval;
266 
267 	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
268 	if (retval)
269 		goto out;
270 
271 	addr &= SBSDIO_SB_OFT_ADDR_MASK;
272 	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
273 
274 	sdio_writel(sdiodev->func1, data, addr, &retval);
275 
276 out:
277 	if (ret)
278 		*ret = retval;
279 }
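
/*
 * Illustrative use of brcmf_sdiod_readl()/brcmf_sdiod_writel() (a sketch,
 * not a sequence taken from this file; addr and some_bit are placeholders).
 * The error pointer is optional and, when NULL, a failure of the window
 * setup or of the CMD53 access itself is silently discarded:
 *
 *	int err = 0;
 *	u32 val;
 *
 *	val = brcmf_sdiod_readl(sdiodev, addr, &err);
 *	if (!err)
 *		brcmf_sdiod_writel(sdiodev, addr, val | some_bit, &err);
 */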
280 
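/*
 * Read one skb worth of data from the dongle. Function 1 transfers use
 * sdio_memcpy_fromio(), i.e. CMD53 with an incrementing address, while
 * function 2 reads from its fixed FIFO address with sdio_readsb(). The
 * request length is rounded up to a whole number of 32-bit words.
 */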
281 static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev,
282 				   struct sdio_func *func, u32 addr,
283 				   struct sk_buff *skb)
284 {
285 	unsigned int req_sz;
286 	int err;
287 
	/* A single skb uses the standard mmc interface */
289 	req_sz = skb->len + 3;
290 	req_sz &= (uint)~3;
291 
292 	switch (func->num) {
293 	case 1:
294 		err = sdio_memcpy_fromio(func, ((u8 *)(skb->data)), addr,
295 					 req_sz);
296 		break;
297 	case 2:
298 		err = sdio_readsb(func, ((u8 *)(skb->data)), addr, req_sz);
299 		break;
300 	default:
301 		/* bail out as things are really fishy here */
302 		WARN(1, "invalid sdio function number: %d\n", func->num);
303 		err = -ENOMEDIUM;
304 	}
305 
306 	if (err == -ENOMEDIUM)
307 		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
308 
309 	return err;
310 }
311 
312 static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev,
313 				    struct sdio_func *func, u32 addr,
314 				    struct sk_buff *skb)
315 {
316 	unsigned int req_sz;
317 	int err;
318 
	/* A single skb uses the standard mmc interface */
320 	req_sz = skb->len + 3;
321 	req_sz &= (uint)~3;
322 
323 	err = sdio_memcpy_toio(func, addr, ((u8 *)(skb->data)), req_sz);
324 
325 	if (err == -ENOMEDIUM)
326 		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
327 
328 	return err;
329 }
330 
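/*
 * Submit one CMD53 block-mode request covering the scatterlist entries
 * collected so far. The 17-bit register address and the 9-bit block count
 * are merged into the command argument prepared by the caller, and for
 * function 1 (incrementing addressing) the address is advanced past the
 * data just transferred.
 */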
331 static int mmc_submit_one(struct mmc_data *md, struct mmc_request *mr,
332 			  struct mmc_command *mc, int sg_cnt, int req_sz,
333 			  int func_blk_sz, u32 *addr,
334 			  struct brcmf_sdio_dev *sdiodev,
335 			  struct sdio_func *func, int write)
336 {
337 	int ret;
338 
339 	md->sg_len = sg_cnt;
340 	md->blocks = req_sz / func_blk_sz;
341 	mc->arg |= (*addr & 0x1FFFF) << 9;	/* address */
342 	mc->arg |= md->blocks & 0x1FF;	/* block count */
343 	/* incrementing addr for function 1 */
344 	if (func->num == 1)
345 		*addr += req_sz;
346 
347 	mmc_set_data_timeout(md, func->card);
348 	mmc_wait_for_req(func->card->host, mr);
349 
350 	ret = mc->error ? mc->error : md->error;
351 	if (ret == -ENOMEDIUM) {
352 		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
353 	} else if (ret != 0) {
354 		brcmf_err("CMD53 sg block %s failed %d\n",
355 			  write ? "write" : "read", ret);
356 		ret = -EIO;
357 	}
358 
359 	return ret;
360 }
361 
362 /**
363  * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
364  * @sdiodev: brcmfmac sdio device
365  * @func: SDIO function
366  * @write: direction flag
367  * @addr: dongle memory address as source/destination
 * @pktlist: list of packets (skbs) to transfer
 *
 * This function is the interface to the MMC stack for block data access. It
 * assumes that each skb passed down by the caller has already been padded
 * and aligned.
373  */
374 static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
375 				 struct sdio_func *func,
376 				 bool write, u32 addr,
377 				 struct sk_buff_head *pktlist)
378 {
379 	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
380 	unsigned int max_req_sz, src_offset, dst_offset;
381 	unsigned char *pkt_data, *orig_data, *dst_data;
382 	struct sk_buff_head local_list, *target_list;
383 	struct sk_buff *pkt_next = NULL, *src;
384 	unsigned short max_seg_cnt;
385 	struct mmc_request mmc_req;
386 	struct mmc_command mmc_cmd;
387 	struct mmc_data mmc_dat;
388 	struct scatterlist *sgl;
389 	int ret = 0;
390 
391 	if (!pktlist->qlen)
392 		return -EINVAL;
393 
394 	target_list = pktlist;
	/* for hosts with broken SG support, prepare a page-aligned list */
396 	__skb_queue_head_init(&local_list);
397 	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
398 		req_sz = 0;
399 		skb_queue_walk(pktlist, pkt_next)
400 			req_sz += pkt_next->len;
401 		req_sz = ALIGN(req_sz, func->cur_blksize);
402 		while (req_sz > PAGE_SIZE) {
403 			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
404 			if (pkt_next == NULL) {
405 				ret = -ENOMEM;
406 				goto exit;
407 			}
408 			__skb_queue_tail(&local_list, pkt_next);
409 			req_sz -= PAGE_SIZE;
410 		}
411 		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
412 		if (pkt_next == NULL) {
413 			ret = -ENOMEM;
414 			goto exit;
415 		}
416 		__skb_queue_tail(&local_list, pkt_next);
417 		target_list = &local_list;
418 	}
419 
420 	func_blk_sz = func->cur_blksize;
421 	max_req_sz = sdiodev->max_request_size;
422 	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
423 			    target_list->qlen);
424 
425 	memset(&mmc_req, 0, sizeof(struct mmc_request));
426 	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
427 	memset(&mmc_dat, 0, sizeof(struct mmc_data));
428 
429 	mmc_dat.sg = sdiodev->sgtable.sgl;
430 	mmc_dat.blksz = func_blk_sz;
431 	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
432 	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
433 	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag  */
434 	mmc_cmd.arg |= (func->num & 0x7) << 28;	/* SDIO func num */
435 	mmc_cmd.arg |= 1 << 27;			/* block mode */
436 	/* for function 1 the addr will be incremented */
437 	mmc_cmd.arg |= (func->num == 1) ? 1 << 26 : 0;
438 	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
439 	mmc_req.cmd = &mmc_cmd;
440 	mmc_req.data = &mmc_dat;
441 
442 	req_sz = 0;
443 	sg_cnt = 0;
444 	sgl = sdiodev->sgtable.sgl;
445 	skb_queue_walk(target_list, pkt_next) {
446 		pkt_offset = 0;
447 		while (pkt_offset < pkt_next->len) {
448 			pkt_data = pkt_next->data + pkt_offset;
449 			sg_data_sz = pkt_next->len - pkt_offset;
450 			if (sg_data_sz > sdiodev->max_segment_size)
451 				sg_data_sz = sdiodev->max_segment_size;
452 			if (sg_data_sz > max_req_sz - req_sz)
453 				sg_data_sz = max_req_sz - req_sz;
454 
455 			sg_set_buf(sgl, pkt_data, sg_data_sz);
456 			sg_cnt++;
457 
458 			sgl = sg_next(sgl);
459 			req_sz += sg_data_sz;
460 			pkt_offset += sg_data_sz;
461 			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) {
462 				ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
463 						     sg_cnt, req_sz, func_blk_sz,
464 						     &addr, sdiodev, func, write);
465 				if (ret)
466 					goto exit_queue_walk;
467 				req_sz = 0;
468 				sg_cnt = 0;
469 				sgl = sdiodev->sgtable.sgl;
470 			}
471 		}
472 	}
473 	if (sg_cnt)
474 		ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
475 				     sg_cnt, req_sz, func_blk_sz,
476 				     &addr, sdiodev, func, write);
477 exit_queue_walk:
478 	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
479 		src = __skb_peek(&local_list);
480 		src_offset = 0;
481 		skb_queue_walk(pktlist, pkt_next) {
482 			dst_offset = 0;
483 
484 			/* This is safe because we must have enough SKB data
485 			 * in the local list to cover everything in pktlist.
486 			 */
487 			while (1) {
488 				req_sz = pkt_next->len - dst_offset;
489 				if (req_sz > src->len - src_offset)
490 					req_sz = src->len - src_offset;
491 
492 				orig_data = src->data + src_offset;
493 				dst_data = pkt_next->data + dst_offset;
494 				memcpy(dst_data, orig_data, req_sz);
495 
496 				src_offset += req_sz;
497 				if (src_offset == src->len) {
498 					src_offset = 0;
499 					src = skb_peek_next(src, &local_list);
500 				}
501 				dst_offset += req_sz;
502 				if (dst_offset == pkt_next->len)
503 					break;
504 			}
505 		}
506 	}
507 
508 exit:
509 	sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
510 	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
511 		brcmu_pkt_buf_free_skb(pkt_next);
512 
513 	return ret;
514 }
515 
516 int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
517 {
518 	struct sk_buff *mypkt;
519 	int err;
520 
521 	mypkt = brcmu_pkt_buf_get_skb(nbytes);
522 	if (!mypkt) {
523 		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
524 			  nbytes);
525 		return -EIO;
526 	}
527 
528 	err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
529 	if (!err)
530 		memcpy(buf, mypkt->data, nbytes);
531 
532 	brcmu_pkt_buf_free_skb(mypkt);
533 	return err;
534 }
535 
536 int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
537 {
538 	u32 addr = sdiodev->cc_core->base;
539 	int err = 0;
540 
541 	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
542 
543 	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
544 	if (err)
545 		goto done;
546 
547 	addr &= SBSDIO_SB_OFT_ADDR_MASK;
548 	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
549 
550 	err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, pkt);
551 
552 done:
553 	return err;
554 }
555 
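/*
 * Receive a chain (glom) of packets from function 2. A single packet is
 * read directly; when the host cannot do scatter-gather the whole glom is
 * read into one temporary buffer and copied out into the individual skbs;
 * otherwise the scatterlist path in brcmf_sdiod_sglist_rw() is used.
 */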
556 int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
557 			   struct sk_buff_head *pktq, uint totlen)
558 {
559 	struct sk_buff *glom_skb = NULL;
560 	struct sk_buff *skb;
561 	u32 addr = sdiodev->cc_core->base;
562 	int err = 0;
563 
564 	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
565 		  addr, pktq->qlen);
566 
567 	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
568 	if (err)
569 		goto done;
570 
571 	addr &= SBSDIO_SB_OFT_ADDR_MASK;
572 	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
573 
574 	if (pktq->qlen == 1)
575 		err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
576 					      __skb_peek(pktq));
577 	else if (!sdiodev->sg_support) {
578 		glom_skb = brcmu_pkt_buf_get_skb(totlen);
579 		if (!glom_skb)
580 			return -ENOMEM;
581 		err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
582 					      glom_skb);
583 		if (err)
584 			goto done;
585 
586 		skb_queue_walk(pktq, skb) {
587 			memcpy(skb->data, glom_skb->data, skb->len);
588 			skb_pull(glom_skb, skb->len);
589 		}
590 	} else
591 		err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, false,
592 					    addr, pktq);
593 
594 done:
595 	brcmu_pkt_buf_free_skb(glom_skb);
596 	return err;
597 }
598 
599 int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
600 {
601 	struct sk_buff *mypkt;
602 	u32 addr = sdiodev->cc_core->base;
603 	int err;
604 
605 	mypkt = brcmu_pkt_buf_get_skb(nbytes);
606 
607 	if (!mypkt) {
608 		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
609 			  nbytes);
610 		return -EIO;
611 	}
612 
613 	memcpy(mypkt->data, buf, nbytes);
614 
615 	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
616 	if (err)
617 		goto out;
618 
619 	addr &= SBSDIO_SB_OFT_ADDR_MASK;
620 	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
621 
622 	err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr, mypkt);
623 out:
624 	brcmu_pkt_buf_free_skb(mypkt);
625 
626 	return err;
627 }
628 
629 int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
630 			 struct sk_buff_head *pktq)
631 {
632 	struct sk_buff *skb;
633 	u32 addr = sdiodev->cc_core->base;
634 	int err;
635 
636 	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
637 
638 	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
639 	if (err)
640 		return err;
641 
642 	addr &= SBSDIO_SB_OFT_ADDR_MASK;
643 	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
644 
645 	if (pktq->qlen == 1 || !sdiodev->sg_support) {
646 		skb_queue_walk(pktq, skb) {
647 			err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2,
648 						       addr, skb);
649 			if (err)
650 				break;
651 		}
652 	} else {
653 		err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, true,
654 					    addr, pktq);
655 	}
656 
657 	return err;
658 }
659 
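/*
 * Read or write a block of dongle RAM through the function 1 backplane
 * window. The transfer is split into chunks that never cross a 32 KiB
 * window boundary; the window is moved for each chunk and a temporary skb
 * serves as the bounce buffer for the SDIO transfer.
 */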
660 int
661 brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
662 		  u8 *data, uint size)
663 {
664 	int err = 0;
665 	struct sk_buff *pkt;
666 	u32 sdaddr;
667 	uint dsize;
668 
669 	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
670 	pkt = dev_alloc_skb(dsize);
671 	if (!pkt) {
672 		brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
673 		return -EIO;
674 	}
675 	pkt->priority = 0;
676 
677 	/* Determine initial transfer parameters */
678 	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
679 	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
680 		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
681 	else
682 		dsize = size;
683 
684 	sdio_claim_host(sdiodev->func1);
685 
686 	/* Do the transfer(s) */
687 	while (size) {
688 		/* Set the backplane window to include the start address */
689 		err = brcmf_sdiod_set_backplane_window(sdiodev, address);
690 		if (err)
691 			break;
692 
693 		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
694 			  write ? "write" : "read", dsize,
695 			  sdaddr, address & SBSDIO_SBWINDOW_MASK);
696 
697 		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
698 		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
699 
700 		skb_put(pkt, dsize);
701 
702 		if (write) {
703 			memcpy(pkt->data, data, dsize);
704 			err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func1,
705 						       sdaddr, pkt);
706 		} else {
707 			err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func1,
708 						      sdaddr, pkt);
709 		}
710 
711 		if (err) {
712 			brcmf_err("membytes transfer failed\n");
713 			break;
714 		}
715 		if (!write)
716 			memcpy(data, pkt->data, dsize);
717 		skb_trim(pkt, 0);
718 
719 		/* Adjust for next transfer (if any) */
720 		size -= dsize;
721 		if (size) {
722 			data += dsize;
723 			address += dsize;
724 			sdaddr = 0;
725 			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
726 		}
727 	}
728 
729 	dev_kfree_skb(pkt);
730 
731 	sdio_release_host(sdiodev->func1);
732 
733 	return err;
734 }
735 
736 int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func)
737 {
738 	brcmf_dbg(SDIO, "Enter\n");
739 
	/* Issue the abort for this function via a CMD52 write to F0 */
741 	brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, func->num, NULL);
742 
743 	brcmf_dbg(SDIO, "Exit\n");
744 	return 0;
745 }
746 
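/*
 * Derive the transfer limits (maximum request size, segment count and
 * segment size) from the host controller and, when the host supports
 * scatter-gather, preallocate a scatter table large enough for the rx/tx
 * glomming paths. On allocation failure scatter-gather is simply disabled
 * instead of failing the probe.
 */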
747 void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
748 {
749 	struct sdio_func *func;
750 	struct mmc_host *host;
751 	uint max_blocks;
752 	uint nents;
753 	int err;
754 
755 	func = sdiodev->func2;
756 	host = func->card->host;
757 	sdiodev->sg_support = host->max_segs > 1;
758 	max_blocks = min_t(uint, host->max_blk_count, 511u);
759 	sdiodev->max_request_size = min_t(uint, host->max_req_size,
760 					  max_blocks * func->cur_blksize);
761 	sdiodev->max_segment_count = min_t(uint, host->max_segs,
762 					   SG_MAX_SINGLE_ALLOC);
763 	sdiodev->max_segment_size = host->max_seg_size;
764 
765 	if (!sdiodev->sg_support)
766 		return;
767 
768 	nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE,
769 		      sdiodev->settings->bus.sdio.txglomsz);
770 	nents += (nents >> 4) + 1;
771 
772 	WARN_ON(nents > sdiodev->max_segment_count);
773 
774 	brcmf_dbg(TRACE, "nents=%d\n", nents);
775 	err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
776 	if (err < 0) {
		brcmf_err("allocation failed: disable scatter-gather\n");
778 		sdiodev->sg_support = false;
779 	}
780 
781 	sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz;
782 }
783 
784 #ifdef CONFIG_PM_SLEEP
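/*
 * The freezer synchronises the SDIO worker threads with system suspend.
 * brcmf_sdiod_freezer_on() flags the freeze, kicks the DPC and waits until
 * every thread registered via brcmf_sdiod_freezer_count() has parked in
 * brcmf_sdiod_try_freeze(), then puts the dongle to sleep. On resume
 * brcmf_sdiod_freezer_off() wakes the dongle and completes 'resumed' to
 * release the parked threads.
 */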
785 static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
786 {
787 	sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
788 	if (!sdiodev->freezer)
789 		return -ENOMEM;
790 	atomic_set(&sdiodev->freezer->thread_count, 0);
791 	atomic_set(&sdiodev->freezer->freezing, 0);
792 	init_waitqueue_head(&sdiodev->freezer->thread_freeze);
793 	init_completion(&sdiodev->freezer->resumed);
794 	return 0;
795 }
796 
797 static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
798 {
799 	if (sdiodev->freezer) {
800 		WARN_ON(atomic_read(&sdiodev->freezer->freezing));
801 		kfree(sdiodev->freezer);
802 	}
803 }
804 
805 static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
806 {
807 	atomic_t *expect = &sdiodev->freezer->thread_count;
808 	int res = 0;
809 
810 	sdiodev->freezer->frozen_count = 0;
811 	reinit_completion(&sdiodev->freezer->resumed);
812 	atomic_set(&sdiodev->freezer->freezing, 1);
813 	brcmf_sdio_trigger_dpc(sdiodev->bus);
814 	wait_event(sdiodev->freezer->thread_freeze,
815 		   atomic_read(expect) == sdiodev->freezer->frozen_count);
816 	sdio_claim_host(sdiodev->func1);
817 	res = brcmf_sdio_sleep(sdiodev->bus, true);
818 	sdio_release_host(sdiodev->func1);
819 	return res;
820 }
821 
822 static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
823 {
824 	sdio_claim_host(sdiodev->func1);
825 	brcmf_sdio_sleep(sdiodev->bus, false);
826 	sdio_release_host(sdiodev->func1);
827 	atomic_set(&sdiodev->freezer->freezing, 0);
828 	complete_all(&sdiodev->freezer->resumed);
829 }
830 
831 bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
832 {
833 	return atomic_read(&sdiodev->freezer->freezing);
834 }
835 
836 void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
837 {
838 	if (!brcmf_sdiod_freezing(sdiodev))
839 		return;
840 	sdiodev->freezer->frozen_count++;
841 	wake_up(&sdiodev->freezer->thread_freeze);
842 	wait_for_completion(&sdiodev->freezer->resumed);
843 }
844 
845 void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
846 {
847 	atomic_inc(&sdiodev->freezer->thread_count);
848 }
849 
850 void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
851 {
852 	atomic_dec(&sdiodev->freezer->thread_count);
853 }
854 #else
855 static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
856 {
857 	return 0;
858 }
859 
860 static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
861 {
862 }
863 #endif /* CONFIG_PM_SLEEP */
864 
865 static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
866 {
867 	sdiodev->state = BRCMF_SDIOD_DOWN;
868 	if (sdiodev->bus) {
869 		brcmf_sdio_remove(sdiodev->bus);
870 		sdiodev->bus = NULL;
871 	}
872 
873 	brcmf_sdiod_freezer_detach(sdiodev);
874 
875 	/* Disable Function 2 */
876 	sdio_claim_host(sdiodev->func2);
877 	sdio_disable_func(sdiodev->func2);
878 	sdio_release_host(sdiodev->func2);
879 
880 	/* Disable Function 1 */
881 	sdio_claim_host(sdiodev->func1);
882 	sdio_disable_func(sdiodev->func1);
883 	sdio_release_host(sdiodev->func1);
884 
885 	sg_free_table(&sdiodev->sgtable);
886 	sdiodev->sbwad = 0;
887 
888 	pm_runtime_allow(sdiodev->func1->card->host->parent);
889 	return 0;
890 }
891 
892 static void brcmf_sdiod_host_fixup(struct mmc_host *host)
893 {
894 	/* runtime-pm powers off the device */
895 	pm_runtime_forbid(host->parent);
896 	/* avoid removal detection upon resume */
897 	host->caps |= MMC_CAP_NONREMOVABLE;
898 }
899 
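/*
 * Set up both SDIO functions: program the F1 and F2 block sizes (the 4359
 * uses a smaller F2 block size), extend the F2 enable timeout, enable F1,
 * attach the suspend freezer and finally hand over to the common layer via
 * brcmf_sdio_probe(). The host is also marked non-removable and runtime PM
 * is forbidden so the card is not powered down underneath the driver.
 */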
900 static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
901 {
902 	int ret = 0;
903 	unsigned int f2_blksz = SDIO_FUNC2_BLOCKSIZE;
904 
905 	sdio_claim_host(sdiodev->func1);
906 
907 	ret = sdio_set_block_size(sdiodev->func1, SDIO_FUNC1_BLOCKSIZE);
908 	if (ret) {
909 		brcmf_err("Failed to set F1 blocksize\n");
910 		sdio_release_host(sdiodev->func1);
911 		goto out;
912 	}
913 	if (sdiodev->func2->device == SDIO_DEVICE_ID_BROADCOM_4359)
914 		f2_blksz = SDIO_4359_FUNC2_BLOCKSIZE;
915 	ret = sdio_set_block_size(sdiodev->func2, f2_blksz);
916 	if (ret) {
917 		brcmf_err("Failed to set F2 blocksize\n");
918 		sdio_release_host(sdiodev->func1);
919 		goto out;
920 	}
921 
922 	/* increase F2 timeout */
923 	sdiodev->func2->enable_timeout = SDIO_WAIT_F2RDY;
924 
925 	/* Enable Function 1 */
926 	ret = sdio_enable_func(sdiodev->func1);
927 	sdio_release_host(sdiodev->func1);
928 	if (ret) {
929 		brcmf_err("Failed to enable F1: err=%d\n", ret);
930 		goto out;
931 	}
932 
933 	ret = brcmf_sdiod_freezer_attach(sdiodev);
934 	if (ret)
935 		goto out;
936 
937 	/* try to attach to the target device */
938 	sdiodev->bus = brcmf_sdio_probe(sdiodev);
939 	if (!sdiodev->bus) {
940 		ret = -ENODEV;
941 		goto out;
942 	}
943 	brcmf_sdiod_host_fixup(sdiodev->func2->card->host);
944 out:
945 	if (ret)
946 		brcmf_sdiod_remove(sdiodev);
947 
948 	return ret;
949 }
950 
951 #define BRCMF_SDIO_DEVICE(dev_id)	\
952 	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, dev_id)}
953 
954 /* devices we support, null terminated */
955 static const struct sdio_device_id brcmf_sdmmc_ids[] = {
956 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43143),
957 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43241),
958 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4329),
959 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4330),
960 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4334),
961 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
962 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
963 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
965 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
966 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
967 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
968 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
969 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455),
970 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
971 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
972 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359),
973 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_4373),
974 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_43012),
975 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_89359),
976 	{ /* end: all zeroes */ }
977 };
978 MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
979 
980 
981 static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
982 						  int val)
983 {
984 #if IS_ENABLED(CONFIG_ACPI)
985 	struct acpi_device *adev;
986 
987 	adev = ACPI_COMPANION(dev);
	if (adev)
		adev->flags.power_manageable = val;
990 #endif
991 }
992 
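/*
 * The MMC core probes each SDIO function separately. All driver state is
 * created when function 2 is probed; the probe for function 1 just returns
 * success so this driver keeps ownership of it, and any other function
 * number is rejected.
 */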
993 static int brcmf_ops_sdio_probe(struct sdio_func *func,
994 				const struct sdio_device_id *id)
995 {
996 	int err;
997 	struct brcmf_sdio_dev *sdiodev;
998 	struct brcmf_bus *bus_if;
999 	struct device *dev;
1000 
1001 	brcmf_dbg(SDIO, "Enter\n");
1002 	brcmf_dbg(SDIO, "Class=%x\n", func->class);
1003 	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
1004 	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
1005 	brcmf_dbg(SDIO, "Function#: %d\n", func->num);
1006 
1007 	dev = &func->dev;
1008 
1009 	/* Set MMC_QUIRK_LENIENT_FN0 for this card */
1010 	func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
1011 
1012 	/* prohibit ACPI power management for this device */
1013 	brcmf_sdiod_acpi_set_power_manageable(dev, 0);
1014 
	/* Consume func num 1 but don't do anything with it. */
1016 	if (func->num == 1)
1017 		return 0;
1018 
1019 	/* Ignore anything but func 2 */
1020 	if (func->num != 2)
1021 		return -ENODEV;
1022 
1023 	bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
1024 	if (!bus_if)
1025 		return -ENOMEM;
1026 	sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
1027 	if (!sdiodev) {
1028 		kfree(bus_if);
1029 		return -ENOMEM;
1030 	}
1031 
1032 	/* store refs to functions used. mmc_card does
1033 	 * not hold the F0 function pointer.
1034 	 */
1035 	sdiodev->func1 = func->card->sdio_func[0];
1036 	sdiodev->func2 = func;
1037 
1038 	sdiodev->bus_if = bus_if;
1039 	bus_if->bus_priv.sdio = sdiodev;
1040 	bus_if->proto_type = BRCMF_PROTO_BCDC;
1041 	dev_set_drvdata(&func->dev, bus_if);
1042 	dev_set_drvdata(&sdiodev->func1->dev, bus_if);
1043 	sdiodev->dev = &sdiodev->func1->dev;
1044 
1045 	brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
1046 
1047 	brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
1048 	err = brcmf_sdiod_probe(sdiodev);
1049 	if (err) {
1050 		brcmf_err("F2 error, probe failed %d...\n", err);
1051 		goto fail;
1052 	}
1053 
1054 	brcmf_dbg(SDIO, "F2 init completed...\n");
1055 	return 0;
1056 
1057 fail:
1058 	dev_set_drvdata(&func->dev, NULL);
1059 	dev_set_drvdata(&sdiodev->func1->dev, NULL);
1060 	kfree(sdiodev);
1061 	kfree(bus_if);
1062 	return err;
1063 }
1064 
1065 static void brcmf_ops_sdio_remove(struct sdio_func *func)
1066 {
1067 	struct brcmf_bus *bus_if;
1068 	struct brcmf_sdio_dev *sdiodev;
1069 
1070 	brcmf_dbg(SDIO, "Enter\n");
1071 	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
1072 	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
1073 	brcmf_dbg(SDIO, "Function: %d\n", func->num);
1074 
1075 	bus_if = dev_get_drvdata(&func->dev);
1076 	if (bus_if) {
1077 		sdiodev = bus_if->bus_priv.sdio;
1078 
1079 		/* start by unregistering irqs */
1080 		brcmf_sdiod_intr_unregister(sdiodev);
1081 
1082 		if (func->num != 1)
1083 			return;
1084 
1085 		/* only proceed with rest of cleanup if func 1 */
1086 		brcmf_sdiod_remove(sdiodev);
1087 
1088 		dev_set_drvdata(&sdiodev->func1->dev, NULL);
1089 		dev_set_drvdata(&sdiodev->func2->dev, NULL);
1090 
1091 		kfree(bus_if);
1092 		kfree(sdiodev);
1093 	}
1094 
1095 	brcmf_dbg(SDIO, "Exit\n");
1096 }
1097 
1098 void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
1099 {
1100 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1101 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
1102 
1103 	brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
1104 	sdiodev->wowl_enabled = enabled;
1105 }
1106 
1107 #ifdef CONFIG_PM_SLEEP
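/*
 * System sleep handlers. The suspend work is done when the callback runs
 * for function 1 and the resume work when it runs for function 2; the call
 * for the other function is a no-op, so each step happens exactly once per
 * card. If the host can keep the card powered, the dongle is frozen and,
 * with wowl enabled, a wakeup source is armed; otherwise the device is
 * removed here and probed again on resume.
 */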
1108 static int brcmf_ops_sdio_suspend(struct device *dev)
1109 {
1110 	struct sdio_func *func;
1111 	struct brcmf_bus *bus_if;
1112 	struct brcmf_sdio_dev *sdiodev;
1113 	mmc_pm_flag_t pm_caps, sdio_flags;
1114 	int ret = 0;
1115 
1116 	func = container_of(dev, struct sdio_func, dev);
1117 	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
1118 	if (func->num != 1)
1119 		return 0;
1120 
1122 	bus_if = dev_get_drvdata(dev);
1123 	sdiodev = bus_if->bus_priv.sdio;
1124 
1125 	pm_caps = sdio_get_host_pm_caps(func);
1126 
1127 	if (pm_caps & MMC_PM_KEEP_POWER) {
1128 		/* preserve card power during suspend */
1129 		brcmf_sdiod_freezer_on(sdiodev);
1130 		brcmf_sdio_wd_timer(sdiodev->bus, 0);
1131 
1132 		sdio_flags = MMC_PM_KEEP_POWER;
1133 		if (sdiodev->wowl_enabled) {
1134 			if (sdiodev->settings->bus.sdio.oob_irq_supported)
1135 				enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
1136 			else
1137 				sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
1138 		}
1139 
1140 		if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
1141 			brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
1142 
1143 	} else {
1144 		/* power will be cut so remove device, probe again in resume */
1145 		brcmf_sdiod_intr_unregister(sdiodev);
1146 		ret = brcmf_sdiod_remove(sdiodev);
1147 		if (ret)
1148 			brcmf_err("Failed to remove device on suspend\n");
1149 	}
1150 
1151 	return ret;
1152 }
1153 
1154 static int brcmf_ops_sdio_resume(struct device *dev)
1155 {
1156 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1157 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
1158 	struct sdio_func *func = container_of(dev, struct sdio_func, dev);
1159 	mmc_pm_flag_t pm_caps = sdio_get_host_pm_caps(func);
1160 	int ret = 0;
1161 
1162 	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
1163 	if (func->num != 2)
1164 		return 0;
1165 
1166 	if (!(pm_caps & MMC_PM_KEEP_POWER)) {
1167 		/* bus was powered off and device removed, probe again */
1168 		ret = brcmf_sdiod_probe(sdiodev);
1169 		if (ret)
1170 			brcmf_err("Failed to probe device on resume\n");
1171 	} else {
1172 		if (sdiodev->wowl_enabled &&
1173 		    sdiodev->settings->bus.sdio.oob_irq_supported)
1174 			disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
1175 
1176 		brcmf_sdiod_freezer_off(sdiodev);
1177 	}
1178 
1179 	return ret;
1180 }
1181 
1182 static const struct dev_pm_ops brcmf_sdio_pm_ops = {
1183 	.suspend	= brcmf_ops_sdio_suspend,
1184 	.resume		= brcmf_ops_sdio_resume,
1185 };
1186 #endif	/* CONFIG_PM_SLEEP */
1187 
1188 static struct sdio_driver brcmf_sdmmc_driver = {
1189 	.probe = brcmf_ops_sdio_probe,
1190 	.remove = brcmf_ops_sdio_remove,
1191 	.name = KBUILD_MODNAME,
1192 	.id_table = brcmf_sdmmc_ids,
1193 	.drv = {
1194 		.owner = THIS_MODULE,
1195 #ifdef CONFIG_PM_SLEEP
1196 		.pm = &brcmf_sdio_pm_ops,
1197 #endif	/* CONFIG_PM_SLEEP */
1198 		.coredump = brcmf_dev_coredump,
1199 	},
1200 };
1201 
1202 void brcmf_sdio_register(void)
1203 {
1204 	int ret;
1205 
1206 	ret = sdio_register_driver(&brcmf_sdmmc_driver);
1207 	if (ret)
1208 		brcmf_err("sdio_register_driver failed: %d\n", ret);
1209 }
1210 
1211 void brcmf_sdio_exit(void)
1212 {
1213 	brcmf_dbg(SDIO, "Enter\n");
1214 
1215 	sdio_unregister_driver(&brcmf_sdmmc_driver);
1216 }
1217 
1218