xref: /freebsd/sys/contrib/dev/athk/ath10k/sdio.c (revision 07724ba6)
1da8fa4e3SBjoern A. Zeeb // SPDX-License-Identifier: ISC
2da8fa4e3SBjoern A. Zeeb /*
3da8fa4e3SBjoern A. Zeeb  * Copyright (c) 2004-2011 Atheros Communications Inc.
4da8fa4e3SBjoern A. Zeeb  * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
5da8fa4e3SBjoern A. Zeeb  * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
6da8fa4e3SBjoern A. Zeeb  */
7da8fa4e3SBjoern A. Zeeb 
8da8fa4e3SBjoern A. Zeeb #include <linux/module.h>
9da8fa4e3SBjoern A. Zeeb #include <linux/mmc/card.h>
10da8fa4e3SBjoern A. Zeeb #include <linux/mmc/mmc.h>
11da8fa4e3SBjoern A. Zeeb #include <linux/mmc/host.h>
12da8fa4e3SBjoern A. Zeeb #include <linux/mmc/sdio_func.h>
13da8fa4e3SBjoern A. Zeeb #include <linux/mmc/sdio_ids.h>
14da8fa4e3SBjoern A. Zeeb #include <linux/mmc/sdio.h>
15da8fa4e3SBjoern A. Zeeb #include <linux/mmc/sd.h>
16da8fa4e3SBjoern A. Zeeb #include <linux/bitfield.h>
17da8fa4e3SBjoern A. Zeeb #include "core.h"
18da8fa4e3SBjoern A. Zeeb #include "bmi.h"
19da8fa4e3SBjoern A. Zeeb #include "debug.h"
20da8fa4e3SBjoern A. Zeeb #include "hif.h"
21da8fa4e3SBjoern A. Zeeb #include "htc.h"
22da8fa4e3SBjoern A. Zeeb #include "mac.h"
23da8fa4e3SBjoern A. Zeeb #include "targaddrs.h"
24da8fa4e3SBjoern A. Zeeb #include "trace.h"
25da8fa4e3SBjoern A. Zeeb #include "sdio.h"
26da8fa4e3SBjoern A. Zeeb #include "coredump.h"
27da8fa4e3SBjoern A. Zeeb 
28da8fa4e3SBjoern A. Zeeb void ath10k_sdio_fw_crashed_dump(struct ath10k *ar);
29da8fa4e3SBjoern A. Zeeb 
30da8fa4e3SBjoern A. Zeeb #define ATH10K_SDIO_VSG_BUF_SIZE	(64 * 1024)
31da8fa4e3SBjoern A. Zeeb 
32da8fa4e3SBjoern A. Zeeb /* inlined helper functions */
33da8fa4e3SBjoern A. Zeeb 
/* Round a tx/rx transfer length up to the mailbox block boundary.
 * block_mask comes from ar_sdio->mbox_info (set up at probe time).
 */
static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
						   size_t len)
{
	size_t block_mask = ar_sdio->mbox_info.block_mask;

	return __ALIGN_MASK(len, block_mask);
}
39da8fa4e3SBjoern A. Zeeb 
/* On SDIO the HIF pipe id maps 1:1 onto the HTC endpoint id. */
static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)
{
	enum ath10k_htc_ep_id eid = (enum ath10k_htc_ep_id)pipe_id;

	return eid;
}
44da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data * pkt)45da8fa4e3SBjoern A. Zeeb static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
46da8fa4e3SBjoern A. Zeeb {
47da8fa4e3SBjoern A. Zeeb 	dev_kfree_skb(pkt->skb);
48da8fa4e3SBjoern A. Zeeb 	pkt->skb = NULL;
49da8fa4e3SBjoern A. Zeeb 	pkt->alloc_len = 0;
50da8fa4e3SBjoern A. Zeeb 	pkt->act_len = 0;
51da8fa4e3SBjoern A. Zeeb 	pkt->trailer_only = false;
52da8fa4e3SBjoern A. Zeeb }
53da8fa4e3SBjoern A. Zeeb 
/* Allocate an skb of full_len (the block-padded size) for an RX packet and
 * record its actual payload length and bundle membership.
 *
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
						size_t act_len, size_t full_len,
						bool part_of_bundle,
						bool last_in_bundle)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(full_len);
	if (!skb)
		return -ENOMEM;

	pkt->skb = skb;
	pkt->act_len = act_len;
	pkt->alloc_len = full_len;
	pkt->part_of_bundle = part_of_bundle;
	pkt->last_in_bundle = last_in_bundle;
	pkt->trailer_only = false;

	return 0;
}
71da8fa4e3SBjoern A. Zeeb 
is_trailer_only_msg(struct ath10k_sdio_rx_data * pkt)72da8fa4e3SBjoern A. Zeeb static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
73da8fa4e3SBjoern A. Zeeb {
74da8fa4e3SBjoern A. Zeeb 	bool trailer_only = false;
75da8fa4e3SBjoern A. Zeeb 	struct ath10k_htc_hdr *htc_hdr =
76da8fa4e3SBjoern A. Zeeb 		(struct ath10k_htc_hdr *)pkt->skb->data;
77da8fa4e3SBjoern A. Zeeb 	u16 len = __le16_to_cpu(htc_hdr->len);
78da8fa4e3SBjoern A. Zeeb 
79da8fa4e3SBjoern A. Zeeb 	if (len == htc_hdr->trailer_len)
80da8fa4e3SBjoern A. Zeeb 		trailer_only = true;
81da8fa4e3SBjoern A. Zeeb 
82da8fa4e3SBjoern A. Zeeb 	return trailer_only;
83da8fa4e3SBjoern A. Zeeb }
84da8fa4e3SBjoern A. Zeeb 
85da8fa4e3SBjoern A. Zeeb /* sdio/mmc functions */
86da8fa4e3SBjoern A. Zeeb 
/* Build the 32-bit argument for an SDIO CMD52 (SD_IO_RW_DIRECT) transfer:
 * bit 31 is the R/W flag, bit 27 the RAW flag, bits 25:9 the register
 * address and bits 7:0 the data byte. Bits 26 and 8 are set as fixed
 * values matching the layout the firmware expects.
 */
static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	u32 cmd = 0;

	cmd |= FIELD_PREP(BIT(31), write);
	cmd |= FIELD_PREP(BIT(27), raw);
	cmd |= FIELD_PREP(BIT(26), 1);
	cmd |= FIELD_PREP(GENMASK(25, 9), address);
	cmd |= FIELD_PREP(BIT(8), 1);
	cmd |= FIELD_PREP(GENMASK(7, 0), val);

	*arg = cmd;
}
98da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card * card,unsigned int address,unsigned char byte)99da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
100da8fa4e3SBjoern A. Zeeb 					   unsigned int address,
101da8fa4e3SBjoern A. Zeeb 					   unsigned char byte)
102da8fa4e3SBjoern A. Zeeb {
103da8fa4e3SBjoern A. Zeeb 	struct mmc_command io_cmd;
104da8fa4e3SBjoern A. Zeeb 
105da8fa4e3SBjoern A. Zeeb 	memset(&io_cmd, 0, sizeof(io_cmd));
106da8fa4e3SBjoern A. Zeeb 	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
107da8fa4e3SBjoern A. Zeeb 	io_cmd.opcode = SD_IO_RW_DIRECT;
108da8fa4e3SBjoern A. Zeeb 	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
109da8fa4e3SBjoern A. Zeeb 
110da8fa4e3SBjoern A. Zeeb 	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
111da8fa4e3SBjoern A. Zeeb }
112da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card * card,unsigned int address,unsigned char * byte)113da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,
114da8fa4e3SBjoern A. Zeeb 					   unsigned int address,
115da8fa4e3SBjoern A. Zeeb 					   unsigned char *byte)
116da8fa4e3SBjoern A. Zeeb {
117da8fa4e3SBjoern A. Zeeb 	struct mmc_command io_cmd;
118da8fa4e3SBjoern A. Zeeb 	int ret;
119da8fa4e3SBjoern A. Zeeb 
120da8fa4e3SBjoern A. Zeeb 	memset(&io_cmd, 0, sizeof(io_cmd));
121da8fa4e3SBjoern A. Zeeb 	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);
122da8fa4e3SBjoern A. Zeeb 	io_cmd.opcode = SD_IO_RW_DIRECT;
123da8fa4e3SBjoern A. Zeeb 	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
124da8fa4e3SBjoern A. Zeeb 
125da8fa4e3SBjoern A. Zeeb 	ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
126da8fa4e3SBjoern A. Zeeb 	if (!ret)
127da8fa4e3SBjoern A. Zeeb 		*byte = io_cmd.resp[0];
128da8fa4e3SBjoern A. Zeeb 
129da8fa4e3SBjoern A. Zeeb 	return ret;
130da8fa4e3SBjoern A. Zeeb }
131da8fa4e3SBjoern A. Zeeb 
/* Configure the SDIO card for ath10k operation: drive strength, 4-bit
 * asynchronous interrupt mode, async interrupt delay and the mbox block
 * size. Called with the function powered but before normal I/O starts.
 *
 * Returns 0 on success or a negative error code from one of the checked
 * accesses below.
 *
 * NOTE(review): several of the read-modify-write steps overwrite 'ret'
 * without checking the previous value, so a failed CMD52 read (leaving
 * byte == 0) or the drive-strength / async-delay writes failing is
 * silently ignored. This matches the upstream driver; confirm before
 * tightening, as some of these may be best-effort on real hardware.
 */
static int ath10k_sdio_config(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	unsigned char byte, asyncintdelay = 2;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");

	sdio_claim_host(func);

	/* Read-modify-write the drive strength (DTSx) field to type D. */
	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
					      SDIO_CCCR_DRIVE_STRENGTH,
					      &byte);

	byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
	byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
			   ATH10K_SDIO_DRIVE_DTSX_TYPE_D);

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      SDIO_CCCR_DRIVE_STRENGTH,
					      byte);

	/* Enable the vendor driver-strength settings A, C and D. */
	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(
		func->card,
		CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
		&byte);

	byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
					      byte);
	if (ret) {
		ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);
		goto out;
	}

	/* Switch to 4-bit asynchronous interrupt mode (SDIO 3.0). */
	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
					      &byte);

	byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
					      byte);
	if (ret) {
		ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
			    ret);
		goto out;
	}

	/* Program the async interrupt delay field (value 2). */
	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
					      &byte);

	byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;
	byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);

	/* NOTE(review): this write's return value is overwritten by
	 * sdio_set_block_size() below without being checked.
	 */
	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
					      byte);

	/* give us some time to enable, in ms */
	func->enable_timeout = 100;

	ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
	if (ret) {
		ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
			    ar_sdio->mbox_info.block_size, ret);
		goto out;
	}

out:
	sdio_release_host(func);
	return ret;
}
216da8fa4e3SBjoern A. Zeeb 
/* Write a 32-bit value to a device register address.
 * Claims the host around the access; returns 0 or the sdio core error.
 */
static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	sdio_writel(func, val, addr, &ret);
	if (ret)
		ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",
			    val, addr, ret);
	else
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio write32 addr 0x%x val 0x%x\n", addr, val);

	sdio_release_host(func);

	return ret;
}
240da8fa4e3SBjoern A. Zeeb 
/* Write a 32-bit little-endian value to a fixed (non-incrementing)
 * device address. The value is staged in a small kmalloc'd buffer
 * before handing it to sdio_writesb() rather than using a stack
 * variable.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the sdio
 * core error.
 */
static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	__le32 *buf;
	int ret;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = cpu_to_le32(val);

	sdio_claim_host(func);

	ret = sdio_writesb(func, addr, buf, sizeof(*buf));
	if (ret)
		ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",
			    val, addr, ret);
	else
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio writesb32 addr 0x%x val 0x%x\n", addr, val);

	sdio_release_host(func);

	kfree(buf);

	return ret;
}
273da8fa4e3SBjoern A. Zeeb 
/* Read a 32-bit value from a device register address into *val.
 * Claims the host around the access; returns 0 or the sdio core error.
 */
static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	*val = sdio_readl(func, addr, &ret);
	if (ret)
		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
			    addr, ret);
	else
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio read32 addr 0x%x val 0x%x\n", addr, *val);

	sdio_release_host(func);

	return ret;
}
296da8fa4e3SBjoern A. Zeeb 
/* Read 'len' bytes from an incrementing device address into buf.
 * Claims the host around the transfer; returns 0 or the sdio core error.
 */
static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	ret = sdio_memcpy_fromio(func, buf, addr, len);
	if (ret) {
		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
			    addr, ret);
	} else {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio read addr 0x%x buf 0x%p len %zu\n",
			   addr, buf, len);
		ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL,
				"sdio read ", buf, len);
	}

	sdio_release_host(func);

	return ret;
}
321da8fa4e3SBjoern A. Zeeb 
/* Write 'len' bytes from buf to an incrementing device address.
 * Claims the host around the transfer; returns 0 or the sdio core error.
 */
static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	/* For some reason toio() doesn't have const for the buffer, need
	 * an ugly hack to workaround that.
	 */
	ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
	if (ret) {
		ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
			    addr, ret);
	} else {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio write addr 0x%x buf 0x%p len %zu\n",
			   addr, buf, len);
		ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL,
				"sdio write ", buf, len);
	}

	sdio_release_host(func);

	return ret;
}
349da8fa4e3SBjoern A. Zeeb 
/* Read from a fixed (non-incrementing) device address into buf.
 * The requested length is rounded DOWN to a whole number of mbox
 * blocks before the transfer. Returns 0 or the sdio core error.
 */
static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	len = round_down(len, ar_sdio->mbox_info.block_size);

	ret = sdio_readsb(func, buf, addr, len);
	if (ret) {
		ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
			    addr, ret);
	} else {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio readsb addr 0x%x buf 0x%p len %zu\n",
			   addr, buf, len);
		ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL,
				"sdio readsb ", buf, len);
	}

	sdio_release_host(func);

	return ret;
}
376da8fa4e3SBjoern A. Zeeb 
377da8fa4e3SBjoern A. Zeeb /* HIF mbox functions */
378da8fa4e3SBjoern A. Zeeb 
/* Parse a single received HTC message: process a trailer if one is
 * present (possibly extracting new lookaheads into lookaheads /
 * n_lookaheads), trim the trailer off and strip the HTC header so the
 * skb holds only the payload.
 *
 * Marks pkt->trailer_only when the message carried nothing but a
 * trailer. Returns 0 or the error from ath10k_htc_process_trailer().
 */
static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
					      struct ath10k_sdio_rx_data *pkt,
					      u32 *lookaheads,
					      int *n_lookaheads)
{
	struct sk_buff *skb = pkt->skb;
	struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
	int ret;

	if (htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT) {
		u8 *trailer = skb->data + skb->len - htc_hdr->trailer_len;
		enum ath10k_htc_ep_id eid = pipe_id_to_eid(htc_hdr->eid);

		ret = ath10k_htc_process_trailer(&ar->htc, trailer,
						 htc_hdr->trailer_len, eid,
						 lookaheads, n_lookaheads);
		if (ret)
			return ret;

		if (is_trailer_only_msg(pkt))
			pkt->trailer_only = true;

		/* Drop the trailer from the tail of the message. */
		skb_trim(skb, skb->len - htc_hdr->trailer_len);
	}

	/* Strip the HTC header; only the payload remains. */
	skb_pull(skb, sizeof(*htc_hdr));

	return 0;
}
416da8fa4e3SBjoern A. Zeeb 
/* Process all fetched RX packets (ar_sdio->rx_pkts[0..n_rx_pkts)): parse
 * each one, then hand non-trailer-only payloads to the deferred RX
 * worker. Lookaheads found in trailers are written back into
 * lookaheads[] / *n_lookahead for the next fetch round.
 *
 * On success all skbs are either queued (ownership transferred) or
 * freed; on error the remaining unprocessed packets are freed before
 * returning.
 */
static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
					       u32 lookaheads[],
					       int *n_lookahead)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_sdio_rx_data *pkt;
	struct ath10k_htc_ep *ep;
	struct ath10k_skb_rxcb *cb;
	enum ath10k_htc_ep_id id;
	int ret, i, *n_lookahead_local;
	u32 *lookahead_local;
	int lookahead_idx = 0;

	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
		lookaheads_local = lookaheads;
		n_lookahead_local = n_lookahead;

		/* The endpoint id comes from the HTC header embedded in
		 * this packet's lookahead word.
		 */
		id = ((struct ath10k_htc_hdr *)
		      &lookaheads[lookahead_idx++])->eid;

		if (id >= ATH10K_HTC_EP_COUNT) {
			ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
				    id);
			/* NOTE(review): -ENOMEM for malformed input is odd
			 * (-EINVAL would fit better) but kept as-is.
			 */
			ret = -ENOMEM;
			goto out;
		}

		ep = &htc->endpoint[id];

		if (ep->service_id == 0) {
			ath10k_warn(ar, "ep %d is not connected\n", id);
			ret = -ENOMEM;
			goto out;
		}

		pkt = &ar_sdio->rx_pkts[i];

		if (pkt->part_of_bundle && !pkt->last_in_bundle) {
			/* Only read lookahead's from RX trailers
			 * for the last packet in a bundle.
			 */
			lookahead_idx--;
			lookaheads_local = NULL;
			n_lookahead_local = NULL;
		}

		ret = ath10k_sdio_mbox_rx_process_packet(ar,
							 pkt,
							 lookaheads_local,
							 n_lookahead_local);
		if (ret)
			goto out;

		if (!pkt->trailer_only) {
			/* Tag the skb with its endpoint and defer delivery
			 * to the auxiliary workqueue.
			 */
			cb = ATH10K_SKB_RXCB(pkt->skb);
			cb->eid = id;

			skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
			queue_work(ar->workqueue_aux,
				   &ar_sdio->async_work_rx);
		} else {
			/* Trailer-only messages carry no payload to deliver. */
			kfree_skb(pkt->skb);
		}

		/* The RX complete handler now owns the skb...*/
		pkt->skb = NULL;
		pkt->alloc_len = 0;
	}

	ret = 0;

out:
	/* Free all packets that was not passed on to the RX completion
	 * handler...
	 */
	for (; i < ar_sdio->n_rx_pkts; i++)
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);

	return ret;
}
498da8fa4e3SBjoern A. Zeeb 
/* Allocate RX buffers for the extra packets of an HTC bundle. The
 * packet that carries the ATH10K_HTC_FLAG_BUNDLE_MASK flag itself is
 * NOT included in *bndl_cnt; its skb is allocated separately by the
 * caller.
 *
 * Returns 0 on success, -ENOMEM if the advertised bundle count exceeds
 * the negotiated maximum or if an skb allocation fails.
 */
static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar,
					 struct ath10k_sdio_rx_data *rx_pkts,
					 struct ath10k_htc_hdr *htc_hdr,
					 size_t full_len, size_t act_len,
					 size_t *bndl_cnt)
{
	u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;
	size_t cnt;
	int ret, i;

	cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);
	*bndl_cnt = cnt;

	if (cnt > max_msgs) {
		ath10k_warn(ar,
			    "HTC bundle length %u exceeds maximum %u\n",
			    le16_to_cpu(htc_hdr->len),
			    max_msgs);
		return -ENOMEM;
	}

	for (i = 0; i < cnt; i++) {
		ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i], act_len,
						    full_len, true, false);
		if (ret)
			return ret;
	}

	return 0;
}
536da8fa4e3SBjoern A. Zeeb 
/* Allocate RX buffers for every message announced by the lookahead
 * words. Bundled messages get one padded-size skb per bundle member;
 * ar_sdio->n_rx_pkts is set to the total number of buffers prepared.
 *
 * Returns 0 on success; on any failure every buffer allocated so far is
 * freed and a negative error code is returned. A lookahead advertising
 * an over-long payload additionally triggers firmware recovery.
 */
static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
				     u32 lookaheads[], int n_lookaheads)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc_hdr *htc_hdr;
	size_t full_len, act_len;
	bool last_in_bundle;
	int ret, i;
	int pkt_cnt = 0;

	if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
		ath10k_warn(ar, "the total number of pkts to be fetched (%u) exceeds maximum %u\n",
			    n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < n_lookaheads; i++) {
		/* Each lookahead word is the start of an HTC header. */
		htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
		last_in_bundle = false;

		if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
			ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n",
				    le16_to_cpu(htc_hdr->len),
				    ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
			ret = -ENOMEM;

			/* An over-long lookahead indicates corrupted state;
			 * restart the firmware rather than continue.
			 */
			ath10k_core_start_recovery(ar);
			ath10k_warn(ar, "exceeds length, start recovery\n");

			goto err;
		}

		/* act_len is header + payload; full_len pads it up to the
		 * mbox block size for the SDIO transfer.
		 */
		act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
		full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);

		if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
			ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
				    htc_hdr->eid, htc_hdr->flags,
				    le16_to_cpu(htc_hdr->len));
			ret = -EINVAL;
			goto err;
		}

		if (ath10k_htc_get_bundle_count(
			ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
			/* HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			size_t bndl_cnt;

			ret = ath10k_sdio_mbox_alloc_bundle(ar,
							    &ar_sdio->rx_pkts[pkt_cnt],
							    htc_hdr,
							    full_len,
							    act_len,
							    &bndl_cnt);

			if (ret) {
				ath10k_warn(ar, "failed to allocate a bundle: %d\n",
					    ret);
				goto err;
			}

			pkt_cnt += bndl_cnt;

			/* next buffer will be the last in the bundle */
			last_in_bundle = true;
		}

		/* Allocate skb for packet. If the packet had the
		 * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
		 * packet skb's have been allocated in the previous step.
		 */
		if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
			full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;

		ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
						    act_len,
						    full_len,
						    last_in_bundle,
						    last_in_bundle);
		if (ret) {
			ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
			goto err;
		}

		pkt_cnt++;
	}

	ar_sdio->n_rx_pkts = pkt_cnt;

	return 0;

err:
	/* Slots are filled in order, so the first one with alloc_len == 0
	 * marks the end of the allocated buffers.
	 */
	for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
		if (!ar_sdio->rx_pkts[i].alloc_len)
			break;
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
	}

	return ret;
640da8fa4e3SBjoern A. Zeeb }
641da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_mbox_rx_fetch(struct ath10k * ar)642da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
643da8fa4e3SBjoern A. Zeeb {
644da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
645da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
646da8fa4e3SBjoern A. Zeeb 	struct sk_buff *skb = pkt->skb;
647da8fa4e3SBjoern A. Zeeb 	struct ath10k_htc_hdr *htc_hdr;
648da8fa4e3SBjoern A. Zeeb 	int ret;
649da8fa4e3SBjoern A. Zeeb 
650da8fa4e3SBjoern A. Zeeb 	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
651da8fa4e3SBjoern A. Zeeb 				 skb->data, pkt->alloc_len);
652da8fa4e3SBjoern A. Zeeb 	if (ret)
653da8fa4e3SBjoern A. Zeeb 		goto err;
654da8fa4e3SBjoern A. Zeeb 
655da8fa4e3SBjoern A. Zeeb 	htc_hdr = (struct ath10k_htc_hdr *)skb->data;
656da8fa4e3SBjoern A. Zeeb 	pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
657da8fa4e3SBjoern A. Zeeb 
658da8fa4e3SBjoern A. Zeeb 	if (pkt->act_len > pkt->alloc_len) {
659da8fa4e3SBjoern A. Zeeb 		ret = -EINVAL;
660da8fa4e3SBjoern A. Zeeb 		goto err;
661da8fa4e3SBjoern A. Zeeb 	}
662da8fa4e3SBjoern A. Zeeb 
663da8fa4e3SBjoern A. Zeeb 	skb_put(skb, pkt->act_len);
664da8fa4e3SBjoern A. Zeeb 	return 0;
665da8fa4e3SBjoern A. Zeeb 
666da8fa4e3SBjoern A. Zeeb err:
667da8fa4e3SBjoern A. Zeeb 	ar_sdio->n_rx_pkts = 0;
668da8fa4e3SBjoern A. Zeeb 	ath10k_sdio_mbox_free_rx_pkt(pkt);
669da8fa4e3SBjoern A. Zeeb 
670da8fa4e3SBjoern A. Zeeb 	return ret;
671da8fa4e3SBjoern A. Zeeb }
672da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k * ar)673da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
674da8fa4e3SBjoern A. Zeeb {
675da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
676da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio_rx_data *pkt;
677da8fa4e3SBjoern A. Zeeb 	struct ath10k_htc_hdr *htc_hdr;
678da8fa4e3SBjoern A. Zeeb 	int ret, i;
679da8fa4e3SBjoern A. Zeeb 	u32 pkt_offset, virt_pkt_len;
680da8fa4e3SBjoern A. Zeeb 
681da8fa4e3SBjoern A. Zeeb 	virt_pkt_len = 0;
682da8fa4e3SBjoern A. Zeeb 	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
683da8fa4e3SBjoern A. Zeeb 		virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;
684da8fa4e3SBjoern A. Zeeb 
685da8fa4e3SBjoern A. Zeeb 	if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
686da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len);
687da8fa4e3SBjoern A. Zeeb 		ret = -E2BIG;
688da8fa4e3SBjoern A. Zeeb 		goto err;
689da8fa4e3SBjoern A. Zeeb 	}
690da8fa4e3SBjoern A. Zeeb 
691da8fa4e3SBjoern A. Zeeb 	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
692da8fa4e3SBjoern A. Zeeb 				 ar_sdio->vsg_buffer, virt_pkt_len);
693da8fa4e3SBjoern A. Zeeb 	if (ret) {
694da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "failed to read bundle packets: %d", ret);
695da8fa4e3SBjoern A. Zeeb 		goto err;
696da8fa4e3SBjoern A. Zeeb 	}
697da8fa4e3SBjoern A. Zeeb 
698da8fa4e3SBjoern A. Zeeb 	pkt_offset = 0;
699da8fa4e3SBjoern A. Zeeb 	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
700da8fa4e3SBjoern A. Zeeb 		pkt = &ar_sdio->rx_pkts[i];
701da8fa4e3SBjoern A. Zeeb 		htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
702da8fa4e3SBjoern A. Zeeb 		pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
703da8fa4e3SBjoern A. Zeeb 
704da8fa4e3SBjoern A. Zeeb 		if (pkt->act_len > pkt->alloc_len) {
705da8fa4e3SBjoern A. Zeeb 			ret = -EINVAL;
706da8fa4e3SBjoern A. Zeeb 			goto err;
707da8fa4e3SBjoern A. Zeeb 		}
708da8fa4e3SBjoern A. Zeeb 
709da8fa4e3SBjoern A. Zeeb 		skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
710da8fa4e3SBjoern A. Zeeb 		pkt_offset += pkt->alloc_len;
711da8fa4e3SBjoern A. Zeeb 	}
712da8fa4e3SBjoern A. Zeeb 
713da8fa4e3SBjoern A. Zeeb 	return 0;
714da8fa4e3SBjoern A. Zeeb 
715da8fa4e3SBjoern A. Zeeb err:
716da8fa4e3SBjoern A. Zeeb 	/* Free all packets that was not successfully fetched. */
717da8fa4e3SBjoern A. Zeeb 	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
718da8fa4e3SBjoern A. Zeeb 		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
719da8fa4e3SBjoern A. Zeeb 
720da8fa4e3SBjoern A. Zeeb 	ar_sdio->n_rx_pkts = 0;
721da8fa4e3SBjoern A. Zeeb 
722da8fa4e3SBjoern A. Zeeb 	return ret;
723da8fa4e3SBjoern A. Zeeb }
724da8fa4e3SBjoern A. Zeeb 
725da8fa4e3SBjoern A. Zeeb /* This is the timeout for mailbox processing done in the sdio irq
726da8fa4e3SBjoern A. Zeeb  * handler. The timeout is deliberately set quite high since SDIO dump logs
727da8fa4e3SBjoern A. Zeeb  * over serial port can/will add a substantial overhead to the processing
728da8fa4e3SBjoern A. Zeeb  * (if enabled).
729da8fa4e3SBjoern A. Zeeb  */
730da8fa4e3SBjoern A. Zeeb #define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)
731da8fa4e3SBjoern A. Zeeb 
/* Drain all pending mailbox RX messages indicated by a lookahead word.
 *
 * @msg_lookahead: initial lookahead obtained from the HTC register
 *	table; it describes the first pending message.
 * @done: set to false whenever the IRQ status registers should be
 *	re-checked before leaving interrupt processing.
 *
 * The loop alternates between allocating RX buffers, fetching the
 * packets (bundled or single) and processing them; processing may
 * yield new lookahead reports that keep the loop going until none
 * remain or the processing timeout expires.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
						  u32 msg_lookahead, bool *done)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
	int n_lookaheads = 1;
	unsigned long timeout;
	int ret;

	*done = true;

	/* Copy the lookahead obtained from the HTC register table into our
	 * temp array as a start value.
	 */
	lookaheads[0] = msg_lookahead;

	timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
	do {
		/* Try to allocate as many HTC RX packets indicated by
		 * n_lookaheads.
		 */
		ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
						n_lookaheads);
		if (ret)
			break;

		if (ar_sdio->n_rx_pkts >= 2)
			/* A recv bundle was detected, force IRQ status
			 * re-check again.
			 */
			*done = false;

		if (ar_sdio->n_rx_pkts > 1)
			ret = ath10k_sdio_mbox_rx_fetch_bundle(ar);
		else
			ret = ath10k_sdio_mbox_rx_fetch(ar);

		/* NOTE(review): a fetch error is overwritten by the call
		 * below; the fetch paths reset n_rx_pkts to 0 on failure so
		 * processing becomes a no-op, but the error value itself is
		 * lost - confirm this is intended.
		 */
		/* Process fetched packets. This will potentially update
		 * n_lookaheads depending on if the packets contain lookahead
		 * reports.
		 */
		n_lookaheads = 0;
		ret = ath10k_sdio_mbox_rx_process_packets(ar,
							  lookaheads,
							  &n_lookaheads);

		if (!n_lookaheads || ret)
			break;

		/* For SYNCH processing, if we get here, we are running
		 * through the loop again due to updated lookaheads. Set
		 * flag that we should re-check IRQ status registers again
		 * before leaving IRQ processing, this can net better
		 * performance in high throughput situations.
		 */
		*done = false;
	} while (time_before(jiffies, timeout));

	if (ret && (ret != -ECANCELED))
		ath10k_warn(ar, "failed to get pending recv messages: %d\n",
			    ret);

	return ret;
}
796da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_mbox_proc_dbg_intr(struct ath10k * ar)797da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
798da8fa4e3SBjoern A. Zeeb {
799da8fa4e3SBjoern A. Zeeb 	u32 val;
800da8fa4e3SBjoern A. Zeeb 	int ret;
801da8fa4e3SBjoern A. Zeeb 
802da8fa4e3SBjoern A. Zeeb 	/* TODO: Add firmware crash handling */
803da8fa4e3SBjoern A. Zeeb 	ath10k_warn(ar, "firmware crashed\n");
804da8fa4e3SBjoern A. Zeeb 
805da8fa4e3SBjoern A. Zeeb 	/* read counter to clear the interrupt, the debug error interrupt is
806da8fa4e3SBjoern A. Zeeb 	 * counter 0.
807da8fa4e3SBjoern A. Zeeb 	 */
808da8fa4e3SBjoern A. Zeeb 	ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
809da8fa4e3SBjoern A. Zeeb 	if (ret)
810da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);
811da8fa4e3SBjoern A. Zeeb 
812da8fa4e3SBjoern A. Zeeb 	return ret;
813da8fa4e3SBjoern A. Zeeb }
814da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_mbox_proc_counter_intr(struct ath10k * ar)815da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
816da8fa4e3SBjoern A. Zeeb {
817da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
818da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
819da8fa4e3SBjoern A. Zeeb 	u8 counter_int_status;
820da8fa4e3SBjoern A. Zeeb 	int ret;
821da8fa4e3SBjoern A. Zeeb 
822da8fa4e3SBjoern A. Zeeb 	mutex_lock(&irq_data->mtx);
823da8fa4e3SBjoern A. Zeeb 	counter_int_status = irq_data->irq_proc_reg->counter_int_status &
824da8fa4e3SBjoern A. Zeeb 			     irq_data->irq_en_reg->cntr_int_status_en;
825da8fa4e3SBjoern A. Zeeb 
826da8fa4e3SBjoern A. Zeeb 	/* NOTE: other modules like GMBOX may use the counter interrupt for
827da8fa4e3SBjoern A. Zeeb 	 * credit flow control on other counters, we only need to check for
828da8fa4e3SBjoern A. Zeeb 	 * the debug assertion counter interrupt.
829da8fa4e3SBjoern A. Zeeb 	 */
830da8fa4e3SBjoern A. Zeeb 	if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
831da8fa4e3SBjoern A. Zeeb 		ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
832da8fa4e3SBjoern A. Zeeb 	else
833da8fa4e3SBjoern A. Zeeb 		ret = 0;
834da8fa4e3SBjoern A. Zeeb 
835da8fa4e3SBjoern A. Zeeb 	mutex_unlock(&irq_data->mtx);
836da8fa4e3SBjoern A. Zeeb 
837da8fa4e3SBjoern A. Zeeb 	return ret;
838da8fa4e3SBjoern A. Zeeb }
839da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_mbox_proc_err_intr(struct ath10k * ar)840da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
841da8fa4e3SBjoern A. Zeeb {
842da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
843da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
844da8fa4e3SBjoern A. Zeeb 	u8 error_int_status;
845da8fa4e3SBjoern A. Zeeb 	int ret;
846da8fa4e3SBjoern A. Zeeb 
847da8fa4e3SBjoern A. Zeeb 	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");
848da8fa4e3SBjoern A. Zeeb 
849da8fa4e3SBjoern A. Zeeb 	error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
850da8fa4e3SBjoern A. Zeeb 	if (!error_int_status) {
851da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
852da8fa4e3SBjoern A. Zeeb 			    error_int_status);
853da8fa4e3SBjoern A. Zeeb 		return -EIO;
854da8fa4e3SBjoern A. Zeeb 	}
855da8fa4e3SBjoern A. Zeeb 
856da8fa4e3SBjoern A. Zeeb 	ath10k_dbg(ar, ATH10K_DBG_SDIO,
857da8fa4e3SBjoern A. Zeeb 		   "sdio error_int_status 0x%x\n", error_int_status);
858da8fa4e3SBjoern A. Zeeb 
859da8fa4e3SBjoern A. Zeeb 	if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
860da8fa4e3SBjoern A. Zeeb 		      error_int_status))
861da8fa4e3SBjoern A. Zeeb 		ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");
862da8fa4e3SBjoern A. Zeeb 
863da8fa4e3SBjoern A. Zeeb 	if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
864da8fa4e3SBjoern A. Zeeb 		      error_int_status))
865da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "rx underflow interrupt error\n");
866da8fa4e3SBjoern A. Zeeb 
867da8fa4e3SBjoern A. Zeeb 	if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
868da8fa4e3SBjoern A. Zeeb 		      error_int_status))
869da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "tx overflow interrupt error\n");
870da8fa4e3SBjoern A. Zeeb 
871da8fa4e3SBjoern A. Zeeb 	/* Clear the interrupt */
872da8fa4e3SBjoern A. Zeeb 	irq_data->irq_proc_reg->error_int_status &= ~error_int_status;
873da8fa4e3SBjoern A. Zeeb 
874da8fa4e3SBjoern A. Zeeb 	/* set W1C value to clear the interrupt, this hits the register first */
875da8fa4e3SBjoern A. Zeeb 	ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
876da8fa4e3SBjoern A. Zeeb 				    error_int_status);
877da8fa4e3SBjoern A. Zeeb 	if (ret) {
878da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "unable to write to error int status address: %d\n",
879da8fa4e3SBjoern A. Zeeb 			    ret);
880da8fa4e3SBjoern A. Zeeb 		return ret;
881da8fa4e3SBjoern A. Zeeb 	}
882da8fa4e3SBjoern A. Zeeb 
883da8fa4e3SBjoern A. Zeeb 	return 0;
884da8fa4e3SBjoern A. Zeeb }
885da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_mbox_proc_cpu_intr(struct ath10k * ar)886da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
887da8fa4e3SBjoern A. Zeeb {
888da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
889da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
890da8fa4e3SBjoern A. Zeeb 	u8 cpu_int_status;
891da8fa4e3SBjoern A. Zeeb 	int ret;
892da8fa4e3SBjoern A. Zeeb 
893da8fa4e3SBjoern A. Zeeb 	mutex_lock(&irq_data->mtx);
894da8fa4e3SBjoern A. Zeeb 	cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
895da8fa4e3SBjoern A. Zeeb 			 irq_data->irq_en_reg->cpu_int_status_en;
896da8fa4e3SBjoern A. Zeeb 	if (!cpu_int_status) {
897da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "CPU interrupt status is zero\n");
898da8fa4e3SBjoern A. Zeeb 		ret = -EIO;
899da8fa4e3SBjoern A. Zeeb 		goto out;
900da8fa4e3SBjoern A. Zeeb 	}
901da8fa4e3SBjoern A. Zeeb 
902da8fa4e3SBjoern A. Zeeb 	/* Clear the interrupt */
903da8fa4e3SBjoern A. Zeeb 	irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;
904da8fa4e3SBjoern A. Zeeb 
905da8fa4e3SBjoern A. Zeeb 	/* Set up the register transfer buffer to hit the register 4 times,
906da8fa4e3SBjoern A. Zeeb 	 * this is done to make the access 4-byte aligned to mitigate issues
907da8fa4e3SBjoern A. Zeeb 	 * with host bus interconnects that restrict bus transfer lengths to
908da8fa4e3SBjoern A. Zeeb 	 * be a multiple of 4-bytes.
909da8fa4e3SBjoern A. Zeeb 	 *
910da8fa4e3SBjoern A. Zeeb 	 * Set W1C value to clear the interrupt, this hits the register first.
911da8fa4e3SBjoern A. Zeeb 	 */
912da8fa4e3SBjoern A. Zeeb 	ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
913da8fa4e3SBjoern A. Zeeb 				    cpu_int_status);
914da8fa4e3SBjoern A. Zeeb 	if (ret) {
915da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
916da8fa4e3SBjoern A. Zeeb 			    ret);
917da8fa4e3SBjoern A. Zeeb 		goto out;
918da8fa4e3SBjoern A. Zeeb 	}
919da8fa4e3SBjoern A. Zeeb 
920da8fa4e3SBjoern A. Zeeb out:
921da8fa4e3SBjoern A. Zeeb 	mutex_unlock(&irq_data->mtx);
922da8fa4e3SBjoern A. Zeeb 	if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK)
923da8fa4e3SBjoern A. Zeeb 		ath10k_sdio_fw_crashed_dump(ar);
924da8fa4e3SBjoern A. Zeeb 
925da8fa4e3SBjoern A. Zeeb 	return ret;
926da8fa4e3SBjoern A. Zeeb }
927da8fa4e3SBjoern A. Zeeb 
/* Read the HTC register table and extract interrupt state.
 *
 * @host_int_status: out - host interrupt status masked with the
 *	enabled sources, with the mailbox bit stripped off.
 * @lookahead: out - RX lookahead word for the HTC mailbox, or 0 when
 *	no valid lookahead is pending.
 *
 * Performs one bulk SDIO read covering the whole irq proc register
 * block so a single bus transaction yields all status and lookahead
 * registers. A read failure also kicks off firmware recovery.
 *
 * Returns 0 on success (possibly with both outputs zero when
 * interrupts are not yet enabled) or a negative errno.
 */
static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
					    u8 *host_int_status,
					    u32 *lookahead)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
	struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
	/* Bit selecting the HTC mailbox (mailbox 0) in the status and
	 * lookahead-valid registers.
	 */
	u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
	int ret;

	mutex_lock(&irq_data->mtx);

	*lookahead = 0;
	*host_int_status = 0;

	/* int_status_en is supposed to be non zero, otherwise interrupts
	 * shouldn't be enabled. There is however a short time frame during
	 * initialization between the irq register and int_status_en init
	 * where this can happen.
	 * We silently ignore this condition.
	 */
	if (!irq_en_reg->int_status_en) {
		ret = 0;
		goto out;
	}

	/* Read the first sizeof(struct ath10k_irq_proc_registers)
	 * bytes of the HTC register table. This
	 * will yield us the value of different int status
	 * registers and the lookahead registers.
	 */
	ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
			       irq_proc_reg, sizeof(*irq_proc_reg));
	if (ret) {
		ath10k_core_start_recovery(ar);
		ath10k_warn(ar, "read int status fail, start recovery\n");
		goto out;
	}

	/* Update only those registers that are enabled */
	*host_int_status = irq_proc_reg->host_int_status &
			   irq_en_reg->int_status_en;

	/* Look at mbox status */
	if (!(*host_int_status & htc_mbox)) {
		*lookahead = 0;
		ret = 0;
		goto out;
	}

	/* Mask out pending mbox value, we use look ahead as
	 * the real flag for mbox processing.
	 */
	*host_int_status &= ~htc_mbox;
	if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
		*lookahead = le32_to_cpu(
			irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
		if (!*lookahead)
			ath10k_warn(ar, "sdio mbox lookahead is zero\n");
	}

out:
	mutex_unlock(&irq_data->mtx);
	return ret;
}
994da8fa4e3SBjoern A. Zeeb 
/* Top-level dispatcher for pending SDIO mailbox interrupts.
 *
 * Reads the interrupt status registers once, drains pending mailbox
 * RX messages (driven by the lookahead word), then services CPU,
 * error and counter interrupts in that order.
 *
 * @done: set to true when no further IRQ status re-check is needed
 *	before leaving interrupt processing.
 */
static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
					      bool *done)
{
	u8 host_int_status;
	u32 lookahead;
	int ret;

	/* NOTE: HIF implementation guarantees that the context of this
	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
	 * sleep or call any API that can block or switch thread/task
	 * contexts. This is a fully schedulable context.
	 */

	ret = ath10k_sdio_mbox_read_int_status(ar,
					       &host_int_status,
					       &lookahead);
	if (ret) {
		*done = true;
		goto out;
	}

	/* Nothing pending at all - leave without touching the target. */
	if (!host_int_status && !lookahead) {
		ret = 0;
		*done = true;
		goto out;
	}

	if (lookahead) {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio pending mailbox msg lookahead 0x%08x\n",
			   lookahead);

		ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
							     lookahead,
							     done);
		if (ret)
			goto out;
	}

	/* now, handle the rest of the interrupts */
	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio host_int_status 0x%x\n", host_int_status);

	if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
		/* CPU Interrupt */
		ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
		if (ret)
			goto out;
	}

	if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
		/* Error Interrupt */
		ret = ath10k_sdio_mbox_proc_err_intr(ar);
		if (ret)
			goto out;
	}

	if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
		/* Counter Interrupt */
		ret = ath10k_sdio_mbox_proc_counter_intr(ar);

	/* NOTE(review): any error from the counter interrupt handler above
	 * is discarded here - looks deliberate, but confirm upstream intent.
	 */
	ret = 0;

out:
	/* An optimization to bypass reading the IRQ status registers
	 * unnecessarily which can re-wake the target, if upper layers
	 * determine that we are in a low-throughput mode, we can rely on
	 * taking another interrupt rather than re-checking the status
	 * registers which can re-wake the target.
	 *
	 * NOTE : for host interfaces that makes use of detecting pending
	 * mbox messages at hif can not use this optimization due to
	 * possible side effects, SPI requires the host to drain all
	 * messages from the mailbox before exiting the ISR routine.
	 */

	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio pending irqs done %d status %d",
		   *done, ret);

	return ret;
}
1077da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_set_mbox_info(struct ath10k * ar)1078da8fa4e3SBjoern A. Zeeb static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
1079da8fa4e3SBjoern A. Zeeb {
1080da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1081da8fa4e3SBjoern A. Zeeb 	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
1082da8fa4e3SBjoern A. Zeeb 	u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;
1083da8fa4e3SBjoern A. Zeeb 
1084da8fa4e3SBjoern A. Zeeb 	mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
1085da8fa4e3SBjoern A. Zeeb 	mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
1086da8fa4e3SBjoern A. Zeeb 	mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
1087da8fa4e3SBjoern A. Zeeb 	mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
1088da8fa4e3SBjoern A. Zeeb 	mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;
1089da8fa4e3SBjoern A. Zeeb 
1090da8fa4e3SBjoern A. Zeeb 	mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;
1091da8fa4e3SBjoern A. Zeeb 
1092da8fa4e3SBjoern A. Zeeb 	dev_id_base = (device & 0x0F00);
1093da8fa4e3SBjoern A. Zeeb 	dev_id_chiprev = (device & 0x00FF);
1094da8fa4e3SBjoern A. Zeeb 	switch (dev_id_base) {
1095da8fa4e3SBjoern A. Zeeb 	case (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00):
1096da8fa4e3SBjoern A. Zeeb 		if (dev_id_chiprev < 4)
1097da8fa4e3SBjoern A. Zeeb 			mbox_info->ext_info[0].htc_ext_sz =
1098da8fa4e3SBjoern A. Zeeb 				ATH10K_HIF_MBOX0_EXT_WIDTH;
1099da8fa4e3SBjoern A. Zeeb 		else
1100da8fa4e3SBjoern A. Zeeb 			/* from QCA6174 2.0(0x504), the width has been extended
1101da8fa4e3SBjoern A. Zeeb 			 * to 56K
1102da8fa4e3SBjoern A. Zeeb 			 */
1103da8fa4e3SBjoern A. Zeeb 			mbox_info->ext_info[0].htc_ext_sz =
1104da8fa4e3SBjoern A. Zeeb 				ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
1105da8fa4e3SBjoern A. Zeeb 		break;
1106da8fa4e3SBjoern A. Zeeb 	case (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00):
1107da8fa4e3SBjoern A. Zeeb 		mbox_info->ext_info[0].htc_ext_sz =
1108da8fa4e3SBjoern A. Zeeb 			ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
1109da8fa4e3SBjoern A. Zeeb 		break;
1110da8fa4e3SBjoern A. Zeeb 	default:
1111da8fa4e3SBjoern A. Zeeb 		mbox_info->ext_info[0].htc_ext_sz =
1112da8fa4e3SBjoern A. Zeeb 				ATH10K_HIF_MBOX0_EXT_WIDTH;
1113da8fa4e3SBjoern A. Zeeb 	}
1114da8fa4e3SBjoern A. Zeeb 
1115da8fa4e3SBjoern A. Zeeb 	mbox_info->ext_info[1].htc_ext_addr =
1116da8fa4e3SBjoern A. Zeeb 		mbox_info->ext_info[0].htc_ext_addr +
1117da8fa4e3SBjoern A. Zeeb 		mbox_info->ext_info[0].htc_ext_sz +
1118da8fa4e3SBjoern A. Zeeb 		ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
1119da8fa4e3SBjoern A. Zeeb 	mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
1120da8fa4e3SBjoern A. Zeeb }
1121da8fa4e3SBjoern A. Zeeb 
1122da8fa4e3SBjoern A. Zeeb /* BMI functions */
1123da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_bmi_credits(struct ath10k * ar)1124da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_bmi_credits(struct ath10k *ar)
1125da8fa4e3SBjoern A. Zeeb {
1126da8fa4e3SBjoern A. Zeeb 	u32 addr, cmd_credits;
1127da8fa4e3SBjoern A. Zeeb 	unsigned long timeout;
1128da8fa4e3SBjoern A. Zeeb 	int ret;
1129da8fa4e3SBjoern A. Zeeb 
1130da8fa4e3SBjoern A. Zeeb 	/* Read the counter register to get the command credits */
1131da8fa4e3SBjoern A. Zeeb 	addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
1132da8fa4e3SBjoern A. Zeeb 	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1133da8fa4e3SBjoern A. Zeeb 	cmd_credits = 0;
1134da8fa4e3SBjoern A. Zeeb 
1135da8fa4e3SBjoern A. Zeeb 	while (time_before(jiffies, timeout) && !cmd_credits) {
1136da8fa4e3SBjoern A. Zeeb 		/* Hit the credit counter with a 4-byte access, the first byte
1137da8fa4e3SBjoern A. Zeeb 		 * read will hit the counter and cause a decrement, while the
1138da8fa4e3SBjoern A. Zeeb 		 * remaining 3 bytes has no effect. The rationale behind this
1139da8fa4e3SBjoern A. Zeeb 		 * is to make all HIF accesses 4-byte aligned.
1140da8fa4e3SBjoern A. Zeeb 		 */
1141da8fa4e3SBjoern A. Zeeb 		ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
1142da8fa4e3SBjoern A. Zeeb 		if (ret) {
1143da8fa4e3SBjoern A. Zeeb 			ath10k_warn(ar,
1144da8fa4e3SBjoern A. Zeeb 				    "unable to decrement the command credit count register: %d\n",
1145da8fa4e3SBjoern A. Zeeb 				    ret);
1146da8fa4e3SBjoern A. Zeeb 			return ret;
1147da8fa4e3SBjoern A. Zeeb 		}
1148da8fa4e3SBjoern A. Zeeb 
1149da8fa4e3SBjoern A. Zeeb 		/* The counter is only 8 bits.
1150da8fa4e3SBjoern A. Zeeb 		 * Ignore anything in the upper 3 bytes
1151da8fa4e3SBjoern A. Zeeb 		 */
1152da8fa4e3SBjoern A. Zeeb 		cmd_credits &= 0xFF;
1153da8fa4e3SBjoern A. Zeeb 	}
1154da8fa4e3SBjoern A. Zeeb 
1155da8fa4e3SBjoern A. Zeeb 	if (!cmd_credits) {
1156da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "bmi communication timeout\n");
1157da8fa4e3SBjoern A. Zeeb 		return -ETIMEDOUT;
1158da8fa4e3SBjoern A. Zeeb 	}
1159da8fa4e3SBjoern A. Zeeb 
1160da8fa4e3SBjoern A. Zeeb 	return 0;
1161da8fa4e3SBjoern A. Zeeb }
1162da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_bmi_get_rx_lookahead(struct ath10k * ar)1163da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
1164da8fa4e3SBjoern A. Zeeb {
1165da8fa4e3SBjoern A. Zeeb 	unsigned long timeout;
1166da8fa4e3SBjoern A. Zeeb 	u32 rx_word;
1167da8fa4e3SBjoern A. Zeeb 	int ret;
1168da8fa4e3SBjoern A. Zeeb 
1169da8fa4e3SBjoern A. Zeeb 	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1170da8fa4e3SBjoern A. Zeeb 	rx_word = 0;
1171da8fa4e3SBjoern A. Zeeb 
1172da8fa4e3SBjoern A. Zeeb 	while ((time_before(jiffies, timeout)) && !rx_word) {
1173da8fa4e3SBjoern A. Zeeb 		ret = ath10k_sdio_read32(ar,
1174da8fa4e3SBjoern A. Zeeb 					 MBOX_HOST_INT_STATUS_ADDRESS,
1175da8fa4e3SBjoern A. Zeeb 					 &rx_word);
1176da8fa4e3SBjoern A. Zeeb 		if (ret) {
1177da8fa4e3SBjoern A. Zeeb 			ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
1178da8fa4e3SBjoern A. Zeeb 			return ret;
1179da8fa4e3SBjoern A. Zeeb 		}
1180da8fa4e3SBjoern A. Zeeb 
1181da8fa4e3SBjoern A. Zeeb 		 /* all we really want is one bit */
1182da8fa4e3SBjoern A. Zeeb 		rx_word &= 1;
1183da8fa4e3SBjoern A. Zeeb 	}
1184da8fa4e3SBjoern A. Zeeb 
1185da8fa4e3SBjoern A. Zeeb 	if (!rx_word) {
1186da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
1187da8fa4e3SBjoern A. Zeeb 		return -EINVAL;
1188da8fa4e3SBjoern A. Zeeb 	}
1189da8fa4e3SBjoern A. Zeeb 
1190da8fa4e3SBjoern A. Zeeb 	return ret;
1191da8fa4e3SBjoern A. Zeeb }
1192da8fa4e3SBjoern A. Zeeb 
/* Send a BMI request to the target and optionally read back a response.
 *
 * If @req is non-NULL, the function first waits for a BMI command credit,
 * copies the request into the pre-allocated bounce buffer (bmi_buf) and
 * writes it to the mbox HTC address.  If @resp and @resp_len are provided,
 * it then waits for response data to become available (see the long
 * synchronization discussion below) and copies *resp_len bytes back to
 * the caller.
 *
 * Returns 0 on success or a negative errno from the credit wait, the
 * SDIO write, the lookahead poll, or the SDIO read.
 */
static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
					void *req, u32 req_len,
					void *resp, u32 *resp_len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 addr;
	int ret;

	if (req) {
		/* A credit must be available before the target will accept
		 * another BMI command.
		 */
		ret = ath10k_sdio_bmi_credits(ar);
		if (ret)
			return ret;

		addr = ar_sdio->mbox_info.htc_addr;

		/* Stage the request in bmi_buf rather than writing the
		 * caller's buffer directly.
		 */
		memcpy(ar_sdio->bmi_buf, req, req_len);
		ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
		if (ret) {
			ath10k_warn(ar,
				    "unable to send the bmi data to the device: %d\n",
				    ret);
			return ret;
		}
	}

	if (!resp || !resp_len)
		/* No response expected */
		return 0;

	/* During normal bootup, small reads may be required.
	 * Rather than issue an HIF Read and then wait as the Target
	 * adds successive bytes to the FIFO, we wait here until
	 * we know that response data is available.
	 *
	 * This allows us to cleanly timeout on an unexpected
	 * Target failure rather than risk problems at the HIF level.
	 * In particular, this avoids SDIO timeouts and possibly garbage
	 * data on some host controllers.  And on an interconnect
	 * such as Compact Flash (as well as some SDIO masters) which
	 * does not provide any indication on data timeout, it avoids
	 * a potential hang or garbage response.
	 *
	 * Synchronization is more difficult for reads larger than the
	 * size of the MBOX FIFO (128B), because the Target is unable
	 * to push the 129th byte of data until AFTER the Host posts an
	 * HIF Read and removes some FIFO data.  So for large reads the
	 * Host proceeds to post an HIF Read BEFORE all the data is
	 * actually available to read.  Fortunately, large BMI reads do
	 * not occur in practice -- they're supported for debug/development.
	 *
	 * So Host/Target BMI synchronization is divided into these cases:
	 *  CASE 1: length < 4
	 *        Should not happen
	 *
	 *  CASE 2: 4 <= length <= 128
	 *        Wait for first 4 bytes to be in FIFO
	 *        If CONSERVATIVE_BMI_READ is enabled, also wait for
	 *        a BMI command credit, which indicates that the ENTIRE
	 *        response is available in the FIFO
	 *
	 *  CASE 3: length > 128
	 *        Wait for the first 4 bytes to be in FIFO
	 *
	 * For most uses, a small timeout should be sufficient and we will
	 * usually see a response quickly; but there may be some unusual
	 * (debug) cases of BMI_EXECUTE where we want an larger timeout.
	 * For now, we use an unbounded busy loop while waiting for
	 * BMI_EXECUTE.
	 *
	 * If BMI_EXECUTE ever needs to support longer-latency execution,
	 * especially in production, this code needs to be enhanced to sleep
	 * and yield.  Also note that BMI_COMMUNICATION_TIMEOUT is currently
	 * a function of Host processor speed.
	 */
	ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
	if (ret)
		return ret;

	/* We always read from the start of the mbox address */
	addr = ar_sdio->mbox_info.htc_addr;
	ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
	if (ret) {
		ath10k_warn(ar,
			    "unable to read the bmi data from the device: %d\n",
			    ret);
		return ret;
	}

	/* Hand the staged response back to the caller's buffer. */
	memcpy(resp, ar_sdio->bmi_buf, *resp_len);

	return 0;
}
1285da8fa4e3SBjoern A. Zeeb 
1286da8fa4e3SBjoern A. Zeeb /* sdio async handling functions */
1287da8fa4e3SBjoern A. Zeeb 
1288da8fa4e3SBjoern A. Zeeb static struct ath10k_sdio_bus_request
ath10k_sdio_alloc_busreq(struct ath10k * ar)1289da8fa4e3SBjoern A. Zeeb *ath10k_sdio_alloc_busreq(struct ath10k *ar)
1290da8fa4e3SBjoern A. Zeeb {
1291da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1292da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio_bus_request *bus_req;
1293da8fa4e3SBjoern A. Zeeb 
1294da8fa4e3SBjoern A. Zeeb 	spin_lock_bh(&ar_sdio->lock);
1295da8fa4e3SBjoern A. Zeeb 
1296da8fa4e3SBjoern A. Zeeb 	if (list_empty(&ar_sdio->bus_req_freeq)) {
1297da8fa4e3SBjoern A. Zeeb 		bus_req = NULL;
1298da8fa4e3SBjoern A. Zeeb 		goto out;
1299da8fa4e3SBjoern A. Zeeb 	}
1300da8fa4e3SBjoern A. Zeeb 
1301da8fa4e3SBjoern A. Zeeb 	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
1302da8fa4e3SBjoern A. Zeeb 				   struct ath10k_sdio_bus_request, list);
1303da8fa4e3SBjoern A. Zeeb 	list_del(&bus_req->list);
1304da8fa4e3SBjoern A. Zeeb 
1305da8fa4e3SBjoern A. Zeeb out:
1306da8fa4e3SBjoern A. Zeeb 	spin_unlock_bh(&ar_sdio->lock);
1307da8fa4e3SBjoern A. Zeeb 	return bus_req;
1308da8fa4e3SBjoern A. Zeeb }
1309da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_free_bus_req(struct ath10k * ar,struct ath10k_sdio_bus_request * bus_req)1310da8fa4e3SBjoern A. Zeeb static void ath10k_sdio_free_bus_req(struct ath10k *ar,
1311da8fa4e3SBjoern A. Zeeb 				     struct ath10k_sdio_bus_request *bus_req)
1312da8fa4e3SBjoern A. Zeeb {
1313da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1314da8fa4e3SBjoern A. Zeeb 
1315da8fa4e3SBjoern A. Zeeb 	memset(bus_req, 0, sizeof(*bus_req));
1316da8fa4e3SBjoern A. Zeeb 
1317da8fa4e3SBjoern A. Zeeb 	spin_lock_bh(&ar_sdio->lock);
1318da8fa4e3SBjoern A. Zeeb 	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
1319da8fa4e3SBjoern A. Zeeb 	spin_unlock_bh(&ar_sdio->lock);
1320da8fa4e3SBjoern A. Zeeb }
1321da8fa4e3SBjoern A. Zeeb 
__ath10k_sdio_write_async(struct ath10k * ar,struct ath10k_sdio_bus_request * req)1322da8fa4e3SBjoern A. Zeeb static void __ath10k_sdio_write_async(struct ath10k *ar,
1323da8fa4e3SBjoern A. Zeeb 				      struct ath10k_sdio_bus_request *req)
1324da8fa4e3SBjoern A. Zeeb {
1325da8fa4e3SBjoern A. Zeeb 	struct ath10k_htc_ep *ep;
1326da8fa4e3SBjoern A. Zeeb 	struct sk_buff *skb;
1327da8fa4e3SBjoern A. Zeeb 	int ret;
1328da8fa4e3SBjoern A. Zeeb 
1329da8fa4e3SBjoern A. Zeeb 	skb = req->skb;
1330da8fa4e3SBjoern A. Zeeb 	ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
1331da8fa4e3SBjoern A. Zeeb 	if (ret)
1332da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d",
1333da8fa4e3SBjoern A. Zeeb 			    req->address, ret);
1334da8fa4e3SBjoern A. Zeeb 
1335da8fa4e3SBjoern A. Zeeb 	if (req->htc_msg) {
1336da8fa4e3SBjoern A. Zeeb 		ep = &ar->htc.endpoint[req->eid];
1337da8fa4e3SBjoern A. Zeeb 		ath10k_htc_notify_tx_completion(ep, skb);
1338da8fa4e3SBjoern A. Zeeb 	} else if (req->comp) {
1339da8fa4e3SBjoern A. Zeeb 		complete(req->comp);
1340da8fa4e3SBjoern A. Zeeb 	}
1341da8fa4e3SBjoern A. Zeeb 
1342da8fa4e3SBjoern A. Zeeb 	ath10k_sdio_free_bus_req(ar, req);
1343da8fa4e3SBjoern A. Zeeb }
1344da8fa4e3SBjoern A. Zeeb 
1345da8fa4e3SBjoern A. Zeeb /* To improve throughput use workqueue to deliver packets to HTC layer,
1346da8fa4e3SBjoern A. Zeeb  * this way SDIO bus is utilised much better.
1347da8fa4e3SBjoern A. Zeeb  */
ath10k_rx_indication_async_work(struct work_struct * work)1348da8fa4e3SBjoern A. Zeeb static void ath10k_rx_indication_async_work(struct work_struct *work)
1349da8fa4e3SBjoern A. Zeeb {
1350da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
1351da8fa4e3SBjoern A. Zeeb 						   async_work_rx);
1352da8fa4e3SBjoern A. Zeeb 	struct ath10k *ar = ar_sdio->ar;
1353da8fa4e3SBjoern A. Zeeb 	struct ath10k_htc_ep *ep;
1354da8fa4e3SBjoern A. Zeeb 	struct ath10k_skb_rxcb *cb;
1355da8fa4e3SBjoern A. Zeeb 	struct sk_buff *skb;
1356da8fa4e3SBjoern A. Zeeb 
1357da8fa4e3SBjoern A. Zeeb 	while (true) {
1358da8fa4e3SBjoern A. Zeeb 		skb = skb_dequeue(&ar_sdio->rx_head);
1359da8fa4e3SBjoern A. Zeeb 		if (!skb)
1360da8fa4e3SBjoern A. Zeeb 			break;
1361da8fa4e3SBjoern A. Zeeb 		cb = ATH10K_SKB_RXCB(skb);
1362da8fa4e3SBjoern A. Zeeb 		ep = &ar->htc.endpoint[cb->eid];
1363da8fa4e3SBjoern A. Zeeb 		ep->ep_ops.ep_rx_complete(ar, skb);
1364da8fa4e3SBjoern A. Zeeb 	}
1365da8fa4e3SBjoern A. Zeeb 
1366da8fa4e3SBjoern A. Zeeb 	if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
1367da8fa4e3SBjoern A. Zeeb 		local_bh_disable();
1368da8fa4e3SBjoern A. Zeeb 		napi_schedule(&ar->napi);
1369da8fa4e3SBjoern A. Zeeb 		local_bh_enable();
1370da8fa4e3SBjoern A. Zeeb 	}
1371da8fa4e3SBjoern A. Zeeb }
1372da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_read_rtc_state(struct ath10k_sdio * ar_sdio,unsigned char * state)1373da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
1374da8fa4e3SBjoern A. Zeeb {
1375da8fa4e3SBjoern A. Zeeb 	struct ath10k *ar = ar_sdio->ar;
1376da8fa4e3SBjoern A. Zeeb 	unsigned char rtc_state = 0;
1377da8fa4e3SBjoern A. Zeeb 	int ret = 0;
1378da8fa4e3SBjoern A. Zeeb 
1379da8fa4e3SBjoern A. Zeeb 	rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);
1380da8fa4e3SBjoern A. Zeeb 	if (ret) {
1381da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "failed to read rtc state: %d\n", ret);
1382da8fa4e3SBjoern A. Zeeb 		return ret;
1383da8fa4e3SBjoern A. Zeeb 	}
1384da8fa4e3SBjoern A. Zeeb 
1385da8fa4e3SBjoern A. Zeeb 	*state = rtc_state & 0x3;
1386da8fa4e3SBjoern A. Zeeb 
1387da8fa4e3SBjoern A. Zeeb 	return ret;
1388da8fa4e3SBjoern A. Zeeb }
1389da8fa4e3SBjoern A. Zeeb 
/* Enable or disable mbox sleep via the FIFO timeout / chip control
 * register, updating the cached mbox_state accordingly.
 *
 * When waking the chip (@enable_sleep == false), the RTC state is polled
 * (with bounded retries and settle delays) until it reports ON, so the
 * chip is actually awake before subsequent mbox traffic.
 *
 * Returns 0 on success or a negative errno from the register accesses.
 * NOTE(review): the write error below is logged but deliberately not
 * returned immediately; the RTC poll still runs — presumably best-effort.
 */
static int ath10k_sdio_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 val;
	int retry = ATH10K_CIS_READ_RETRY, ret = 0;
	unsigned char rtc_state = 0;

	sdio_claim_host(ar_sdio->func);

	/* Read-modify-write: only the sleep-disable bit changes. */
	ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
	if (ret) {
		ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
			    ret);
		goto release;
	}

	if (enable_sleep) {
		val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
		ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;
	} else {
		val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
		ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE;
	}

	ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
	if (ret) {
		ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
			    ret);
	}

	if (!enable_sleep) {
		/* Wait for the RTC to report ON before declaring the chip
		 * awake; retry up to ATH10K_CIS_READ_RETRY times.
		 */
		do {
			udelay(ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US);
			ret = ath10k_sdio_read_rtc_state(ar_sdio, &rtc_state);

			if (ret) {
				ath10k_warn(ar, "failed to disable mbox sleep: %d", ret);
				break;
			}

			ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",
				   rtc_state);

			if (rtc_state == ATH10K_CIS_RTC_STATE_ON)
				break;

			/* Give the crystal time to settle before retrying. */
			udelay(ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US);
			retry--;
		} while (retry > 0);
	}

release:
	sdio_release_host(ar_sdio->func);

	return ret;
}
1446da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_sleep_timer_handler(struct timer_list * t)1447da8fa4e3SBjoern A. Zeeb static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
1448da8fa4e3SBjoern A. Zeeb {
1449da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio *ar_sdio = from_timer(ar_sdio, t, sleep_timer);
1450da8fa4e3SBjoern A. Zeeb 
1451da8fa4e3SBjoern A. Zeeb 	ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
1452da8fa4e3SBjoern A. Zeeb 	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
1453da8fa4e3SBjoern A. Zeeb }
1454da8fa4e3SBjoern A. Zeeb 
/* Work item that drains the asynchronous write queue.
 *
 * The spinlock is dropped around each __ath10k_sdio_write_async() call
 * because the write itself may sleep; list_for_each_entry_safe() makes
 * the in-loop list_del() safe, and the lock is re-taken before the next
 * iteration examines the list.
 */
static void ath10k_sdio_write_async_work(struct work_struct *work)
{
	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
						   wr_async_work);
	struct ath10k *ar = ar_sdio->ar;
	struct ath10k_sdio_bus_request *req, *tmp_req;
	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;

	spin_lock_bh(&ar_sdio->wr_async_lock);

	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_bh(&ar_sdio->wr_async_lock);

		/* Wake the mbox before touching its address range, and arm
		 * the inactivity timer that will put it back to sleep.
		 */
		if (req->address >= mbox_info->htc_addr &&
		    ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {
			ath10k_sdio_set_mbox_sleep(ar, false);
			mod_timer(&ar_sdio->sleep_timer, jiffies +
				  msecs_to_jiffies(ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS));
		}

		__ath10k_sdio_write_async(ar, req);
		spin_lock_bh(&ar_sdio->wr_async_lock);
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);

	/* A sleep request may have arrived (from the timer) while we were
	 * draining the queue; honour it now that the queue is empty.
	 */
	if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE)
		ath10k_sdio_set_mbox_sleep(ar, true);
}
1485da8fa4e3SBjoern A. Zeeb 
/* Build a bus request describing an asynchronous write and append it to
 * the async write queue.  The caller is responsible for kicking the
 * wr_async_work work item afterwards.
 *
 * Returns 0 on success or -ENOMEM when no free bus request is available.
 */
static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
				      struct sk_buff *skb,
				      struct completion *comp,
				      bool htc_msg, enum ath10k_htc_ep_id eid)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_bus_request *bus_req = ath10k_sdio_alloc_busreq(ar);

	if (!bus_req) {
		ath10k_warn(ar,
			    "unable to allocate bus request for async request\n");
		return -ENOMEM;
	}

	/* Describe the transfer... */
	bus_req->address = addr;
	bus_req->skb = skb;
	bus_req->comp = comp;
	bus_req->htc_msg = htc_msg;
	bus_req->eid = eid;

	/* ...and queue it for the SDIO write worker. */
	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_bh(&ar_sdio->wr_async_lock);

	return 0;
}
1516da8fa4e3SBjoern A. Zeeb 
1517da8fa4e3SBjoern A. Zeeb /* IRQ handler */
1518da8fa4e3SBjoern A. Zeeb 
/* SDIO IRQ callback: process pending mbox interrupts until done or the
 * HIF communication timeout expires.
 *
 * NOTE: the SDIO core invokes this callback with the host already
 * claimed, hence the inverted release/claim pairing below.
 */
static void ath10k_sdio_irq_handler(struct sdio_func *func)
{
	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
	struct ath10k *ar = ar_sdio->ar;
	unsigned long timeout;
	bool done = false;
	int ret;

	/* Release the host during interrupts so we can pick it back up when
	 * we process commands.
	 */
	sdio_release_host(ar_sdio->func);

	timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
	do {
		ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
		if (ret)
			break;
	} while (time_before(jiffies, timeout) && !done);

	/* Flush any TX frames that became eligible while servicing RX. */
	ath10k_mac_tx_push_pending(ar);

	/* Re-claim before returning to the SDIO core, which expects the
	 * host to still be held.
	 */
	sdio_claim_host(ar_sdio->func);

	/* -ECANCELED is an expected shutdown path, not worth a warning. */
	if (ret && ret != -ECANCELED)
		ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
			    ret);
}
1547da8fa4e3SBjoern A. Zeeb 
1548da8fa4e3SBjoern A. Zeeb /* sdio HIF functions */
1549da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_disable_intrs(struct ath10k * ar)1550da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_disable_intrs(struct ath10k *ar)
1551da8fa4e3SBjoern A. Zeeb {
1552da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1553da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1554da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1555da8fa4e3SBjoern A. Zeeb 	int ret;
1556da8fa4e3SBjoern A. Zeeb 
1557da8fa4e3SBjoern A. Zeeb 	mutex_lock(&irq_data->mtx);
1558da8fa4e3SBjoern A. Zeeb 
1559da8fa4e3SBjoern A. Zeeb 	memset(regs, 0, sizeof(*regs));
1560da8fa4e3SBjoern A. Zeeb 	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
1561da8fa4e3SBjoern A. Zeeb 				&regs->int_status_en, sizeof(*regs));
1562da8fa4e3SBjoern A. Zeeb 	if (ret)
1563da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);
1564da8fa4e3SBjoern A. Zeeb 
1565da8fa4e3SBjoern A. Zeeb 	mutex_unlock(&irq_data->mtx);
1566da8fa4e3SBjoern A. Zeeb 
1567da8fa4e3SBjoern A. Zeeb 	return ret;
1568da8fa4e3SBjoern A. Zeeb }
1569da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_hif_power_up(struct ath10k * ar,enum ath10k_firmware_mode fw_mode)1570da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_hif_power_up(struct ath10k *ar,
1571da8fa4e3SBjoern A. Zeeb 				    enum ath10k_firmware_mode fw_mode)
1572da8fa4e3SBjoern A. Zeeb {
1573da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1574da8fa4e3SBjoern A. Zeeb 	struct sdio_func *func = ar_sdio->func;
1575da8fa4e3SBjoern A. Zeeb 	int ret;
1576da8fa4e3SBjoern A. Zeeb 
1577da8fa4e3SBjoern A. Zeeb 	if (!ar_sdio->is_disabled)
1578da8fa4e3SBjoern A. Zeeb 		return 0;
1579da8fa4e3SBjoern A. Zeeb 
1580da8fa4e3SBjoern A. Zeeb 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
1581da8fa4e3SBjoern A. Zeeb 
1582da8fa4e3SBjoern A. Zeeb 	ret = ath10k_sdio_config(ar);
1583da8fa4e3SBjoern A. Zeeb 	if (ret) {
1584da8fa4e3SBjoern A. Zeeb 		ath10k_err(ar, "failed to config sdio: %d\n", ret);
1585da8fa4e3SBjoern A. Zeeb 		return ret;
1586da8fa4e3SBjoern A. Zeeb 	}
1587da8fa4e3SBjoern A. Zeeb 
1588da8fa4e3SBjoern A. Zeeb 	sdio_claim_host(func);
1589da8fa4e3SBjoern A. Zeeb 
1590da8fa4e3SBjoern A. Zeeb 	ret = sdio_enable_func(func);
1591da8fa4e3SBjoern A. Zeeb 	if (ret) {
1592da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "unable to enable sdio function: %d)\n", ret);
1593da8fa4e3SBjoern A. Zeeb 		sdio_release_host(func);
1594da8fa4e3SBjoern A. Zeeb 		return ret;
1595da8fa4e3SBjoern A. Zeeb 	}
1596da8fa4e3SBjoern A. Zeeb 
1597da8fa4e3SBjoern A. Zeeb 	sdio_release_host(func);
1598da8fa4e3SBjoern A. Zeeb 
1599da8fa4e3SBjoern A. Zeeb 	/* Wait for hardware to initialise. It should take a lot less than
1600da8fa4e3SBjoern A. Zeeb 	 * 20 ms but let's be conservative here.
1601da8fa4e3SBjoern A. Zeeb 	 */
1602da8fa4e3SBjoern A. Zeeb 	msleep(20);
1603da8fa4e3SBjoern A. Zeeb 
1604da8fa4e3SBjoern A. Zeeb 	ar_sdio->is_disabled = false;
1605da8fa4e3SBjoern A. Zeeb 
1606da8fa4e3SBjoern A. Zeeb 	ret = ath10k_sdio_disable_intrs(ar);
1607da8fa4e3SBjoern A. Zeeb 	if (ret)
1608da8fa4e3SBjoern A. Zeeb 		return ret;
1609da8fa4e3SBjoern A. Zeeb 
1610da8fa4e3SBjoern A. Zeeb 	return 0;
1611da8fa4e3SBjoern A. Zeeb }
1612da8fa4e3SBjoern A. Zeeb 
/* Power down the SDIO function: stop the sleep timer, put the mbox to
 * sleep, disable the function and reset the card.
 *
 * A no-op when the device is already disabled.
 */
static void ath10k_sdio_hif_power_down(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	int ret;

	if (ar_sdio->is_disabled)
		return;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");

	/* Make sure no sleep transition races with the teardown below. */
	del_timer_sync(&ar_sdio->sleep_timer);
	ath10k_sdio_set_mbox_sleep(ar, true);

	/* Disable the card */
	sdio_claim_host(ar_sdio->func);

	ret = sdio_disable_func(ar_sdio->func);
	if (ret) {
		ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
		sdio_release_host(ar_sdio->func);
		return;
	}

	/* Reset the card so the next power-up starts from a clean state. */
	ret = mmc_hw_reset(ar_sdio->func->card);
	if (ret)
		ath10k_warn(ar, "unable to reset sdio: %d\n", ret);

	sdio_release_host(ar_sdio->func);

	ar_sdio->is_disabled = true;
}
1644da8fa4e3SBjoern A. Zeeb 
/* Queue a scatter-gather list of TX frames for asynchronous writing to
 * the mbox of the endpoint mapped to @pipe_id, then kick the worker.
 *
 * Returns 0 on success or the error from queuing a request.
 */
static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	enum ath10k_htc_ep_id eid = pipe_id_to_eid(pipe_id);
	int i;

	for (i = 0; i < n_items; i++) {
		struct sk_buff *skb = items[i].transfer_context;
		size_t padded_len;
		u32 address;
		int ret;

		/* Pad each frame out to the mbox block boundary. */
		padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
							      skb->len);
		skb_trim(skb, padded_len);

		/* Write TX data to the end of the mbox address space */
		address = ar_sdio->mbox_addr[eid] +
			  ar_sdio->mbox_size[eid] - skb->len;

		ret = ath10k_sdio_prep_async_req(ar, address, skb,
						 NULL, true, eid);
		if (ret)
			return ret;
	}

	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);

	return 0;
}
1677da8fa4e3SBjoern A. Zeeb 
/* Build the mbox interrupt-enable shadow registers and write the whole
 * set to the device in a single transfer.
 *
 * NOTE(review): the single ath10k_sdio_write() of sizeof(*regs) starting
 * at int_status_en appears to rely on the struct fields being laid out
 * in register order — confirm against the register map before reordering
 * fields.
 */
static int ath10k_sdio_enable_intrs(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	int ret;

	mutex_lock(&irq_data->mtx);

	/* Enable all but CPU interrupts */
	regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);

	/* NOTE: There are some cases where HIF can do detection of
	 * pending mbox messages which is disabled now.
	 */
	regs->int_status_en |=
		FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);

	/* Set up the CPU Interrupt Status Register, enable CPU sourced interrupt #0
	 * #0 is used for report assertion from target
	 */
	regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1);

	/* Set up the Error Interrupt status Register */
	regs->err_int_status_en =
		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);

	/* Enable Counter interrupt status register to get fatal errors for
	 * debugging.
	 */
	regs->cntr_int_status_en =
		FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
			   ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);

	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
				&regs->int_status_en, sizeof(*regs));
	if (ret)
		ath10k_warn(ar,
			    "failed to update mbox interrupt status register : %d\n",
			    ret);

	mutex_unlock(&irq_data->mtx);
	return ret;
}
1725da8fa4e3SBjoern A. Zeeb 
1726da8fa4e3SBjoern A. Zeeb /* HIF diagnostics */
1727da8fa4e3SBjoern A. Zeeb 
/* Diagnostic read of target memory through the mbox window registers.
 *
 * Stages the data in a kernel bounce buffer before copying it to the
 * caller.  Returns 0 on success or a negative errno.
 */
static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
				     size_t buf_len)
{
	void *bounce;
	int ret;

	bounce = kzalloc(buf_len, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	/* set window register to start read cycle */
	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
	if (ret) {
		ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
		goto free;
	}

	/* read the data */
	ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, bounce, buf_len);
	if (ret) {
		ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
			    ret);
		goto free;
	}

	memcpy(buf, bounce, buf_len);

free:
	kfree(bounce);

	return ret;
}
1760da8fa4e3SBjoern A. Zeeb 
/* Read a single 32-bit little-endian word of target memory at the given
 * address and return it in host byte order through @value.
 */
static int ath10k_sdio_diag_read32(struct ath10k *ar, u32 address,
				   u32 *value)
{
	__le32 *tmp;
	int ret;

	/* Bounce through a heap buffer rather than reading directly into
	 * the caller's storage.
	 */
	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	ret = ath10k_sdio_hif_diag_read(ar, address, tmp, sizeof(*tmp));
	if (!ret)
		*value = __le32_to_cpu(*tmp);

	kfree(tmp);

	return ret;
}
1782da8fa4e3SBjoern A. Zeeb 
/* Write nbytes from @data to target memory at @address using the mbox
 * diagnostic window. Returns 0 on success, negative errno on failure.
 */
static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
					  const void *data, int nbytes)
{
	int ret;

	/* Stage the payload in the window data register first */
	ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
	if (ret) {
		ath10k_warn(ar,
			    "failed to write 0x%p to mbox window data address: %d\n",
			    data, ret);
		return ret;
	}

	/* Writing the destination address to the window register kicks
	 * off the actual write cycle.
	 */
	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
	if (ret)
		ath10k_warn(ar, "failed to set mbox window write address: %d", ret);

	return ret;
}
1806da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_hif_start_post(struct ath10k * ar)1807da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_hif_start_post(struct ath10k *ar)
1808da8fa4e3SBjoern A. Zeeb {
1809da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1810da8fa4e3SBjoern A. Zeeb 	u32 addr, val;
1811da8fa4e3SBjoern A. Zeeb 	int ret = 0;
1812da8fa4e3SBjoern A. Zeeb 
1813da8fa4e3SBjoern A. Zeeb 	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
1814da8fa4e3SBjoern A. Zeeb 
1815da8fa4e3SBjoern A. Zeeb 	ret = ath10k_sdio_diag_read32(ar, addr, &val);
1816da8fa4e3SBjoern A. Zeeb 	if (ret) {
1817da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "unable to read hi_acs_flags : %d\n", ret);
1818da8fa4e3SBjoern A. Zeeb 		return ret;
1819da8fa4e3SBjoern A. Zeeb 	}
1820da8fa4e3SBjoern A. Zeeb 
1821da8fa4e3SBjoern A. Zeeb 	if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
1822da8fa4e3SBjoern A. Zeeb 		ath10k_dbg(ar, ATH10K_DBG_SDIO,
1823da8fa4e3SBjoern A. Zeeb 			   "sdio mailbox swap service enabled\n");
1824da8fa4e3SBjoern A. Zeeb 		ar_sdio->swap_mbox = true;
1825da8fa4e3SBjoern A. Zeeb 	} else {
1826da8fa4e3SBjoern A. Zeeb 		ath10k_dbg(ar, ATH10K_DBG_SDIO,
1827da8fa4e3SBjoern A. Zeeb 			   "sdio mailbox swap service disabled\n");
1828da8fa4e3SBjoern A. Zeeb 		ar_sdio->swap_mbox = false;
1829da8fa4e3SBjoern A. Zeeb 	}
1830da8fa4e3SBjoern A. Zeeb 
1831da8fa4e3SBjoern A. Zeeb 	ath10k_sdio_set_mbox_sleep(ar, true);
1832da8fa4e3SBjoern A. Zeeb 
1833da8fa4e3SBjoern A. Zeeb 	return 0;
1834da8fa4e3SBjoern A. Zeeb }
1835da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_get_htt_tx_complete(struct ath10k * ar)1836da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
1837da8fa4e3SBjoern A. Zeeb {
1838da8fa4e3SBjoern A. Zeeb 	u32 addr, val;
1839da8fa4e3SBjoern A. Zeeb 	int ret;
1840da8fa4e3SBjoern A. Zeeb 
1841da8fa4e3SBjoern A. Zeeb 	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
1842da8fa4e3SBjoern A. Zeeb 
1843da8fa4e3SBjoern A. Zeeb 	ret = ath10k_sdio_diag_read32(ar, addr, &val);
1844da8fa4e3SBjoern A. Zeeb 	if (ret) {
1845da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar,
1846da8fa4e3SBjoern A. Zeeb 			    "unable to read hi_acs_flags for htt tx comple : %d\n", ret);
1847da8fa4e3SBjoern A. Zeeb 		return ret;
1848da8fa4e3SBjoern A. Zeeb 	}
1849da8fa4e3SBjoern A. Zeeb 
1850da8fa4e3SBjoern A. Zeeb 	ret = (val & HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK);
1851da8fa4e3SBjoern A. Zeeb 
1852da8fa4e3SBjoern A. Zeeb 	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n",
1853da8fa4e3SBjoern A. Zeeb 		   ret ? " " : " not ");
1854da8fa4e3SBjoern A. Zeeb 
1855da8fa4e3SBjoern A. Zeeb 	return ret;
1856da8fa4e3SBjoern A. Zeeb }
1857da8fa4e3SBjoern A. Zeeb 
1858da8fa4e3SBjoern A. Zeeb /* HIF start/stop */
1859da8fa4e3SBjoern A. Zeeb 
/* Bring up the HIF layer: enable NAPI, quiesce and re-enable target
 * interrupts, set up the eid-0 mailbox and register the SDIO irq
 * handler. Returns 0 on success, negative errno on failure.
 */
static int ath10k_sdio_hif_start(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	int ret;

	ath10k_core_napi_enable(ar);

	/* Sleep 20 ms before HIF interrupts are disabled.
	 * This will give target plenty of time to process the BMI done
	 * request before interrupts are disabled.
	 */
	msleep(20);
	ret = ath10k_sdio_disable_intrs(ar);
	if (ret)
		return ret;

	/* eid 0 always uses the lower part of the extended mailbox address
	 * space (ext_info[0].htc_ext_addr).
	 */
	ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
	ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;

	/* sdio_claim_irq() must be called with the host claimed */
	sdio_claim_host(ar_sdio->func);

	/* Register the isr */
	ret =  sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
	if (ret) {
		ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
		sdio_release_host(ar_sdio->func);
		return ret;
	}

	sdio_release_host(ar_sdio->func);

	/* NOTE(review): an enable failure is only logged here and start
	 * continues — presumably intentional best-effort behavior.
	 */
	ret = ath10k_sdio_enable_intrs(ar);
	if (ret)
		ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);

	/* Enable sleep and then disable it again */
	ret = ath10k_sdio_set_mbox_sleep(ar, true);
	if (ret)
		return ret;

	/* Wait for 20ms for the written value to take effect */
	msleep(20);

	ret = ath10k_sdio_set_mbox_sleep(ar, false);
	if (ret)
		return ret;

	return 0;
}
1912da8fa4e3SBjoern A. Zeeb 
1913da8fa4e3SBjoern A. Zeeb #define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)
1914da8fa4e3SBjoern A. Zeeb 
/* Disable all target-side mbox interrupts by queueing an asynchronous
 * write of zeroed enable registers, then release the SDIO irq on the
 * host side. Best-effort: failures are logged, not propagated.
 */
static void ath10k_sdio_irq_disable(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	struct sk_buff *skb;
	struct completion irqs_disabled_comp;
	int ret;

	skb = dev_alloc_skb(sizeof(*regs));
	if (!skb)
		return;

	/* The cached enable-register shadow is shared state; update it
	 * and snapshot it into the skb under the mutex.
	 */
	mutex_lock(&irq_data->mtx);

	memset(regs, 0, sizeof(*regs)); /* disable all interrupts */
	memcpy(skb->data, regs, sizeof(*regs));
	skb_put(skb, sizeof(*regs));

	mutex_unlock(&irq_data->mtx);

	init_completion(&irqs_disabled_comp);
	ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
					 skb, &irqs_disabled_comp, false, 0);
	if (ret)
		goto out;

	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);

	/* Wait for the completion of the IRQ disable request.
	 * If there is a timeout we will try to disable irq's anyway.
	 */
	ret = wait_for_completion_timeout(&irqs_disabled_comp,
					  SDIO_IRQ_DISABLE_TIMEOUT_HZ);
	if (!ret)
		ath10k_warn(ar, "sdio irq disable request timed out\n");

	/* sdio_release_irq() requires the host to be claimed */
	sdio_claim_host(ar_sdio->func);

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);

	sdio_release_host(ar_sdio->func);

out:
	/* The skb is owned here and freed after the async write completed
	 * (or was never queued).
	 * NOTE(review): on the timeout path the skb may still be in
	 * flight when it is freed — verify against the async worker.
	 */
	kfree_skb(skb);
}
1963da8fa4e3SBjoern A. Zeeb 
/* Tear down the HIF layer: disable interrupts, flush the async rx/tx
 * work, drop queued rx frames and fail every pending bus request.
 */
static void ath10k_sdio_hif_stop(struct ath10k *ar)
{
	struct ath10k_sdio_bus_request *req, *tmp_req;
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sk_buff *skb;

	/* Stop the target from raising further interrupts first */
	ath10k_sdio_irq_disable(ar);

	cancel_work_sync(&ar_sdio->async_work_rx);

	/* Drop received frames that were queued but not yet indicated */
	while ((skb = skb_dequeue(&ar_sdio->rx_head)))
		dev_kfree_skb_any(skb);

	cancel_work_sync(&ar_sdio->wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);

	/* Free all bus requests that have not been handled */
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		struct ath10k_htc_ep *ep;

		list_del(&req->list);

		if (req->htc_msg) {
			/* HTC messages get a tx-completion callback so the
			 * endpoint can release the skb itself.
			 */
			ep = &ar->htc.endpoint[req->eid];
			ath10k_htc_notify_tx_completion(ep, req->skb);
		} else if (req->skb) {
			kfree_skb(req->skb);
		}
		ath10k_sdio_free_bus_req(ar, req);
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);

	ath10k_core_napi_sync_disable(ar);
}
2000da8fa4e3SBjoern A. Zeeb 
2001da8fa4e3SBjoern A. Zeeb #ifdef CONFIG_PM
2002da8fa4e3SBjoern A. Zeeb 
static int ath10k_sdio_hif_suspend(struct ath10k *ar)
{
	/* No HIF-level suspend work; bus-level suspend is handled by the
	 * sdio pm ops (see ath10k_sdio_pm_suspend below).
	 */
	return 0;
}
2007da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_hif_resume(struct ath10k * ar)2008da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_hif_resume(struct ath10k *ar)
2009da8fa4e3SBjoern A. Zeeb {
2010da8fa4e3SBjoern A. Zeeb 	switch (ar->state) {
2011da8fa4e3SBjoern A. Zeeb 	case ATH10K_STATE_OFF:
2012da8fa4e3SBjoern A. Zeeb 		ath10k_dbg(ar, ATH10K_DBG_SDIO,
2013da8fa4e3SBjoern A. Zeeb 			   "sdio resume configuring sdio\n");
2014da8fa4e3SBjoern A. Zeeb 
2015da8fa4e3SBjoern A. Zeeb 		/* need to set sdio settings after power is cut from sdio */
2016da8fa4e3SBjoern A. Zeeb 		ath10k_sdio_config(ar);
2017da8fa4e3SBjoern A. Zeeb 		break;
2018da8fa4e3SBjoern A. Zeeb 
2019da8fa4e3SBjoern A. Zeeb 	case ATH10K_STATE_ON:
2020da8fa4e3SBjoern A. Zeeb 	default:
2021da8fa4e3SBjoern A. Zeeb 		break;
2022da8fa4e3SBjoern A. Zeeb 	}
2023da8fa4e3SBjoern A. Zeeb 
2024da8fa4e3SBjoern A. Zeeb 	return 0;
2025da8fa4e3SBjoern A. Zeeb }
2026da8fa4e3SBjoern A. Zeeb #endif
2027da8fa4e3SBjoern A. Zeeb 
/* Map an HTC service to ul/dl pipe ids and program the per-eid mailbox
 * address/size for the WMI and HTT services. Returns 0 on success,
 * -EINVAL for an unknown service or one with no allocated endpoint.
 */
static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc *htc = &ar->htc;
	u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;
	enum ath10k_htc_ep_id eid;
	bool ep_found = false;
	int htt_idx, wmi_idx;
	int i;

	/* For sdio, we are interested in the mapping between eid
	 * and pipeid rather than service_id to pipe_id.
	 * First we find out which eid has been allocated to the
	 * service...
	 */
	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
		if (htc->endpoint[i].service_id != service_id)
			continue;

		eid = htc->endpoint[i].eid;
		ep_found = true;
		break;
	}

	if (!ep_found)
		return -EINVAL;

	/* Then we create the simplest mapping possible between pipeid
	 * and eid
	 */
	*ul_pipe = *dl_pipe = (u8)eid;

	/* Normally, HTT will use the upper part of the extended
	 * mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl
	 * the lower part (ext_info[0].htc_ext_addr).
	 * If fw wants swapping of mailbox addresses, the opposite is true.
	 */
	htt_idx = ar_sdio->swap_mbox ? 0 : 1;
	wmi_idx = 1 - htt_idx;

	htt_addr = ar_sdio->mbox_info.ext_info[htt_idx].htc_ext_addr;
	htt_mbox_size = ar_sdio->mbox_info.ext_info[htt_idx].htc_ext_sz;
	wmi_addr = ar_sdio->mbox_info.ext_info[wmi_idx].htc_ext_addr;
	wmi_mbox_size = ar_sdio->mbox_info.ext_info[wmi_idx].htc_ext_sz;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
		/* HTC ctrl ep mbox address has already been setup in
		 * ath10k_sdio_hif_start
		 */
		break;
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		ar_sdio->mbox_addr[eid] = wmi_addr;
		ar_sdio->mbox_size[eid] = wmi_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		break;
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		ar_sdio->mbox_addr[eid] = htt_addr;
		ar_sdio->mbox_size[eid] = htt_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio htt data mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		break;
	default:
		ath10k_warn(ar, "unsupported HTC service id: %d\n",
			    service_id);
		return -EINVAL;
	}

	return 0;
}
2105da8fa4e3SBjoern A. Zeeb 
/* Return the default (HTC control) ul/dl pipe ids. */
static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");

	/* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our
	 * case) == 0
	 */
	*ul_pipe = *dl_pipe = 0;
}
2117da8fa4e3SBjoern A. Zeeb 
/* HIF operations implemented by the sdio transport */
static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
	/* data path and diagnostics */
	.tx_sg			= ath10k_sdio_hif_tx_sg,
	.diag_read		= ath10k_sdio_hif_diag_read,
	.diag_write		= ath10k_sdio_hif_diag_write_mem,
	.exchange_bmi_msg	= ath10k_sdio_bmi_exchange_msg,
	/* lifecycle */
	.start			= ath10k_sdio_hif_start,
	.stop			= ath10k_sdio_hif_stop,
	.start_post		= ath10k_sdio_hif_start_post,
	.get_htt_tx_complete	= ath10k_sdio_get_htt_tx_complete,
	/* pipe/endpoint mapping */
	.map_service_to_pipe	= ath10k_sdio_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_sdio_hif_get_default_pipe,
	.power_up		= ath10k_sdio_hif_power_up,
	.power_down		= ath10k_sdio_hif_power_down,
#ifdef CONFIG_PM
	.suspend		= ath10k_sdio_hif_suspend,
	.resume			= ath10k_sdio_hif_resume,
#endif
};
2136da8fa4e3SBjoern A. Zeeb 
2137da8fa4e3SBjoern A. Zeeb #ifdef CONFIG_PM_SLEEP
2138da8fa4e3SBjoern A. Zeeb 
2139da8fa4e3SBjoern A. Zeeb /* Empty handlers so that mmc subsystem doesn't remove us entirely during
2140da8fa4e3SBjoern A. Zeeb  * suspend. We instead follow cfg80211 suspend/resume handlers.
2141da8fa4e3SBjoern A. Zeeb  */
ath10k_sdio_pm_suspend(struct device * device)2142da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_pm_suspend(struct device *device)
2143da8fa4e3SBjoern A. Zeeb {
2144da8fa4e3SBjoern A. Zeeb 	struct sdio_func *func = dev_to_sdio_func(device);
2145da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
2146da8fa4e3SBjoern A. Zeeb 	struct ath10k *ar = ar_sdio->ar;
2147da8fa4e3SBjoern A. Zeeb 	mmc_pm_flag_t pm_flag, pm_caps;
2148da8fa4e3SBjoern A. Zeeb 	int ret;
2149da8fa4e3SBjoern A. Zeeb 
2150da8fa4e3SBjoern A. Zeeb 	if (!device_may_wakeup(ar->dev))
2151da8fa4e3SBjoern A. Zeeb 		return 0;
2152da8fa4e3SBjoern A. Zeeb 
2153da8fa4e3SBjoern A. Zeeb 	ath10k_sdio_set_mbox_sleep(ar, true);
2154da8fa4e3SBjoern A. Zeeb 
2155da8fa4e3SBjoern A. Zeeb 	pm_flag = MMC_PM_KEEP_POWER;
2156da8fa4e3SBjoern A. Zeeb 
2157da8fa4e3SBjoern A. Zeeb 	ret = sdio_set_host_pm_flags(func, pm_flag);
2158da8fa4e3SBjoern A. Zeeb 	if (ret) {
2159da8fa4e3SBjoern A. Zeeb 		pm_caps = sdio_get_host_pm_caps(func);
2160da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
2161da8fa4e3SBjoern A. Zeeb 			    pm_flag, pm_caps, ret);
2162da8fa4e3SBjoern A. Zeeb 		return ret;
2163da8fa4e3SBjoern A. Zeeb 	}
2164da8fa4e3SBjoern A. Zeeb 
2165da8fa4e3SBjoern A. Zeeb 	return ret;
2166da8fa4e3SBjoern A. Zeeb }
2167da8fa4e3SBjoern A. Zeeb 
static int ath10k_sdio_pm_resume(struct device *device)
{
	/* Intentionally empty; resume is driven through the cfg80211/HIF
	 * path instead (see comment above ath10k_sdio_pm_suspend).
	 */
	return 0;
}
2172da8fa4e3SBjoern A. Zeeb 
2173da8fa4e3SBjoern A. Zeeb static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
2174da8fa4e3SBjoern A. Zeeb 			 ath10k_sdio_pm_resume);
2175da8fa4e3SBjoern A. Zeeb 
2176da8fa4e3SBjoern A. Zeeb #define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)
2177da8fa4e3SBjoern A. Zeeb 
2178da8fa4e3SBjoern A. Zeeb #else
2179da8fa4e3SBjoern A. Zeeb 
2180da8fa4e3SBjoern A. Zeeb #define ATH10K_SDIO_PM_OPS NULL
2181da8fa4e3SBjoern A. Zeeb 
2182da8fa4e3SBjoern A. Zeeb #endif /* CONFIG_PM_SLEEP */
2183da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_napi_poll(struct napi_struct * ctx,int budget)2184da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
2185da8fa4e3SBjoern A. Zeeb {
2186da8fa4e3SBjoern A. Zeeb 	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
2187da8fa4e3SBjoern A. Zeeb 	int done;
2188da8fa4e3SBjoern A. Zeeb 
2189da8fa4e3SBjoern A. Zeeb 	done = ath10k_htt_rx_hl_indication(ar, budget);
2190da8fa4e3SBjoern A. Zeeb 	ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget:%d\n", done, budget);
2191da8fa4e3SBjoern A. Zeeb 
2192da8fa4e3SBjoern A. Zeeb 	if (done < budget)
2193da8fa4e3SBjoern A. Zeeb 		napi_complete_done(ctx, done);
2194da8fa4e3SBjoern A. Zeeb 
2195da8fa4e3SBjoern A. Zeeb 	return done;
2196da8fa4e3SBjoern A. Zeeb }
2197da8fa4e3SBjoern A. Zeeb 
/* Read one 32-bit value from the target's host-interest area at the
 * given item offset.
 */
static int ath10k_sdio_read_host_interest_value(struct ath10k *ar,
						u32 item_offset,
						u32 *val)
{
	u32 addr = host_interest_item_address(item_offset);
	int ret;

	ret = ath10k_sdio_diag_read32(ar, addr, val);
	if (ret)
		ath10k_warn(ar, "unable to read host interest offset %d value\n",
			    item_offset);

	return ret;
}
2215da8fa4e3SBjoern A. Zeeb 
/* Read buf_len bytes of target memory into @buf, one 32-bit diag read
 * at a time. Returns 0 on success (including buf_len == 0), or the
 * error of the first failing read.
 *
 * NOTE(review): buf_len is expected to be a multiple of 4; a trailing
 * partial word would make the final memcpy overrun buf — confirm all
 * callers pass 4-byte-aligned sizes.
 */
static int ath10k_sdio_read_mem(struct ath10k *ar, u32 address, void *buf,
				u32 buf_len)
{
	u32 val;
	u32 i;
	int ret = 0;	/* was uninitialized: buf_len == 0 returned garbage */

	for (i = 0; i < buf_len; i += 4) {
		ret = ath10k_sdio_diag_read32(ar, address + i, &val);
		if (ret) {
			ath10k_warn(ar, "unable to read mem %d value\n", address + i);
			break;
		}
		memcpy(buf + i, &val, 4);
	}

	return ret;
}
2233da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_is_fast_dump_supported(struct ath10k * ar)2234da8fa4e3SBjoern A. Zeeb static bool ath10k_sdio_is_fast_dump_supported(struct ath10k *ar)
2235da8fa4e3SBjoern A. Zeeb {
2236da8fa4e3SBjoern A. Zeeb 	u32 param;
2237da8fa4e3SBjoern A. Zeeb 
2238da8fa4e3SBjoern A. Zeeb 	ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_option_flag2), &param);
2239da8fa4e3SBjoern A. Zeeb 
2240da8fa4e3SBjoern A. Zeeb 	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param);
2241da8fa4e3SBjoern A. Zeeb 
2242da8fa4e3SBjoern A. Zeeb 	return !!(param & HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW);
2243da8fa4e3SBjoern A. Zeeb }
2244da8fa4e3SBjoern A. Zeeb 
/* Dump the firmware register area (pointed to by hi_failure_state) to
 * the kernel log and, when @crash_data is provided, into the coredump
 * record in little-endian format.
 */
static void ath10k_sdio_dump_registers(struct ath10k *ar,
				       struct ath10k_fw_crash_data *crash_data,
				       bool fast_dump)
{
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;
	u32 reg_dump_area;

	/* hi_failure_state holds the target address of the register dump */
	ret = ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_failure_state),
						   &reg_dump_area);
	if (ret) {
		ath10k_warn(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	/* fast path reads in bulk over BMI, slow path 32 bits at a time */
	if (fast_dump)
		ret = ath10k_bmi_read_memory(ar, reg_dump_area, reg_dump_values,
					     sizeof(reg_dump_values));
	else
		ret = ath10k_sdio_read_mem(ar, reg_dump_area, reg_dump_values,
					   sizeof(reg_dump_values));

	if (ret) {
		ath10k_warn(ar, "failed to read firmware dump value: %d\n", ret);
		return;
	}

	ath10k_err(ar, "firmware register dump:\n");
	/* four registers per log line */
	for (i = 0; i < ARRAY_SIZE(reg_dump_values); i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	if (!crash_data)
		return;

	for (i = 0; i < ARRAY_SIZE(reg_dump_values); i++)
		crash_data->registers[i] = __cpu_to_le32(reg_dump_values[i]);
}
2287da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_dump_memory_section(struct ath10k * ar,const struct ath10k_mem_region * mem_region,u8 * buf,size_t buf_len)2288da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_dump_memory_section(struct ath10k *ar,
2289da8fa4e3SBjoern A. Zeeb 					   const struct ath10k_mem_region *mem_region,
2290da8fa4e3SBjoern A. Zeeb 					   u8 *buf, size_t buf_len)
2291da8fa4e3SBjoern A. Zeeb {
2292da8fa4e3SBjoern A. Zeeb 	const struct ath10k_mem_section *cur_section, *next_section;
2293da8fa4e3SBjoern A. Zeeb 	unsigned int count, section_size, skip_size;
2294da8fa4e3SBjoern A. Zeeb 	int ret, i, j;
2295da8fa4e3SBjoern A. Zeeb 
2296da8fa4e3SBjoern A. Zeeb 	if (!mem_region || !buf)
2297da8fa4e3SBjoern A. Zeeb 		return 0;
2298da8fa4e3SBjoern A. Zeeb 
2299da8fa4e3SBjoern A. Zeeb 	cur_section = &mem_region->section_table.sections[0];
2300da8fa4e3SBjoern A. Zeeb 
2301da8fa4e3SBjoern A. Zeeb 	if (mem_region->start > cur_section->start) {
2302da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
2303da8fa4e3SBjoern A. Zeeb 			    mem_region->start, cur_section->start);
2304da8fa4e3SBjoern A. Zeeb 		return 0;
2305da8fa4e3SBjoern A. Zeeb 	}
2306da8fa4e3SBjoern A. Zeeb 
2307da8fa4e3SBjoern A. Zeeb 	skip_size = cur_section->start - mem_region->start;
2308da8fa4e3SBjoern A. Zeeb 
2309da8fa4e3SBjoern A. Zeeb 	/* fill the gap between the first register section and register
2310da8fa4e3SBjoern A. Zeeb 	 * start address
2311da8fa4e3SBjoern A. Zeeb 	 */
2312da8fa4e3SBjoern A. Zeeb 	for (i = 0; i < skip_size; i++) {
2313da8fa4e3SBjoern A. Zeeb 		*buf = ATH10K_MAGIC_NOT_COPIED;
2314da8fa4e3SBjoern A. Zeeb 		buf++;
2315da8fa4e3SBjoern A. Zeeb 	}
2316da8fa4e3SBjoern A. Zeeb 
2317da8fa4e3SBjoern A. Zeeb 	count = 0;
2318da8fa4e3SBjoern A. Zeeb 	i = 0;
2319da8fa4e3SBjoern A. Zeeb 	for (; cur_section; cur_section = next_section) {
2320da8fa4e3SBjoern A. Zeeb 		section_size = cur_section->end - cur_section->start;
2321da8fa4e3SBjoern A. Zeeb 
2322da8fa4e3SBjoern A. Zeeb 		if (section_size <= 0) {
2323da8fa4e3SBjoern A. Zeeb 			ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
2324da8fa4e3SBjoern A. Zeeb 				    cur_section->start,
2325da8fa4e3SBjoern A. Zeeb 				    cur_section->end);
2326da8fa4e3SBjoern A. Zeeb 			break;
2327da8fa4e3SBjoern A. Zeeb 		}
2328da8fa4e3SBjoern A. Zeeb 
2329da8fa4e3SBjoern A. Zeeb 		if (++i == mem_region->section_table.size) {
2330da8fa4e3SBjoern A. Zeeb 			/* last section */
2331da8fa4e3SBjoern A. Zeeb 			next_section = NULL;
2332da8fa4e3SBjoern A. Zeeb 			skip_size = 0;
2333da8fa4e3SBjoern A. Zeeb 		} else {
2334da8fa4e3SBjoern A. Zeeb 			next_section = cur_section + 1;
2335da8fa4e3SBjoern A. Zeeb 
2336da8fa4e3SBjoern A. Zeeb 			if (cur_section->end > next_section->start) {
2337da8fa4e3SBjoern A. Zeeb 				ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
2338da8fa4e3SBjoern A. Zeeb 					    next_section->start,
2339da8fa4e3SBjoern A. Zeeb 					    cur_section->end);
2340da8fa4e3SBjoern A. Zeeb 				break;
2341da8fa4e3SBjoern A. Zeeb 			}
2342da8fa4e3SBjoern A. Zeeb 
2343da8fa4e3SBjoern A. Zeeb 			skip_size = next_section->start - cur_section->end;
2344da8fa4e3SBjoern A. Zeeb 		}
2345da8fa4e3SBjoern A. Zeeb 
2346da8fa4e3SBjoern A. Zeeb 		if (buf_len < (skip_size + section_size)) {
2347da8fa4e3SBjoern A. Zeeb 			ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
2348da8fa4e3SBjoern A. Zeeb 			break;
2349da8fa4e3SBjoern A. Zeeb 		}
2350da8fa4e3SBjoern A. Zeeb 
2351da8fa4e3SBjoern A. Zeeb 		buf_len -= skip_size + section_size;
2352da8fa4e3SBjoern A. Zeeb 
2353da8fa4e3SBjoern A. Zeeb 		/* read section to dest memory */
2354da8fa4e3SBjoern A. Zeeb 		ret = ath10k_sdio_read_mem(ar, cur_section->start,
2355da8fa4e3SBjoern A. Zeeb 					   buf, section_size);
2356da8fa4e3SBjoern A. Zeeb 		if (ret) {
2357da8fa4e3SBjoern A. Zeeb 			ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
2358da8fa4e3SBjoern A. Zeeb 				    cur_section->start, ret);
2359da8fa4e3SBjoern A. Zeeb 			break;
2360da8fa4e3SBjoern A. Zeeb 		}
2361da8fa4e3SBjoern A. Zeeb 
2362da8fa4e3SBjoern A. Zeeb 		buf += section_size;
2363da8fa4e3SBjoern A. Zeeb 		count += section_size;
2364da8fa4e3SBjoern A. Zeeb 
2365da8fa4e3SBjoern A. Zeeb 		/* fill in the gap between this section and the next */
2366da8fa4e3SBjoern A. Zeeb 		for (j = 0; j < skip_size; j++) {
2367da8fa4e3SBjoern A. Zeeb 			*buf = ATH10K_MAGIC_NOT_COPIED;
2368da8fa4e3SBjoern A. Zeeb 			buf++;
2369da8fa4e3SBjoern A. Zeeb 		}
2370da8fa4e3SBjoern A. Zeeb 
2371da8fa4e3SBjoern A. Zeeb 		count += skip_size;
2372da8fa4e3SBjoern A. Zeeb 	}
2373da8fa4e3SBjoern A. Zeeb 
2374da8fa4e3SBjoern A. Zeeb 	return count;
2375da8fa4e3SBjoern A. Zeeb }
2376da8fa4e3SBjoern A. Zeeb 
2377da8fa4e3SBjoern A. Zeeb /* if an error happened returns < 0, otherwise the length */
ath10k_sdio_dump_memory_generic(struct ath10k * ar,const struct ath10k_mem_region * current_region,u8 * buf,bool fast_dump)2378da8fa4e3SBjoern A. Zeeb static int ath10k_sdio_dump_memory_generic(struct ath10k *ar,
2379da8fa4e3SBjoern A. Zeeb 					   const struct ath10k_mem_region *current_region,
2380da8fa4e3SBjoern A. Zeeb 					   u8 *buf,
2381da8fa4e3SBjoern A. Zeeb 					   bool fast_dump)
2382da8fa4e3SBjoern A. Zeeb {
2383da8fa4e3SBjoern A. Zeeb 	int ret;
2384da8fa4e3SBjoern A. Zeeb 
2385da8fa4e3SBjoern A. Zeeb 	if (current_region->section_table.size > 0)
2386da8fa4e3SBjoern A. Zeeb 		/* Copy each section individually. */
2387da8fa4e3SBjoern A. Zeeb 		return ath10k_sdio_dump_memory_section(ar,
2388da8fa4e3SBjoern A. Zeeb 						      current_region,
2389da8fa4e3SBjoern A. Zeeb 						      buf,
2390da8fa4e3SBjoern A. Zeeb 						      current_region->len);
2391da8fa4e3SBjoern A. Zeeb 
2392da8fa4e3SBjoern A. Zeeb 	/* No individiual memory sections defined so we can
2393da8fa4e3SBjoern A. Zeeb 	 * copy the entire memory region.
2394da8fa4e3SBjoern A. Zeeb 	 */
2395da8fa4e3SBjoern A. Zeeb 	if (fast_dump)
2396da8fa4e3SBjoern A. Zeeb 		ret = ath10k_bmi_read_memory(ar,
2397da8fa4e3SBjoern A. Zeeb 					     current_region->start,
2398da8fa4e3SBjoern A. Zeeb 					     buf,
2399da8fa4e3SBjoern A. Zeeb 					     current_region->len);
2400da8fa4e3SBjoern A. Zeeb 	else
2401da8fa4e3SBjoern A. Zeeb 		ret = ath10k_sdio_read_mem(ar,
2402da8fa4e3SBjoern A. Zeeb 					   current_region->start,
2403da8fa4e3SBjoern A. Zeeb 					   buf,
2404da8fa4e3SBjoern A. Zeeb 					   current_region->len);
2405da8fa4e3SBjoern A. Zeeb 
2406da8fa4e3SBjoern A. Zeeb 	if (ret) {
2407da8fa4e3SBjoern A. Zeeb 		ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
2408da8fa4e3SBjoern A. Zeeb 			    current_region->name, ret);
2409da8fa4e3SBjoern A. Zeeb 		return ret;
2410da8fa4e3SBjoern A. Zeeb 	}
2411da8fa4e3SBjoern A. Zeeb 
2412da8fa4e3SBjoern A. Zeeb 	return current_region->len;
2413da8fa4e3SBjoern A. Zeeb }
2414da8fa4e3SBjoern A. Zeeb 
/* Copy every memory region described by the target's memory layout into
 * the crash data ramdump buffer.  Each region is preceded by a
 * struct ath10k_dump_ram_data_hdr recording its type, start address and
 * the number of bytes actually captured (zero when the read failed).
 */
static void ath10k_sdio_dump_memory(struct ath10k *ar,
				    struct ath10k_fw_crash_data *crash_data,
				    bool fast_dump)
{
	const struct ath10k_hw_mem_layout *mem_layout;
	const struct ath10k_mem_region *current_region;
	struct ath10k_dump_ram_data_hdr *hdr;
	u32 count;
	size_t buf_len;
	int ret, i;
	u8 *buf;

	if (!crash_data)
		return;

	mem_layout = ath10k_coredump_get_mem_layout(ar);
	if (!mem_layout)
		return;

	current_region = &mem_layout->region_table.regions[0];

	buf = crash_data->ramdump_buf;
	buf_len = crash_data->ramdump_buf_len;

	memset(buf, 0, buf_len);

	for (i = 0; i < mem_layout->region_table.size; i++) {
		count = 0;

		/* Both the header reserved below and the full region
		 * payload must fit in the remaining buffer; checking only
		 * the payload length would allow the region data to
		 * overrun the buffer by up to sizeof(*hdr) bytes.
		 */
		if (current_region->len > buf_len ||
		    sizeof(*hdr) > buf_len - current_region->len) {
			ath10k_warn(ar, "memory region %s size %d is larger than remaining ramdump buffer size %zu\n",
				    current_region->name,
				    current_region->len,
				    buf_len);
			break;
		}

		/* Reserve space for the header. */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		ret = ath10k_sdio_dump_memory_generic(ar, current_region, buf,
						      fast_dump);
		if (ret >= 0)
			count = ret;

		hdr->region_type = cpu_to_le32(current_region->type);
		hdr->start = cpu_to_le32(current_region->start);
		hdr->length = cpu_to_le32(count);

		if (count == 0)
			/* Note: the header remains, just with zero length. */
			break;

		buf += count;
		buf_len -= count;

		current_region++;
	}
}
2476da8fa4e3SBjoern A. Zeeb 
/* Handle a firmware crash: disable SDIO interrupts, capture registers
 * and memory into a new coredump, re-enable interrupts and kick off
 * core recovery.
 */
void ath10k_sdio_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char guid[UUID_STRING_LEN + 1];
	bool fast_dump;

	fast_dump = ath10k_sdio_is_fast_dump_supported(ar);

	/* The fast path reads target memory through BMI, so bring BMI
	 * up first.
	 */
	if (fast_dump)
		ath10k_bmi_start(ar);

	ar->stats.fw_crash_counter++;

	ath10k_sdio_disable_intrs(ar);

	/* May return NULL, e.g. when coredump collection is disabled;
	 * the dump helpers below tolerate a NULL crash_data.
	 */
	crash_data = ath10k_coredump_new(ar);

	if (crash_data)
		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
	else
		scnprintf(guid, sizeof(guid), "n/a");

	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
	ath10k_print_driver_info(ar);
	ath10k_sdio_dump_registers(ar, crash_data, fast_dump);
	ath10k_sdio_dump_memory(ar, crash_data, fast_dump);

	ath10k_sdio_enable_intrs(ar);

	ath10k_core_start_recovery(ar);
}
2508da8fa4e3SBjoern A. Zeeb 
/* Probe a newly inserted SDIO function: allocate the ath10k core, set
 * up the SDIO-private state (IRQ register buffers, bounce buffer, BMI
 * buffer, locks, work queues, bus request pool) and register the
 * device with the ath10k core.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int ath10k_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	struct ath10k_sdio *ar_sdio;
	struct ath10k *ar;
	enum ath10k_hw_rev hw_rev;
	u32 dev_id_base;
	struct ath10k_bus_params bus_params = {};
	int ret, i;

	/* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
	 * If there will be newer chipsets that does not use the hw reg
	 * setup as defined in qca6174_regs and qca6174_values, this
	 * assumption is no longer valid and hw_rev must be setup differently
	 * depending on chipset.
	 */
	hw_rev = ATH10K_HW_QCA6174;

	/* The SDIO private data (ar_sdio) is carved out of the core's
	 * private area, see ath10k_sdio_priv() below.
	 */
	ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
				hw_rev, &ath10k_sdio_hif_ops);
	if (!ar) {
		dev_err(&func->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);

	ar_sdio = ath10k_sdio_priv(ar);

	/* All buffers below are device-managed (devm_*) and are freed
	 * automatically with ar->dev; the error paths only need to
	 * destroy the core (and the workqueue once created).
	 */
	ar_sdio->irq_data.irq_proc_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_proc_reg) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);
	if (!ar_sdio->vsg_buffer) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->irq_data.irq_en_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_en_reg) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);
	if (!ar_sdio->bmi_buf) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->is_disabled = true;
	ar_sdio->ar = ar;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->wr_async_lock);
	mutex_init(&ar_sdio->irq_data.mtx);

	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);
	ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
	if (!ar_sdio->workqueue) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	/* Pre-populate the free list with the whole bus request pool. */
	for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
		ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);

	skb_queue_head_init(&ar_sdio->rx_head);
	INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);

	/* Only the AR6005 and QCA9377 device id families (compared on
	 * bits 8-11 of the device id) are supported.
	 */
	dev_id_base = (id->device & 0x0F00);
	if (dev_id_base != (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00) &&
	    dev_id_base != (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00)) {
		ret = -ENODEV;
		ath10k_err(ar, "unsupported device id %u (0x%x)\n",
			   dev_id_base, id->device);
		goto err_free_wq;
	}

	ar->dev_id = QCA9377_1_0_DEVICE_ID;
	ar->id.vendor = id->vendor;
	ar->id.device = id->device;

	ath10k_sdio_set_mbox_info(ar);

	bus_params.dev_type = ATH10K_DEV_TYPE_HL;
	/* TODO: don't know yet how to get chip_id with SDIO */
	bus_params.chip_id = 0;
	bus_params.hl_msdu_ids = true;

	ar->hw->max_mtu = ETH_DATA_LEN;

	ret = ath10k_core_register(ar, &bus_params);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_wq;
	}

	timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0);

	return 0;

err_free_wq:
	destroy_workqueue(ar_sdio->workqueue);
err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}
2636da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_remove(struct sdio_func * func)2637da8fa4e3SBjoern A. Zeeb static void ath10k_sdio_remove(struct sdio_func *func)
2638da8fa4e3SBjoern A. Zeeb {
2639da8fa4e3SBjoern A. Zeeb 	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
2640da8fa4e3SBjoern A. Zeeb 	struct ath10k *ar = ar_sdio->ar;
2641da8fa4e3SBjoern A. Zeeb 
2642da8fa4e3SBjoern A. Zeeb 	ath10k_dbg(ar, ATH10K_DBG_BOOT,
2643da8fa4e3SBjoern A. Zeeb 		   "sdio removed func %d vendor 0x%x device 0x%x\n",
2644da8fa4e3SBjoern A. Zeeb 		   func->num, func->vendor, func->device);
2645da8fa4e3SBjoern A. Zeeb 
2646da8fa4e3SBjoern A. Zeeb 	ath10k_core_unregister(ar);
2647da8fa4e3SBjoern A. Zeeb 
2648da8fa4e3SBjoern A. Zeeb 	netif_napi_del(&ar->napi);
2649da8fa4e3SBjoern A. Zeeb 
2650da8fa4e3SBjoern A. Zeeb 	ath10k_core_destroy(ar);
2651da8fa4e3SBjoern A. Zeeb 
2652da8fa4e3SBjoern A. Zeeb 	destroy_workqueue(ar_sdio->workqueue);
2653da8fa4e3SBjoern A. Zeeb }
2654da8fa4e3SBjoern A. Zeeb 
/* SDIO device ids handled by this driver; matched against bits 8-11
 * of the device id in ath10k_sdio_probe().
 */
static const struct sdio_device_id ath10k_sdio_devices[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6005)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_QCA9377)},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);
2662da8fa4e3SBjoern A. Zeeb 
/* SDIO bus driver glue: probe/remove callbacks, device id table and
 * power management ops.
 */
static struct sdio_driver ath10k_sdio_driver = {
	.name = "ath10k_sdio",
	.id_table = ath10k_sdio_devices,
	.probe = ath10k_sdio_probe,
	.remove = ath10k_sdio_remove,
	.drv = {
		.owner = THIS_MODULE,
		.pm = ATH10K_SDIO_PM_OPS,
	},
};
2673da8fa4e3SBjoern A. Zeeb 
ath10k_sdio_init(void)2674da8fa4e3SBjoern A. Zeeb static int __init ath10k_sdio_init(void)
2675da8fa4e3SBjoern A. Zeeb {
2676da8fa4e3SBjoern A. Zeeb 	int ret;
2677da8fa4e3SBjoern A. Zeeb 
2678da8fa4e3SBjoern A. Zeeb 	ret = sdio_register_driver(&ath10k_sdio_driver);
2679da8fa4e3SBjoern A. Zeeb 	if (ret)
2680da8fa4e3SBjoern A. Zeeb 		pr_err("sdio driver registration failed: %d\n", ret);
2681da8fa4e3SBjoern A. Zeeb 
2682da8fa4e3SBjoern A. Zeeb 	return ret;
2683da8fa4e3SBjoern A. Zeeb }
2684da8fa4e3SBjoern A. Zeeb 
/* Module unload: unregister the SDIO bus driver. */
static void __exit ath10k_sdio_exit(void)
{
	sdio_unregister_driver(&ath10k_sdio_driver);
}

module_init(ath10k_sdio_init);
module_exit(ath10k_sdio_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");
2696