/*
 * Copyright (c) 2010-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "ath9k.h"
#include "mci.h"

static const u8 ath_mci_duty_cycle[] = { 55, 50, 60, 70, 80, 85, 90, 95, 98 };

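/* Look up the tracked BT profile entry that matches the connection handle. */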
static struct ath_mci_profile_info*
ath_mci_find_profile(struct ath_mci_profile *mci,
		     struct ath_mci_profile_info *info)
{
	struct ath_mci_profile_info *entry;

	if (list_empty(&mci->info))
		return NULL;

	list_for_each_entry(entry, &mci->info, list) {
		if (entry->conn_handle == info->conn_handle)
			return entry;
	}
	return NULL;
}

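/*
 * Add a new BT profile entry, enforcing the SCO and ACL profile limits.
 * For voice profiles, the TX priority is derived from the voice type.
 */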
static bool ath_mci_add_profile(struct ath_common *common,
				struct ath_mci_profile *mci,
				struct ath_mci_profile_info *info)
{
	struct ath_mci_profile_info *entry;
	static const u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };

	if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
	    (info->type == MCI_GPM_COEX_PROFILE_VOICE))
		return false;

	if (((NUM_PROF(mci) - mci->num_sco) == ATH_MCI_MAX_ACL_PROFILE) &&
	    (info->type != MCI_GPM_COEX_PROFILE_VOICE))
		return false;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return false;

	memcpy(entry, info, 10);
	INC_PROF(mci, info);
	list_add_tail(&entry->list, &mci->info);
	if (info->type == MCI_GPM_COEX_PROFILE_VOICE) {
		if (info->voice_type < sizeof(voice_priority))
			mci->voice_priority = voice_priority[info->voice_type];
		else
			mci->voice_priority = 110;
	}

	return true;
}

static void ath_mci_del_profile(struct ath_common *common,
				struct ath_mci_profile *mci,
				struct ath_mci_profile_info *entry)
{
	if (!entry)
		return;

	DEC_PROF(mci, entry);
	list_del(&entry->list);
	kfree(entry);
}

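/* Drop all tracked BT profiles and reset the aggregation limit. */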
void ath_mci_flush_profile(struct ath_mci_profile *mci)
{
	struct ath_mci_profile_info *info, *tinfo;

	mci->aggr_limit = 0;
	mci->num_mgmt = 0;

	if (list_empty(&mci->info))
		return;

	list_for_each_entry_safe(info, tinfo, &mci->info, list) {
		list_del(&info->list);
		DEC_PROF(mci, info);
		kfree(info);
	}
}

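/* Cap the aggregation duration so an A-MPDU fits into the WLAN share of the coex period. */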
static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
{
	struct ath_mci_profile *mci = &btcoex->mci;
	u32 wlan_airtime = btcoex->btcoex_period *
				(100 - btcoex->duty_cycle) / 100;

	/*
	 * Scale: wlan_airtime is in ms, aggr_limit is in 0.25 ms.
	 * When wlan_airtime is less than 4 ms, the aggregation limit has to
	 * be adjusted to half of wlan_airtime to ensure that the aggregation
	 * can fit without colliding with BT traffic.
	 */
	if ((wlan_airtime <= 4) &&
	    (!mci->aggr_limit || (mci->aggr_limit > (2 * wlan_airtime))))
		mci->aggr_limit = 2 * wlan_airtime;
}

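/*
 * Recompute the coex duty cycle, BT period and aggregation limit from the
 * current set of BT profiles, then restart btcoex with the new settings.
 */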
static void ath_mci_update_scheme(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_btcoex *btcoex = &sc->btcoex;
	struct ath_mci_profile *mci = &btcoex->mci;
	struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
	struct ath_mci_profile_info *info;
	u32 num_profile = NUM_PROF(mci);

	if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
		goto skip_tuning;

	mci->aggr_limit = 0;
	btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
	btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
	if (NUM_PROF(mci))
		btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
	else
		btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
							ATH_BTCOEX_STOMP_LOW;

	if (num_profile == 1) {
		info = list_first_entry(&mci->info,
					struct ath_mci_profile_info,
					list);
		if (mci->num_sco) {
			if (info->T == 12)
				mci->aggr_limit = 8;
			else if (info->T == 6) {
				mci->aggr_limit = 6;
				btcoex->duty_cycle = 30;
			} else
				mci->aggr_limit = 6;
			ath_dbg(common, MCI,
				"Single SCO, aggregation limit %d 1/4 ms\n",
				mci->aggr_limit);
		} else if (mci->num_pan || mci->num_other_acl) {
			/*
			 * For single PAN/FTP profile, allocate 35% for BT
			 * to improve WLAN throughput.
			 */
			btcoex->duty_cycle = AR_SREV_9565(sc->sc_ah) ? 40 : 35;
			btcoex->btcoex_period = 53;
			ath_dbg(common, MCI,
				"Single PAN/FTP bt period %d ms dutycycle %d\n",
				btcoex->btcoex_period, btcoex->duty_cycle);
		} else if (mci->num_hid) {
			btcoex->duty_cycle = 30;
			mci->aggr_limit = 6;
			ath_dbg(common, MCI,
				"Multiple attempt/timeout single HID "
				"aggregation limit 1.5 ms dutycycle 30%%\n");
		}
	} else if (num_profile == 2) {
		if (mci->num_hid == 2)
			btcoex->duty_cycle = 30;
		mci->aggr_limit = 6;
		ath_dbg(common, MCI,
			"Two BT profiles aggr limit 1.5 ms dutycycle %d%%\n",
			btcoex->duty_cycle);
	} else if (num_profile >= 3) {
		mci->aggr_limit = 4;
		ath_dbg(common, MCI,
			"Three or more profiles aggregation limit 1 ms\n");
	}

skip_tuning:
	if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) {
		if (IS_CHAN_HT(sc->sc_ah->curchan))
			ath_mci_adjust_aggr_limit(btcoex);
		else
			btcoex->btcoex_period >>= 1;
	}

	ath9k_btcoex_timer_pause(sc);
	ath9k_hw_btcoex_disable(sc->sc_ah);

	if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
		return;

	btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_BDR_DUTY_CYCLE : 0);
	if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
		btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;

	btcoex->btcoex_no_stomp = btcoex->btcoex_period *
		(100 - btcoex->duty_cycle) / 100;

	ath9k_hw_btcoex_enable(sc->sc_ah);
	ath9k_btcoex_timer_resume(sc);
}

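/*
 * Handle BT calibration GPM messages: a CAL_REQ from BT schedules a chip
 * reset so calibration can run, and a CAL_GRANT is answered with a GPM
 * carrying MCI_GPM_WLAN_CAL_DONE.
 */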
static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
	u32 payload[4] = {0, 0, 0, 0};

	switch (opcode) {
	case MCI_GPM_BT_CAL_REQ:
		if (mci_hw->bt_state == MCI_BT_AWAKE) {
			mci_hw->bt_state = MCI_BT_CAL_START;
			ath9k_queue_reset(sc, RESET_TYPE_MCI);
		}
		ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
		break;
	case MCI_GPM_BT_CAL_GRANT:
		MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
		ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
					16, false, true);
		break;
	default:
		ath_dbg(common, MCI, "Unknown GPM CAL message\n");
		break;
	}
}

static void ath9k_mci_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc, mci_work);

	ath_mci_update_scheme(sc);
}

static void ath_mci_update_stomp_txprio(u8 cur_txprio, u8 *stomp_prio)
{
	if (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_NONE])
		stomp_prio[ATH_BTCOEX_STOMP_NONE] = cur_txprio;

	if (cur_txprio > stomp_prio[ATH_BTCOEX_STOMP_ALL])
		stomp_prio[ATH_BTCOEX_STOMP_ALL] = cur_txprio;

	if ((cur_txprio > ATH_MCI_HI_PRIO) &&
	    (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_LOW]))
		stomp_prio[ATH_BTCOEX_STOMP_LOW] = cur_txprio;
}

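/*
 * Derive the per-stomp-type BT TX priorities from the active profiles
 * and program them into the hardware.
 */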
static void ath_mci_set_concur_txprio(struct ath_softc *sc)
{
	struct ath_btcoex *btcoex = &sc->btcoex;
	struct ath_mci_profile *mci = &btcoex->mci;
	u8 stomp_txprio[ATH_BTCOEX_STOMP_MAX];

	memset(stomp_txprio, 0, sizeof(stomp_txprio));
	if (mci->num_mgmt) {
		stomp_txprio[ATH_BTCOEX_STOMP_ALL] = ATH_MCI_INQUIRY_PRIO;
		if (!mci->num_pan && !mci->num_other_acl)
			stomp_txprio[ATH_BTCOEX_STOMP_NONE] =
				ATH_MCI_INQUIRY_PRIO;
	} else {
		static const u8 prof_prio[] = {
			50, 90, 94, 52
		}; /* RFCOMM, A2DP, HID, PAN */

		stomp_txprio[ATH_BTCOEX_STOMP_LOW] =
		stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0xff;

		if (mci->num_sco)
			ath_mci_update_stomp_txprio(mci->voice_priority,
						    stomp_txprio);
		if (mci->num_other_acl)
			ath_mci_update_stomp_txprio(prof_prio[0], stomp_txprio);
		if (mci->num_a2dp)
			ath_mci_update_stomp_txprio(prof_prio[1], stomp_txprio);
		if (mci->num_hid)
			ath_mci_update_stomp_txprio(prof_prio[2], stomp_txprio);
		if (mci->num_pan)
			ath_mci_update_stomp_txprio(prof_prio[3], stomp_txprio);

		if (stomp_txprio[ATH_BTCOEX_STOMP_NONE] == 0xff)
			stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0;

		if (stomp_txprio[ATH_BTCOEX_STOMP_LOW] == 0xff)
			stomp_txprio[ATH_BTCOEX_STOMP_LOW] = 0;
	}
	ath9k_hw_btcoex_set_concur_txprio(sc->sc_ah, stomp_txprio);
}

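/*
 * Update the profile list from a BT_PROFILE_INFO GPM message.
 * Returns 1 if the coex scheme needs to be recomputed.
 */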
static u8 ath_mci_process_profile(struct ath_softc *sc,
				  struct ath_mci_profile_info *info)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_btcoex *btcoex = &sc->btcoex;
	struct ath_mci_profile *mci = &btcoex->mci;
	struct ath_mci_profile_info *entry = NULL;

	entry = ath_mci_find_profile(mci, info);
	if (entry) {
		/*
		 * Two MCI interrupts are generated while connecting to
		 * headset and A2DP profiles, but only one MCI interrupt
		 * is generated, carrying the last-added profile type, when
		 * both profiles are disconnected.
		 * So when the second profile type is added, decrement
		 * the first one.
		 */
		if (entry->type != info->type) {
			DEC_PROF(mci, entry);
			INC_PROF(mci, info);
		}
		memcpy(entry, info, 10);
	}

	if (info->start) {
		if (!entry && !ath_mci_add_profile(common, mci, info))
			return 0;
	} else
		ath_mci_del_profile(common, mci, entry);

	ath_mci_set_concur_txprio(sc);
	return 1;
}

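/*
 * Update the critical-link bitmap from a BT_STATUS_UPDATE GPM message.
 * Returns 1 if the number of management profiles changed.
 */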
static u8 ath_mci_process_status(struct ath_softc *sc,
				 struct ath_mci_profile_status *status)
{
	struct ath_btcoex *btcoex = &sc->btcoex;
	struct ath_mci_profile *mci = &btcoex->mci;
	struct ath_mci_profile_info info;
	int i = 0, old_num_mgmt = mci->num_mgmt;

	/* Link status types are not handled */
	if (status->is_link)
		return 0;

	info.conn_handle = status->conn_handle;
	if (ath_mci_find_profile(mci, &info))
		return 0;

	if (status->conn_handle >= ATH_MCI_MAX_PROFILE)
		return 0;

	if (status->is_critical)
		__set_bit(status->conn_handle, mci->status);
	else
		__clear_bit(status->conn_handle, mci->status);

	mci->num_mgmt = 0;
	do {
		if (test_bit(i, mci->status))
			mci->num_mgmt++;
	} while (++i < ATH_MCI_MAX_PROFILE);

	ath_mci_set_concur_txprio(sc);
	if (old_num_mgmt != mci->num_mgmt)
		return 1;

	return 0;
}

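/*
 * Dispatch a GPM coex message received from BT and queue a scheme
 * update if the profile or status state changed.
 */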
static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_mci_profile_info profile_info;
	struct ath_mci_profile_status profile_status;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 major, minor, update_scheme = 0;
	u32 seq_num;

	if (ar9003_mci_state(ah, MCI_STATE_NEED_FLUSH_BT_INFO) &&
	    ar9003_mci_state(ah, MCI_STATE_ENABLE)) {
		ath_dbg(common, MCI, "(MCI) Need to flush BT profiles\n");
		ath_mci_flush_profile(&sc->btcoex.mci);
		ar9003_mci_state(ah, MCI_STATE_SEND_STATUS_QUERY);
	}

	switch (opcode) {
	case MCI_GPM_COEX_VERSION_QUERY:
		ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
		break;
	case MCI_GPM_COEX_VERSION_RESPONSE:
		major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
		minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
		ar9003_mci_set_bt_version(ah, major, minor);
		break;
	case MCI_GPM_COEX_STATUS_QUERY:
		ar9003_mci_send_wlan_channels(ah);
		break;
	case MCI_GPM_COEX_BT_PROFILE_INFO:
		memcpy(&profile_info,
		       (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);

		if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN) ||
		    (profile_info.type >= MCI_GPM_COEX_PROFILE_MAX)) {
			ath_dbg(common, MCI,
				"Illegal profile type = %d, state = %d\n",
				profile_info.type,
				profile_info.start);
			break;
		}

		update_scheme += ath_mci_process_profile(sc, &profile_info);
		break;
	case MCI_GPM_COEX_BT_STATUS_UPDATE:
		profile_status.is_link = *(rx_payload +
					   MCI_GPM_COEX_B_STATUS_TYPE);
		profile_status.conn_handle = *(rx_payload +
					       MCI_GPM_COEX_B_STATUS_LINKID);
		profile_status.is_critical = *(rx_payload +
					       MCI_GPM_COEX_B_STATUS_STATE);

		seq_num = *((u32 *)(rx_payload + 12));
		ath_dbg(common, MCI,
			"BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%u\n",
			profile_status.is_link, profile_status.conn_handle,
			profile_status.is_critical, seq_num);

		update_scheme += ath_mci_process_status(sc, &profile_status);
		break;
	default:
		ath_dbg(common, MCI, "Unknown GPM COEX message = 0x%02x\n", opcode);
		break;
	}
	if (update_scheme)
		ieee80211_queue_work(sc->hw, &sc->mci_work);
}

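/*
 * Allocate a single DMA-coherent buffer that holds both the scheduling
 * message area and the GPM area, then hand it to the MCI hardware layer.
 */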
int ath_mci_setup(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_mci_coex *mci = &sc->mci_coex;
	struct ath_mci_buf *buf = &mci->sched_buf;
	int ret;

	buf->bf_addr = dmam_alloc_coherent(sc->dev,
				  ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
				  &buf->bf_paddr, GFP_KERNEL);

	if (buf->bf_addr == NULL) {
		ath_dbg(common, FATAL, "MCI buffer alloc failed\n");
		return -ENOMEM;
	}

	memset(buf->bf_addr, MCI_GPM_RSVD_PATTERN,
	       ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE);

	mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;

	mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
	mci->gpm_buf.bf_addr = mci->sched_buf.bf_addr + mci->sched_buf.bf_len;
	mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;

	ret = ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
			       mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
			       mci->sched_buf.bf_paddr);
	if (ret) {
		ath_err(common, "Failed to initialize MCI\n");
		return ret;
	}

	INIT_WORK(&sc->mci_work, ath9k_mci_work);
	ath_dbg(common, MCI, "MCI Initialized\n");

	return 0;
}

void ath_mci_cleanup(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;

	ar9003_mci_cleanup(ah);

	ath_dbg(common, MCI, "MCI De-Initialized\n");
}

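/*
 * MCI interrupt handler: handle BT wake/sleep transitions, drain the
 * GPM buffer and acknowledge the remaining RX message interrupts.
 */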
void ath_mci_intr(struct ath_softc *sc)
{
	struct ath_mci_coex *mci = &sc->mci_coex;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
	u32 mci_int, mci_int_rxmsg;
	u32 offset, subtype, opcode;
	u32 *pgpm;
	u32 more_data = MCI_GPM_MORE;
	bool skip_gpm = false;

	ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);

	if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
		ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET);
		return;
	}

	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) {
		u32 payload[4] = { 0xffffffff, 0xffffffff,
				   0xffffffff, 0xffffff00};

		/*
		 * The following REMOTE_RESET and SYS_WAKING used to be sent
		 * only when BT woke up. Now they are always sent, as a
		 * recovery method to reset BT MCI's RX alignment.
		 */
		ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
					payload, 16, true, false);
		ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
					NULL, 0, true, false);

		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
		ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE);

		/*
		 * Always do this for recovery, 2G/5G toggling and LNA_TRANS.
		 */
		ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
	}

	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;

		if ((mci_hw->bt_state == MCI_BT_SLEEP) &&
		    (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
		     MCI_BT_SLEEP))
			ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
	}

	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;

		if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
		    (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
		     MCI_BT_AWAKE))
			mci_hw->bt_state = MCI_BT_SLEEP;
	}

	if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
	    (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
		ar9003_mci_state(ah, MCI_STATE_RECOVER_RX);
		skip_gpm = true;
	}

	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
		ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET);
	}

	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;

		while (more_data == MCI_GPM_MORE) {
			if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
				return;

			pgpm = mci->gpm_buf.bf_addr;
			offset = ar9003_mci_get_next_gpm_offset(ah, &more_data);

			if (offset == MCI_GPM_INVALID)
				break;

			pgpm += (offset >> 2);

			/*
			 * The first dword is timer.
			 * The real data starts from 2nd dword.
			 */
			subtype = MCI_GPM_TYPE(pgpm);
			opcode = MCI_GPM_OPCODE(pgpm);

			if (skip_gpm)
				goto recycle;

			if (MCI_GPM_IS_CAL_TYPE(subtype)) {
				ath_mci_cal_msg(sc, subtype, (u8 *)pgpm);
			} else {
				switch (subtype) {
				case MCI_GPM_COEX_AGENT:
					ath_mci_msg(sc, opcode, (u8 *)pgpm);
					break;
				default:
					break;
				}
			}
		recycle:
			MCI_GPM_RECYCLE(pgpm);
		}
	}

	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;

		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO)
			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;

		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
			int value_dbm = MS(mci_hw->cont_status,
					   AR_MCI_CONT_RSSI_POWER);

			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;

			ath_dbg(common, MCI,
				"MCI CONT_INFO: (%s) pri = %d pwr = %d dBm\n",
				MS(mci_hw->cont_status, AR_MCI_CONT_TXRX) ?
				"tx" : "rx",
				MS(mci_hw->cont_status, AR_MCI_CONT_PRIORITY),
				value_dbm);
		}

		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK)
			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;

		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
	}

	if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
	    (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
		mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
			     AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
		ath_mci_msg(sc, MCI_GPM_COEX_NOOP, NULL);
	}
}

void ath_mci_enable(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (!common->btcoex_enabled)
		return;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
		sc->sc_ah->imask |= ATH9K_INT_MCI;
}

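/*
 * Mask the BT channels that overlap the current 2.4 GHz WLAN channel
 * (plus a guard band) out of the channel map and send it to BT;
 * with allow_all the default map is sent unchanged.
 */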
void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	struct ath9k_channel *chan = ah->curchan;
	/* Per-call working copy; overlapping BT channel bits are cleared below. */
	u32 channelmap[] = {
		0x00000000, 0xffff0000, 0xffffffff, 0x7fffffff
	};
	int i;
	s16 chan_start, chan_end;
	u16 wlan_chan;

	if (!chan || !IS_CHAN_2GHZ(chan))
		return;

	if (allow_all)
		goto send_wlan_chan;

	wlan_chan = chan->channel - 2402;

	chan_start = wlan_chan - 10;
	chan_end = wlan_chan + 10;

	if (IS_CHAN_HT40PLUS(chan))
		chan_end += 20;
	else if (IS_CHAN_HT40MINUS(chan))
		chan_start -= 20;

	/* adjust side band */
	chan_start -= 7;
	chan_end += 7;

	if (chan_start <= 0)
		chan_start = 0;
	if (chan_end >= ATH_MCI_NUM_BT_CHANNELS)
		chan_end = ATH_MCI_NUM_BT_CHANNELS - 1;

	ath_dbg(ath9k_hw_common(ah), MCI,
		"WLAN current channel %d mask BT channel %d - %d\n",
		wlan_chan, chan_start, chan_end);

	for (i = chan_start; i < chan_end; i++)
		MCI_GPM_CLR_CHANNEL_BIT(&channelmap, i);

send_wlan_chan:
	/* update and send wlan channels info to BT */
	for (i = 0; i < 4; i++)
		mci->wlan_channels[i] = channelmap[i];
	ar9003_mci_send_wlan_channels(ah);
	ar9003_mci_state(ah, MCI_STATE_SEND_VERSION_QUERY);
}

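/*
 * Track whether BT/WLAN concurrent TX is allowed and reapply the TX
 * power limit whenever that state changes.
 */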
void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
			   bool concur_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
	bool old_concur_tx = mci_hw->concur_tx;

	if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX)) {
		mci_hw->concur_tx = false;
		return;
	}

	if (!IS_CHAN_2GHZ(ah->curchan))
		return;

	if (setchannel) {
		struct ath9k_hw_cal_data *caldata = &sc->cur_chan->caldata;
		if (IS_CHAN_HT40PLUS(ah->curchan) &&
		    (ah->curchan->channel > caldata->channel) &&
		    (ah->curchan->channel <= caldata->channel + 20))
			return;
		if (IS_CHAN_HT40MINUS(ah->curchan) &&
		    (ah->curchan->channel < caldata->channel) &&
		    (ah->curchan->channel >= caldata->channel - 20))
			return;
		mci_hw->concur_tx = false;
	} else
		mci_hw->concur_tx = concur_tx;

	if (old_concur_tx != mci_hw->concur_tx)
		ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
}

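/*
 * Count consecutive weak-RSSI intervals while an SCO or A2DP profile is
 * active; the counter is cleared once the averaged RSSI recovers.
 */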
static void ath9k_mci_stomp_audio(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_btcoex *btcoex = &sc->btcoex;
	struct ath_mci_profile *mci = &btcoex->mci;

	if (!mci->num_sco && !mci->num_a2dp)
		return;

	if (ah->stats.avgbrssi > 25) {
		btcoex->stomp_audio = 0;
		return;
	}

	btcoex->stomp_audio++;
}
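
/*
 * Apply hysteresis to the averaged RSSI: after ATH_MCI_CONCUR_TX_SWITCH
 * consecutive strong (or weak) samples, enable (or disable) concurrent TX.
 */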
void ath9k_mci_update_rssi(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_btcoex *btcoex = &sc->btcoex;
	struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;

	ath9k_mci_stomp_audio(sc);

	if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX))
		return;

	if (ah->stats.avgbrssi >= 40) {
		if (btcoex->rssi_count < 0)
			btcoex->rssi_count = 0;
		if (++btcoex->rssi_count >= ATH_MCI_CONCUR_TX_SWITCH) {
			btcoex->rssi_count = 0;
			ath9k_mci_set_txpower(sc, false, true);
		}
	} else {
		if (btcoex->rssi_count > 0)
			btcoex->rssi_count = 0;
		if (--btcoex->rssi_count <= -ATH_MCI_CONCUR_TX_SWITCH) {
			btcoex->rssi_count = 0;
			ath9k_mci_set_txpower(sc, false, false);
		}
	}
}
772