1 /*
2  * Copyright (c) 2013 Qualcomm Atheros, Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
9  * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
10  * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
11  * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
12  * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
13  * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
14  * PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include "opt_ah.h"
18 
19 #include "ah.h"
20 #include "ah_internal.h"
21 
22 #include "ar9300/ar9300.h"
23 #include "ar9300/ar9300reg.h"
24 #include "ar9300/ar9300phy.h"
25 
26 /*
27  * Checks to see if an interrupt is pending on our NIC
28  *
29  * Returns: TRUE    if an interrupt is pending
30  *          FALSE   if not
31  */
32 HAL_BOOL
33 ar9300_is_interrupt_pending(struct ath_hal *ah)
34 {
35     u_int32_t sync_en_def = AR9300_INTR_SYNC_DEFAULT;
36     u_int32_t host_isr;
37 
38     /*
39      * Some platforms trigger our ISR before applying power to
40      * the card, so make sure.
41      */
42     host_isr = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE));
43     if ((host_isr & AR_INTR_ASYNC_USED) && (host_isr != AR_INTR_SPURIOUS)) {
44         return AH_TRUE;
45     }
46 
47     host_isr = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE));
48     if (AR_SREV_POSEIDON(ah)) {
49         sync_en_def = AR9300_INTR_SYNC_DEF_NO_HOST1_PERR;
50     }
51     else if (AR_SREV_WASP(ah)) {
52         sync_en_def = AR9340_INTR_SYNC_DEFAULT;
53     }
54 
55     if ((host_isr & (sync_en_def | AR_INTR_SYNC_MASK_GPIO)) &&
56         (host_isr != AR_INTR_SPURIOUS)) {
57         return AH_TRUE;
58     }
59 
60     return AH_FALSE;
61 }
62 
63 /*
64  * Reads the Interrupt Status Register value from the NIC, thus deasserting
65  * the interrupt line, and returns both the masked and unmasked mapped ISR
66  * values.  The value returned is mapped to abstract the hw-specific bit
67  * locations in the Interrupt Status Register.
68  *
69  * Returns: A hardware-abstracted bitmap of all non-masked-out
70  *          interrupts pending, as well as an unmasked value
71  */
72 #define MAP_ISR_S2_HAL_CST          6 /* Carrier sense timeout */
73 #define MAP_ISR_S2_HAL_GTT          6 /* Global transmit timeout */
74 #define MAP_ISR_S2_HAL_TIM          3 /* TIM */
75 #define MAP_ISR_S2_HAL_CABEND       0 /* CABEND */
76 #define MAP_ISR_S2_HAL_DTIMSYNC     7 /* DTIMSYNC */
77 #define MAP_ISR_S2_HAL_DTIM         7 /* DTIM */
78 #define MAP_ISR_S2_HAL_TSFOOR       4 /* Rx TSF out of range */
79 #define MAP_ISR_S2_HAL_BBPANIC      6 /* Panic watchdog IRQ from BB */
/*
 * Read, map and acknowledge the pending interrupt causes.
 *
 * ah:     HAL state pointer.
 * masked: out-parameter; receives the HAL-abstracted bitmap of pending,
 *         non-masked interrupts (set to 0 when nothing is pending).
 * type:   interrupt delivery type; HAL_INT_MSI selects the per-vector
 *         MSI fast paths below.
 * msi:    MSI vector number, only meaningful when type == HAL_INT_MSI.
 * nortc:  when true, AR_ISR (RTC-clocked) is not read; the async cause
 *         is simply acknowledged instead.
 *
 * Returns AH_TRUE when an interrupt was pending, AH_FALSE otherwise.
 */
HAL_BOOL
ar9300_get_pending_interrupts(
    struct ath_hal *ah,
    HAL_INT *masked,
    HAL_INT_TYPE type,
    u_int8_t msi,
    HAL_BOOL nortc)
{
    struct ath_hal_9300 *ahp = AH9300(ah);
    HAL_BOOL  ret_val = AH_TRUE;
    u_int32_t isr = 0;
    u_int32_t mask2 = 0;               /* HAL bits translated from ISR_S2 */
    u_int32_t sync_cause = 0;
    u_int32_t async_cause;
    u_int32_t msi_pend_addr_mask = 0;
    u_int32_t sync_en_def = AR9300_INTR_SYNC_DEFAULT;
    HAL_CAPABILITIES *p_cap = &AH_PRIVATE(ah)->ah_caps;

    *masked = 0;

    /*
     * Fast paths for dedicated MSI vectors: acknowledge only the ISR
     * bits belonging to that vector and report the matching HAL
     * interrupt, skipping the full cause scan.  The "misc" vector
     * falls through to the generic path below.
     */
    if (!nortc) {
        if (HAL_INT_MSI == type) {
            if (msi == HAL_MSIVEC_RXHP) {
                OS_REG_WRITE(ah, AR_ISR, AR_ISR_HP_RXOK);
                *masked = HAL_INT_RXHP;
                goto end;
            } else if (msi == HAL_MSIVEC_RXLP) {
                OS_REG_WRITE(ah, AR_ISR,
                    (AR_ISR_LP_RXOK | AR_ISR_RXMINTR | AR_ISR_RXINTM));
                *masked = HAL_INT_RXLP;
                goto end;
            } else if (msi == HAL_MSIVEC_TX) {
                OS_REG_WRITE(ah, AR_ISR, AR_ISR_TXOK);
                *masked = HAL_INT_TX;
                goto end;
            } else if (msi == HAL_MSIVEC_MISC) {
                /*
                 * For the misc MSI event fall through and determine the cause.
                 */
            }
        }
    }

    /* Make sure mac interrupt is pending in async interrupt cause register */
    async_cause = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE));
    if (async_cause & AR_INTR_ASYNC_USED) {
        /*
         * RTC may not be on since it runs on a slow 32khz clock
         * so check its status to be sure
         */
        if (!nortc &&
            (OS_REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) ==
             AR_RTC_STATUS_ON)
        {
            isr = OS_REG_READ(ah, AR_ISR);
        }
    }

    /* Pick the sync-interrupt set for this chip revision. */
    if (AR_SREV_POSEIDON(ah)) {
        sync_en_def = AR9300_INTR_SYNC_DEF_NO_HOST1_PERR;
    }
    else if (AR_SREV_WASP(ah)) {
        sync_en_def = AR9340_INTR_SYNC_DEFAULT;
    }

    /* Store away the async and sync cause registers */
    /* XXX Do this before the filtering done below */
#ifdef	AH_INTERRUPT_DEBUGGING
	ah->ah_intrstate[0] = OS_REG_READ(ah, AR_ISR);
	ah->ah_intrstate[1] = OS_REG_READ(ah, AR_ISR_S0);
	ah->ah_intrstate[2] = OS_REG_READ(ah, AR_ISR_S1);
	ah->ah_intrstate[3] = OS_REG_READ(ah, AR_ISR_S2);
	ah->ah_intrstate[4] = OS_REG_READ(ah, AR_ISR_S3);
	ah->ah_intrstate[5] = OS_REG_READ(ah, AR_ISR_S4);
	ah->ah_intrstate[6] = OS_REG_READ(ah, AR_ISR_S5);

	/* XXX double reading? */
	ah->ah_syncstate = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE));
#endif

    sync_cause =
        OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE)) &
        (sync_en_def | AR_INTR_SYNC_MASK_GPIO);

    /* Nothing pending anywhere: report no interrupt. */
    if (!isr && !sync_cause && !async_cause) {
        ret_val = AH_FALSE;
        goto end;
    }

    HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
        "%s: isr=0x%x, sync_cause=0x%x, async_cause=0x%x\n",
	__func__,
	isr,
	sync_cause,
	async_cause);

    if (isr) {
        if (isr & AR_ISR_BCNMISC) {
            u_int32_t isr2;
            isr2 = OS_REG_READ(ah, AR_ISR_S2);

            /* Translate ISR bits to HAL values */
            mask2 |= ((isr2 & AR_ISR_S2_TIM) >> MAP_ISR_S2_HAL_TIM);
            mask2 |= ((isr2 & AR_ISR_S2_DTIM) >> MAP_ISR_S2_HAL_DTIM);
            mask2 |= ((isr2 & AR_ISR_S2_DTIMSYNC) >> MAP_ISR_S2_HAL_DTIMSYNC);
            mask2 |= ((isr2 & AR_ISR_S2_CABEND) >> MAP_ISR_S2_HAL_CABEND);
            mask2 |= ((isr2 & AR_ISR_S2_GTT) << MAP_ISR_S2_HAL_GTT);
            mask2 |= ((isr2 & AR_ISR_S2_CST) << MAP_ISR_S2_HAL_CST);
            mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >> MAP_ISR_S2_HAL_TSFOOR);
            mask2 |= ((isr2 & AR_ISR_S2_BBPANIC) >> MAP_ISR_S2_HAL_BBPANIC);

            if (!p_cap->halIsrRacSupport) {
                /*
                 * EV61133 (missing interrupts due to ISR_RAC):
                 * If not using ISR_RAC, clear interrupts by writing to ISR_S2.
                 * This avoids a race condition where a new BCNMISC interrupt
                 * could come in between reading the ISR and clearing the
                 * interrupt via the primary ISR.  We therefore clear the
                 * interrupt via the secondary, which avoids this race.
                 */
                OS_REG_WRITE(ah, AR_ISR_S2, isr2);
                isr &= ~AR_ISR_BCNMISC;
            }
        }

        /* Use AR_ISR_RAC only if chip supports it.
         * See EV61133 (missing interrupts due to ISR_RAC)
         */
        if (p_cap->halIsrRacSupport) {
            isr = OS_REG_READ(ah, AR_ISR_RAC);
        }
        /* All-ones means the device is no longer responding (e.g.
         * surprise removal or power loss); report nothing pending. */
        if (isr == 0xffffffff) {
            *masked = 0;
            ret_val = AH_FALSE;
            goto end;
        }

        *masked = isr & HAL_INT_COMMON;

        /*
         * When interrupt mitigation is switched on, we fake a normal RX or TX
         * interrupt when we received a mitigated interrupt. This way, the upper
         * layer do not need to know about feature.
         */
        if (ahp->ah_intr_mitigation_rx) {
            /* Only Rx interrupt mitigation. No Tx intr. mitigation. */
            if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) {
                *masked |= HAL_INT_RXLP;
            }
        }
        if (ahp->ah_intr_mitigation_tx) {
            if (isr & (AR_ISR_TXMINTR | AR_ISR_TXINTM)) {
                *masked |= HAL_INT_TX;
            }
        }

        /* Map low/high-priority RX and TX completions/errors to HAL bits. */
        if (isr & (AR_ISR_LP_RXOK | AR_ISR_RXERR)) {
            *masked |= HAL_INT_RXLP;
        }
        if (isr & AR_ISR_HP_RXOK) {
            *masked |= HAL_INT_RXHP;
        }
        if (isr & (AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL)) {
            *masked |= HAL_INT_TX;

            if (!p_cap->halIsrRacSupport) {
                u_int32_t s0, s1;
                /*
                 * EV61133 (missing interrupts due to ISR_RAC):
                 * If not using ISR_RAC, clear interrupts by writing to
                 * ISR_S0/S1.
                 * This avoids a race condition where a new interrupt
                 * could come in between reading the ISR and clearing the
                 * interrupt via the primary ISR.  We therefore clear the
                 * interrupt via the secondary, which avoids this race.
                 */
                s0 = OS_REG_READ(ah, AR_ISR_S0);
                OS_REG_WRITE(ah, AR_ISR_S0, s0);
                s1 = OS_REG_READ(ah, AR_ISR_S1);
                OS_REG_WRITE(ah, AR_ISR_S1, s1);

                isr &= ~(AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL);
            }
        }

        /*
         * Do not treat receive overflows as fatal for owl.
         */
        if (isr & AR_ISR_RXORN) {
#if __PKT_SERIOUS_ERRORS__
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: receive FIFO overrun interrupt\n", __func__);
#endif
        }

#if 0
        /* XXX Verify if this is fixed for Osprey */
        if (!p_cap->halAutoSleepSupport) {
            u_int32_t isr5 = OS_REG_READ(ah, AR_ISR_S5_S);
            if (isr5 & AR_ISR_S5_TIM_TIMER) {
                *masked |= HAL_INT_TIM_TIMER;
            }
        }
#endif
        if (isr & AR_ISR_GENTMR) {
            u_int32_t s5;

            if (p_cap->halIsrRacSupport) {
                /* Use secondary shadow registers if using ISR_RAC */
                s5 = OS_REG_READ(ah, AR_ISR_S5_S);
            } else {
                s5 = OS_REG_READ(ah, AR_ISR_S5);
            }
            /*
             * NOTE(review): this inner test duplicates the enclosing
             * condition and is therefore always true here.
             */
            if (isr & AR_ISR_GENTMR) {

                HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                    "%s: GENTIMER, ISR_RAC=0x%x ISR_S2_S=0x%x\n", __func__,
                    isr, s5);
                /* Latch which generic timers triggered / hit threshold. */
                ahp->ah_intr_gen_timer_trigger =
                    MS(s5, AR_ISR_S5_GENTIMER_TRIG);
                ahp->ah_intr_gen_timer_thresh =
                    MS(s5, AR_ISR_S5_GENTIMER_THRESH);
                if (ahp->ah_intr_gen_timer_trigger) {
                    *masked |= HAL_INT_GENTIMER;
                }
            }
            if (!p_cap->halIsrRacSupport) {
                /*
                 * EV61133 (missing interrupts due to ISR_RAC):
                 * If not using ISR_RAC, clear interrupts by writing to ISR_S5.
                 * This avoids a race condition where a new interrupt
                 * could come in between reading the ISR and clearing the
                 * interrupt via the primary ISR.  We therefore clear the
                 * interrupt via the secondary, which avoids this race.
                 */
                OS_REG_WRITE(ah, AR_ISR_S5, s5);
                isr &= ~AR_ISR_GENTMR;
            }
        }

        *masked |= mask2;

        if (!p_cap->halIsrRacSupport) {
            /*
             * EV61133 (missing interrupts due to ISR_RAC):
             * If not using ISR_RAC, clear the interrupts we've read by
             * writing back ones in these locations to the primary ISR
             * (except for interrupts that have a secondary isr register -
             * see above).
             */
            OS_REG_WRITE(ah, AR_ISR, isr);

            /* Flush prior write */
            (void) OS_REG_READ(ah, AR_ISR);
        }

#ifdef AH_SUPPORT_AR9300
        if (*masked & HAL_INT_BBPANIC) {
            ar9300_handle_bb_panic(ah);
        }
#endif
    }

    if (async_cause) {
        if (nortc) {
            /* RTC unavailable: just acknowledge the async causes. */
            OS_REG_WRITE(ah,
                AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE_CLR), async_cause);
            /* Flush prior write */
            (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE_CLR));
        } else {
#ifdef ATH_GPIO_USE_ASYNC_CAUSE
            if (async_cause & AR_INTR_ASYNC_CAUSE_GPIO) {
                /* Record which GPIO lines fired for the GPIO handler. */
                ahp->ah_gpio_cause = (async_cause & AR_INTR_ASYNC_CAUSE_GPIO) >>
                                     AR_INTR_ASYNC_ENABLE_GPIO_S;
                *masked |= HAL_INT_GPIO;
            }
#endif
        }

#if ATH_SUPPORT_MCI
        /* Bluetooth coexistence (MCI) interrupt handling. */
        if ((async_cause & AR_INTR_ASYNC_CAUSE_MCI) &&
            p_cap->halMciSupport)
        {
            u_int32_t int_raw, int_rx_msg;

            int_rx_msg = OS_REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
            int_raw = OS_REG_READ(ah, AR_MCI_INTERRUPT_RAW);

            /* 0xdeadbeef indicates an invalid register read; skip. */
            if ((int_raw == 0xdeadbeef) || (int_rx_msg == 0xdeadbeef))
            {
                HALDEBUG(ah, HAL_DEBUG_BT_COEX,
                    "(MCI) Get 0xdeadbeef during MCI int processing"
                    "new int_raw=0x%08x, new rx_msg_raw=0x%08x, "
                    "int_raw=0x%08x, rx_msg_raw=0x%08x\n",
                    int_raw, int_rx_msg, ahp->ah_mci_int_raw,
                    ahp->ah_mci_int_rx_msg);
            }
            else {
                /* Accumulate causes if previous ones are still unhandled. */
                if (ahp->ah_mci_int_raw || ahp->ah_mci_int_rx_msg) {
                    ahp->ah_mci_int_rx_msg |= int_rx_msg;
                    ahp->ah_mci_int_raw |= int_raw;
                }
                else {
                    ahp->ah_mci_int_rx_msg = int_rx_msg;
                    ahp->ah_mci_int_raw = int_raw;
                }

                *masked |= HAL_INT_MCI;
                ahp->ah_mci_rx_status = OS_REG_READ(ah, AR_MCI_RX_STATUS);
                if (int_rx_msg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
                    ahp->ah_mci_cont_status =
                                    OS_REG_READ(ah, AR_MCI_CONT_STATUS);
                }
                /* Acknowledge the MCI causes we just captured. */
                OS_REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
                    int_rx_msg);
                OS_REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, int_raw);

                HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s:AR_INTR_SYNC_MCI\n", __func__);
            }
        }
#endif
    }

    if (sync_cause) {
        int host1_fatal, host1_perr, radm_cpl_timeout, local_timeout;

        /* Wasp (AR9340) uses different sync-cause bit positions. */
        host1_fatal = AR_SREV_WASP(ah) ?
            AR9340_INTR_SYNC_HOST1_FATAL : AR9300_INTR_SYNC_HOST1_FATAL;
        host1_perr = AR_SREV_WASP(ah) ?
            AR9340_INTR_SYNC_HOST1_PERR : AR9300_INTR_SYNC_HOST1_PERR;
        radm_cpl_timeout = AR_SREV_WASP(ah) ?
            0x0 : AR9300_INTR_SYNC_RADM_CPL_TIMEOUT;
        local_timeout = AR_SREV_WASP(ah) ?
            AR9340_INTR_SYNC_LOCAL_TIMEOUT : AR9300_INTR_SYNC_LOCAL_TIMEOUT;

        if (sync_cause & host1_fatal) {
#if __PKT_SERIOUS_ERRORS__
            HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
                "%s: received PCI FATAL interrupt\n", __func__);
#endif
           *masked |= HAL_INT_FATAL; /* Set FATAL INT flag here;*/
        }
        if (sync_cause & host1_perr) {
#if __PKT_SERIOUS_ERRORS__
            HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
                "%s: received PCI PERR interrupt\n", __func__);
#endif
        }

        if (sync_cause & radm_cpl_timeout) {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: AR_INTR_SYNC_RADM_CPL_TIMEOUT\n",
                __func__);

            /* Pulse the host-interface reset, then flag fatal. */
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_RC), AR_RC_HOSTIF);
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_RC), 0);
            *masked |= HAL_INT_FATAL;
        }
        if (sync_cause & local_timeout) {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: AR_INTR_SYNC_LOCAL_TIMEOUT\n",
                __func__);
        }

#ifndef ATH_GPIO_USE_ASYNC_CAUSE
        if (sync_cause & AR_INTR_SYNC_MASK_GPIO) {
            ahp->ah_gpio_cause = (sync_cause & AR_INTR_SYNC_MASK_GPIO) >>
                                 AR_INTR_SYNC_ENABLE_GPIO_S;
            *masked |= HAL_INT_GPIO;
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: AR_INTR_SYNC_GPIO\n", __func__);
        }
#endif

        /* Acknowledge all handled sync causes. */
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE_CLR), sync_cause);
        /* Flush prior write */
        (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE_CLR));
    }

end:
    if (HAL_INT_MSI == type) {
        /*
         * WAR for Bug EV#75887
         * In normal case, SW read HOST_INTF_PCIE_MSI (0x40A4) and write
         * into ah_msi_reg.  Then use value of ah_msi_reg to set bit#25
         * when want to enable HW write the cfg_msi_pending.
         * Sometimes, driver get MSI interrupt before read 0x40a4 and
         * ah_msi_reg is initialization value (0x0).
         * We don't know why "MSI interrupt earlier than driver read" now...
         */
        if (!ahp->ah_msi_reg) {
            ahp->ah_msi_reg = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_PCIE_MSI));
        }
        if (AR_SREV_POSEIDON(ah)) {
            msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
        } else {
            msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR;
        }
        /* Re-arm MSI pending-address reporting. */
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_PCIE_MSI),
            ((ahp->ah_msi_reg | AR_PCIE_MSI_ENABLE) & msi_pend_addr_mask));

    }

    return ret_val;
}
485 
486 HAL_INT
487 ar9300_get_interrupts(struct ath_hal *ah)
488 {
489     return AH9300(ah)->ah_mask_reg;
490 }
491 
492 /*
493  * Atomically enables NIC interrupts.  Interrupts are passed in
494  * via the enumerated bitmask in ints.
495  */
/*
 * ah:    HAL state pointer.
 * ints:  bitmap of HAL_INT_* interrupts to enable (HAL_INT_GLOBAL
 *        gates the top-level enable).
 * nortc: when true, skip RTC-clocked registers (AR_IER/AR_IMR*).
 *
 * Returns the previous interrupt mask so callers can restore it.
 *
 * The sequence is: disable everything first, reprogram the masks,
 * then re-enable at the top level — so no interrupt fires while the
 * masks are in a half-written state.
 */
HAL_INT
ar9300_set_interrupts(struct ath_hal *ah, HAL_INT ints, HAL_BOOL nortc)
{
    struct ath_hal_9300 *ahp = AH9300(ah);
    u_int32_t omask = ahp->ah_mask_reg;   /* previous mask, returned below */
    u_int32_t mask, mask2, msi_mask = 0;
    u_int32_t msi_pend_addr_mask = 0;
    u_int32_t sync_en_def = AR9300_INTR_SYNC_DEFAULT;
    HAL_CAPABILITIES *p_cap = &AH_PRIVATE(ah)->ah_caps;

    HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
        "%s: 0x%x => 0x%x\n", __func__, omask, ints);

    /* Step 1: if interrupts were globally enabled, disable them all. */
    if (omask & HAL_INT_GLOBAL) {
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: disable IER\n", __func__);

        if (ah->ah_config.ath_hal_enable_msi) {
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_ENABLE), 0);
            /* flush write to HW */
            (void)OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_ENABLE));
        }

        if (!nortc) {
            OS_REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
            (void) OS_REG_READ(ah, AR_IER);   /* flush write to HW */
        }

        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_ENABLE), 0);
        /* flush write to HW */
        (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_ENABLE));
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_ENABLE), 0);
        /* flush write to HW */
        (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_ENABLE));
    }

    /* Step 2: translate HAL_INT_* bits into AR_IMR/AR_IMR_S2 masks. */
    if (!nortc) {
        /* reference count for global IER */
        if (ints & HAL_INT_GLOBAL) {
#ifdef AH_DEBUG
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: Request HAL_INT_GLOBAL ENABLED\n", __func__);
#if 0
            if (OS_ATOMIC_READ(&ahp->ah_ier_ref_count) == 0) {
                HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
                    "%s: WARNING: ah_ier_ref_count is 0 "
                    "and attempting to enable IER\n",
                    __func__);
            }
#endif
#endif
#if 0
            if (OS_ATOMIC_READ(&ahp->ah_ier_ref_count) > 0) {
                OS_ATOMIC_DEC(&ahp->ah_ier_ref_count);
            }
#endif
        } else {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: Request HAL_INT_GLOBAL DISABLED\n", __func__);
            OS_ATOMIC_INC(&ahp->ah_ier_ref_count);
        }
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
            "%s: ah_ier_ref_count = %d\n", __func__, ahp->ah_ier_ref_count);

        mask = ints & HAL_INT_COMMON;
        mask2 = 0;
        msi_mask = 0;

        if (ints & HAL_INT_TX) {
            /* TX mitigation replaces the per-packet TXOK interrupt. */
            if (ahp->ah_intr_mitigation_tx) {
                mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
            } else if (ahp->ah_tx_ok_interrupt_mask) {
                mask |= AR_IMR_TXOK;
            }
            msi_mask |= AR_INTR_PRIO_TX;
            if (ahp->ah_tx_err_interrupt_mask) {
                mask |= AR_IMR_TXERR;
            }
            if (ahp->ah_tx_eol_interrupt_mask) {
                mask |= AR_IMR_TXEOL;
            }
        }
        if (ints & HAL_INT_RX) {
            mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
            /* RX mitigation replaces the low-priority RXOK interrupt. */
            if (ahp->ah_intr_mitigation_rx) {
                mask &= ~(AR_IMR_RXOK_LP);
                mask |=  AR_IMR_RXMINTR | AR_IMR_RXINTM;
            } else {
                mask |= AR_IMR_RXOK_LP;
            }
            msi_mask |= AR_INTR_PRIO_RXLP | AR_INTR_PRIO_RXHP;
            if (! p_cap->halAutoSleepSupport) {
                mask |= AR_IMR_GENTMR;
            }
        }

        /* Beacon-related secondary interrupts all require BCNMISC. */
        if (ints & (HAL_INT_BMISC)) {
            mask |= AR_IMR_BCNMISC;
            if (ints & HAL_INT_TIM) {
                mask2 |= AR_IMR_S2_TIM;
            }
            if (ints & HAL_INT_DTIM) {
                mask2 |= AR_IMR_S2_DTIM;
            }
            if (ints & HAL_INT_DTIMSYNC) {
                mask2 |= AR_IMR_S2_DTIMSYNC;
            }
            if (ints & HAL_INT_CABEND) {
                mask2 |= (AR_IMR_S2_CABEND);
            }
            if (ints & HAL_INT_TSFOOR) {
                mask2 |= AR_IMR_S2_TSFOOR;
            }
        }

        if (ints & (HAL_INT_GTT | HAL_INT_CST)) {
            mask |= AR_IMR_BCNMISC;
            if (ints & HAL_INT_GTT) {
                mask2 |= AR_IMR_S2_GTT;
            }
            if (ints & HAL_INT_CST) {
                mask2 |= AR_IMR_S2_CST;
            }
        }

        if (ints & HAL_INT_BBPANIC) {
            /* EV92527 - MAC secondary interrupt must enable AR_IMR_BCNMISC */
            mask |= AR_IMR_BCNMISC;
            mask2 |= AR_IMR_S2_BBPANIC;
        }

        if (ints & HAL_INT_GENTIMER) {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: enabling gen timer\n", __func__);
            mask |= AR_IMR_GENTMR;
        }

        /* Write the new IMR and store off our SW copy. */
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: new IMR 0x%x\n", __func__, mask);
        OS_REG_WRITE(ah, AR_IMR, mask);
        /* Rebuild only the bits this routine manages in the S2 shadow. */
        ahp->ah_mask2Reg &= ~(AR_IMR_S2_TIM |
                        AR_IMR_S2_DTIM |
                        AR_IMR_S2_DTIMSYNC |
                        AR_IMR_S2_CABEND |
                        AR_IMR_S2_CABTO  |
                        AR_IMR_S2_TSFOOR |
                        AR_IMR_S2_GTT |
                        AR_IMR_S2_CST |
                        AR_IMR_S2_BBPANIC);
        ahp->ah_mask2Reg |= mask2;
        OS_REG_WRITE(ah, AR_IMR_S2, ahp->ah_mask2Reg );
        ahp->ah_mask_reg = ints;

        if (! p_cap->halAutoSleepSupport) {
            if (ints & HAL_INT_TIM_TIMER) {
                OS_REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
            }
            else {
                OS_REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
            }
        }
    }

    /* Re-enable interrupts if they were enabled before. */
    /* Step 3: the enable condition is optionally gated on the IER
     * reference count, selected at compile time. */
#if HAL_INTR_REFCOUNT_DISABLE
    if ((ints & HAL_INT_GLOBAL)) {
#else
    if ((ints & HAL_INT_GLOBAL) && (OS_ATOMIC_READ(&ahp->ah_ier_ref_count) == 0)) {
#endif
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: enable IER\n", __func__);

        if (!nortc) {
            OS_REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
        }

        mask = AR_INTR_MAC_IRQ;
#ifdef ATH_GPIO_USE_ASYNC_CAUSE
        if (ints & HAL_INT_GPIO) {
            if (ahp->ah_gpio_mask) {
                mask |= SM(ahp->ah_gpio_mask, AR_INTR_ASYNC_MASK_GPIO);
            }
        }
#endif

#if ATH_SUPPORT_MCI
        if (ints & HAL_INT_MCI) {
            mask |= AR_INTR_ASYNC_MASK_MCI;
        }
#endif

        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_ENABLE), mask);
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_MASK), mask);

        if (ah->ah_config.ath_hal_enable_msi) {
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_ENABLE),
                msi_mask);
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_MASK),
                msi_mask);
            if (AR_SREV_POSEIDON(ah)) {
                msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
            } else {
                msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR;
            }
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_PCIE_MSI),
                ((ahp->ah_msi_reg | AR_PCIE_MSI_ENABLE) & msi_pend_addr_mask));
        }

        /*
         * debug - enable to see all synchronous interrupts status
         * Enable synchronous GPIO interrupts as well, since some async
         * GPIO interrupts don't wake the chip up.
         */
        mask = 0;
#ifndef ATH_GPIO_USE_ASYNC_CAUSE
        if (ints & HAL_INT_GPIO) {
            mask |= SM(ahp->ah_gpio_mask, AR_INTR_SYNC_MASK_GPIO);
        }
#endif
        /* Pick the sync-interrupt set for this chip revision. */
        if (AR_SREV_POSEIDON(ah)) {
            sync_en_def = AR9300_INTR_SYNC_DEF_NO_HOST1_PERR;
        }
        else if (AR_SREV_WASP(ah)) {
            sync_en_def = AR9340_INTR_SYNC_DEFAULT;
        }

        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_ENABLE),
            (sync_en_def | mask));
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_MASK),
            (sync_en_def | mask));

        HALDEBUG(ah,  HAL_DEBUG_INTERRUPT,
            "AR_IMR 0x%x IER 0x%x\n",
            OS_REG_READ(ah, AR_IMR), OS_REG_READ(ah, AR_IER));
    }

    return omask;
}
732 
733 void
734 ar9300_set_intr_mitigation_timer(
735     struct ath_hal* ah,
736     HAL_INT_MITIGATION reg,
737     u_int32_t value)
738 {
739 #ifdef AR5416_INT_MITIGATION
740     switch (reg) {
741     case HAL_INT_THRESHOLD:
742         OS_REG_WRITE(ah, AR_MIRT, 0);
743         break;
744     case HAL_INT_RX_LASTPKT:
745         OS_REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, value);
746         break;
747     case HAL_INT_RX_FIRSTPKT:
748         OS_REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, value);
749         break;
750     case HAL_INT_TX_LASTPKT:
751         OS_REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, value);
752         break;
753     case HAL_INT_TX_FIRSTPKT:
754         OS_REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, value);
755         break;
756     default:
757         break;
758     }
759 #endif
760 }
761 
762 u_int32_t
763 ar9300_get_intr_mitigation_timer(struct ath_hal* ah, HAL_INT_MITIGATION reg)
764 {
765     u_int32_t val = 0;
766 #ifdef AR5416_INT_MITIGATION
767     switch (reg) {
768     case HAL_INT_THRESHOLD:
769         val = OS_REG_READ(ah, AR_MIRT);
770         break;
771     case HAL_INT_RX_LASTPKT:
772         val = OS_REG_READ(ah, AR_RIMT) & 0xFFFF;
773         break;
774     case HAL_INT_RX_FIRSTPKT:
775         val = OS_REG_READ(ah, AR_RIMT) >> 16;
776         break;
777     case HAL_INT_TX_LASTPKT:
778         val = OS_REG_READ(ah, AR_TIMT) & 0xFFFF;
779         break;
780     case HAL_INT_TX_FIRSTPKT:
781         val = OS_REG_READ(ah, AR_TIMT) >> 16;
782         break;
783     default:
784         break;
785     }
786 #endif
787     return val;
788 }
789