1 /*
2 * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
3 * Copyright (c) 2002-2008 Atheros Communications, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 * $Id: ar5212_xmit.c,v 1.4 2021/04/13 03:27:13 mrg Exp $
18 */
19 #include "opt_ah.h"
20
21 #include "ah.h"
22 #include "ah_internal.h"
23
24 #include "ar5212/ar5212.h"
25 #include "ar5212/ar5212reg.h"
26 #include "ar5212/ar5212desc.h"
27 #include "ar5212/ar5212phy.h"
28 #ifdef AH_SUPPORT_5311
29 #include "ar5212/ar5311reg.h"
30 #endif
31
32 #ifdef AH_NEED_DESC_SWAP
33 static void ar5212SwapTxDesc(struct ath_desc *ds);
34 #endif
35
36 /*
37 * Update Tx FIFO trigger level.
38 *
39 * Set bIncTrigLevel to TRUE to increase the trigger level.
40 * Set bIncTrigLevel to FALSE to decrease the trigger level.
41 *
42 * Returns TRUE if the trigger level was updated
43 */
HAL_BOOL
ar5212UpdateTxTrigLevel(struct ath_hal *ah, HAL_BOOL bIncTrigLevel)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	uint32_t txcfg, curLevel, newLevel;
	HAL_INT omask;

	/* Already at (or beyond) the chip's ceiling; refuse to go higher. */
	if (ahp->ah_txTrigLev >= ahp->ah_maxTxTrigLev)
		return AH_FALSE;

	/*
	 * Disable interrupts while futzing with the fifo level.
	 * NB: the global-enable bit is cleared but the individual mask
	 * bits are preserved so they can be restored verbatim below.
	 */
	omask = ath_hal_setInterrupts(ah, ahp->ah_maskReg &~ HAL_INT_GLOBAL);

	/* Current trigger level lives in the FTRIG field of AR_TXCFG. */
	txcfg = OS_REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {		/* increase the trigger level */
		if (curLevel < ahp->ah_maxTxTrigLev)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		/* Update the trigger level */
		OS_REG_WRITE(ah, AR_TXCFG,
			(txcfg &~ AR_FTRIG) | SM(newLevel, AR_FTRIG));

	/* Cache the (possibly unchanged) level for the ceiling test above. */
	ahp->ah_txTrigLev = newLevel;

	/* re-enable chip interrupts */
	ath_hal_setInterrupts(ah, omask);

	/* TRUE only if the hardware register was actually rewritten. */
	return (newLevel != curLevel);
}
79
80 /*
81 * Set the properties of the tx queue with the parameters
82 * from qInfo.
83 */
84 HAL_BOOL
ar5212SetTxQueueProps(struct ath_hal * ah,int q,const HAL_TXQ_INFO * qInfo)85 ar5212SetTxQueueProps(struct ath_hal *ah, int q, const HAL_TXQ_INFO *qInfo)
86 {
87 struct ath_hal_5212 *ahp = AH5212(ah);
88 HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
89
90 if (q >= pCap->halTotalQueues) {
91 HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
92 __func__, q);
93 return AH_FALSE;
94 }
95 return ath_hal_setTxQProps(ah, &ahp->ah_txq[q], qInfo);
96 }
97
98 /*
99 * Return the properties for the specified tx queue.
100 */
101 HAL_BOOL
ar5212GetTxQueueProps(struct ath_hal * ah,int q,HAL_TXQ_INFO * qInfo)102 ar5212GetTxQueueProps(struct ath_hal *ah, int q, HAL_TXQ_INFO *qInfo)
103 {
104 struct ath_hal_5212 *ahp = AH5212(ah);
105 HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
106
107
108 if (q >= pCap->halTotalQueues) {
109 HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
110 __func__, q);
111 return AH_FALSE;
112 }
113 return ath_hal_getTxQProps(ah, qInfo, &ahp->ah_txq[q]);
114 }
115
116 /*
117 * Allocate and initialize a tx DCU/QCU combination.
118 */
int
ar5212SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type,
	const HAL_TXQ_INFO *qInfo)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_TX_QUEUE_INFO *qi;
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	int q, defqflags;

	/* by default enable OK+ERR+DESC+URN interrupts */
	defqflags = HAL_TXQ_TXOKINT_ENABLE
		  | HAL_TXQ_TXERRINT_ENABLE
		  | HAL_TXQ_TXDESCINT_ENABLE
		  | HAL_TXQ_TXURNINT_ENABLE;
	/* XXX move queue assignment to driver */
	switch (type) {
	case HAL_TX_QUEUE_BEACON:
		q = pCap->halTotalQueues-1;	/* highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
			  | HAL_TXQ_CBR_DIS_QEMPTY
			  | HAL_TXQ_ARB_LOCKOUT_GLOBAL
			  | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_CAB:
		q = pCap->halTotalQueues-2;	/* next highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
			  | HAL_TXQ_CBR_DIS_QEMPTY
			  | HAL_TXQ_CBR_DIS_BEMPTY
			  | HAL_TXQ_ARB_LOCKOUT_GLOBAL
			  | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_UAPSD:
		q = pCap->halTotalQueues-3;	/* third-highest priority */
		/*
		 * Only a single UAPSD queue is supported; fail rather
		 * than fall through to the already-active check below,
		 * which would return -1 anyway but with a less specific
		 * diagnostic.
		 */
		if (ahp->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available UAPSD tx queue\n", __func__);
			return -1;
		}
		break;
	case HAL_TX_QUEUE_DATA:
		/* Data queues get the lowest-numbered free slot. */
		for (q = 0; q < pCap->halTotalQueues; q++)
			if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->halTotalQueues) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available tx queue\n", __func__);
			return -1;
		}
		break;
	default:
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: bad tx queue type %u\n", __func__, type);
		return -1;
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: tx queue %u already active\n",
		    __func__, q);
		return -1;
	}
	OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
	qi->tqi_type = type;
	if (qInfo == AH_NULL) {
		/* No caller-supplied parameters: apply the defaults. */
		qi->tqi_qflags = defqflags;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;	/* NB: do at reset */
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qInfo->tqi_compBuf;
		/* NB: failure here is ignored; qi keeps whatever applied */
		(void) ar5212SetTxQueueProps(ah, q, qInfo);
	}
	/* NB: must be followed by ar5212ResetTxQueue */
	return q;
}
199
200 /*
201 * Update the h/w interrupt registers to reflect a tx q's configuration.
202 */
/*
 * Push the software per-queue interrupt masks out to the secondary
 * interrupt mask registers.  TXOK/TXDESC live in IMR_S0, TXERR/TXEOL
 * in IMR_S1, and TXURN is a field within IMR_S2 (hence the RMW).
 */
static void
setTxQInterrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
{
	struct ath_hal_5212 *ahp = AH5212(ah);

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
	    "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", __func__,
	    ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
	    ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
	    ahp->ah_txUrnInterruptMask);

	OS_REG_WRITE(ah, AR_IMR_S0,
		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
		| SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)
	);
	OS_REG_WRITE(ah, AR_IMR_S1,
		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
		| SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)
	);
	/* RMW: AR_IMR_S2 carries fields other than TXURN. */
	OS_REG_RMW_FIELD(ah, AR_IMR_S2,
	    AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
}
225
226 /*
227 * Free a tx DCU/QCU combination.
228 */
229 HAL_BOOL
ar5212ReleaseTxQueue(struct ath_hal * ah,u_int q)230 ar5212ReleaseTxQueue(struct ath_hal *ah, u_int q)
231 {
232 struct ath_hal_5212 *ahp = AH5212(ah);
233 HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
234 HAL_TX_QUEUE_INFO *qi;
235
236 if (q >= pCap->halTotalQueues) {
237 HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
238 __func__, q);
239 return AH_FALSE;
240 }
241 qi = &ahp->ah_txq[q];
242 if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
243 HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
244 __func__, q);
245 return AH_FALSE;
246 }
247
248 HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: release queue %u\n", __func__, q);
249
250 qi->tqi_type = HAL_TX_QUEUE_INACTIVE;
251 ahp->ah_txOkInterruptMask &= ~(1 << q);
252 ahp->ah_txErrInterruptMask &= ~(1 << q);
253 ahp->ah_txDescInterruptMask &= ~(1 << q);
254 ahp->ah_txEolInterruptMask &= ~(1 << q);
255 ahp->ah_txUrnInterruptMask &= ~(1 << q);
256 setTxQInterrupts(ah, qi);
257
258 return AH_TRUE;
259 }
260
261 /*
262 * Set the retry, aifs, cwmin/max, readyTime regs for specified queue
263 * Assumes:
264 * phwChannel has been set to point to the current channel
265 */
HAL_BOOL
ar5212ResetTxQueue(struct ath_hal *ah, u_int q)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	HAL_CHANNEL_INTERNAL *chan = AH_PRIVATE(ah)->ah_curchan;
	HAL_TX_QUEUE_INFO *qi;
	uint32_t cwMin, chanCwMin, value, qmisc, dmisc;

	if (q >= pCap->halTotalQueues) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
		    __func__, q);
		/* NB: treated as success so callers can blindly reset all */
		return AH_TRUE;		/* XXX??? */
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: reset queue %u\n", __func__, q);

	if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
		/*
		 * Select cwmin according to channel type.
		 * NB: chan can be NULL during attach
		 */
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;
		/* make sure that the CWmin is of the form (2^n - 1) */
		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
			;
	} else
		cwMin = qi->tqi_cwmin;

	/* set cwMin/Max and AIFS values */
	OS_REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN)
		| SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
		| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	/* Set retry limit values */
	OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
		| SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
		| SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
		| SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
	);

	/* NB: always enable early termination on the QCU */
	qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ
	      | SM(AR_Q_MISC_FSP_ASAP, AR_Q_MISC_FSP);

	/* NB: always enable DCU to wait for next fragment from QCU */
	dmisc = AR_D_MISC_FRAG_WAIT_EN;

#ifdef AH_SUPPORT_5311
	if (AH_PRIVATE(ah)->ah_macVersion < AR_SREV_VERSION_OAHU) {
		/* Configure DCU to use the global sequence count */
		dmisc |= AR5311_D_MISC_SEQ_NUM_CONTROL;
	}
#endif
	/* multiqueue support */
	if (qi->tqi_cbrPeriod) {
		/* CBR (constant bit rate) scheduling: program interval
		 * and overflow threshold, and switch frame scheduling
		 * policy from ASAP to CBR. */
		OS_REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod,AR_Q_CBRCFG_CBR_INTERVAL)
			| SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_CBR;
		if (qi->tqi_cbrOverflowLimit)
			qmisc |= AR_Q_MISC_CBR_EXP_CNTR_LIMIT;
	}
	if (qi->tqi_readyTime) {
		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT)
			| AR_Q_RDYTIMECFG_ENA);
	}

	/* Burst duration; the enable bit is set only for non-zero bursts. */
	OS_REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR)
		| (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_readyTime &&
	    (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
		qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
	if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
	if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
		/*
		 * These are meaningful only when not scheduled asap.
		 */
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
	}

	/* Map the generic queue flags onto DCU misc bits. */
	if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
	if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
		dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
	if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
		dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
			    AR_D_MISC_VIR_COL_HANDLING);
	if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
		dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;

	/*
	 * Fillin type-dependent bits. Most of this can be
	 * removed by specifying the queue parameters in the
	 * driver; it's here for backwards compatibility.
	 */
	switch (qi->tqi_type) {
	case HAL_TX_QUEUE_BEACON:		/* beacon frames */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_BEACON_USE
		      |  AR_Q_MISC_CBR_INCR_DIS1;

		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL)
		      |  AR_D_MISC_BEACON_USE
		      |  AR_D_MISC_POST_FR_BKOFF_DIS;
		break;
	case HAL_TX_QUEUE_CAB:			/* CAB  frames */
		/*
		 * No longer Enable AR_Q_MISC_RDYTIME_EXP_POLICY,
		 * There is an issue with the CAB Queue
		 * not properly refreshing the Tx descriptor if
		 * the TXE clear setting is used.
		 */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_CBR_INCR_DIS1
		      |  AR_Q_MISC_CBR_INCR_DIS0;

		if (!qi->tqi_readyTime) {
			/*
			 * NB: don't set default ready time if driver
			 * has explicitly specified something.  This is
			 * here solely for backwards compatibility.
			 */
			/* Ready time in usec derived from the beacon
			 * interval (TU -> usec via *1024), less the s/w
			 * and additional SWBA backoffs. */
			value = (ahp->ah_beaconInterval
				- (ath_hal_sw_beacon_response_time -
					ath_hal_dma_beacon_response_time)
				- ath_hal_additional_swba_backoff) * 1024;
			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q), value | AR_Q_RDYTIMECFG_ENA);
		}
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
		break;
	default:			/* NB: silence compiler */
		break;
	}

	OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
	OS_REG_WRITE(ah, AR_DMISC(q), dmisc);

	/* Setup compression scratchpad buffer */
	/*
	 * XXX: calling this asynchronously to queue operation can
	 *      cause unexpected behavior!!!
	 */
	if (qi->tqi_physCompBuf) {
		/* Compression only valid on data/UAPSD queues. */
		HALASSERT(qi->tqi_type == HAL_TX_QUEUE_DATA ||
			  qi->tqi_type == HAL_TX_QUEUE_UAPSD);
		OS_REG_WRITE(ah, AR_Q_CBBS, (80 + 2*q));
		OS_REG_WRITE(ah, AR_Q_CBBA, qi->tqi_physCompBuf);
		OS_REG_WRITE(ah, AR_Q_CBC,  HAL_COMP_BUF_MAX_SIZE/1024);
		OS_REG_WRITE(ah, AR_Q0_MISC + 4*q,
			     OS_REG_READ(ah, AR_Q0_MISC + 4*q)
			     | AR_Q_MISC_QCU_COMP_EN);
	}

	/*
	 * Always update the secondary interrupt mask registers - this
	 * could be a new queue getting enabled in a running system or
	 * hw getting re-initialized during a reset!
	 *
	 * Since we don't differentiate between tx interrupts corresponding
	 * to individual queues - secondary tx mask regs are always unmasked;
	 * tx interrupts are enabled/disabled for all queues collectively
	 * using the primary mask reg
	 */
	if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
		ahp->ah_txOkInterruptMask |= 1 << q;
	else
		ahp->ah_txOkInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
		ahp->ah_txErrInterruptMask |= 1 << q;
	else
		ahp->ah_txErrInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
		ahp->ah_txDescInterruptMask |= 1 << q;
	else
		ahp->ah_txDescInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
		ahp->ah_txEolInterruptMask |= 1 << q;
	else
		ahp->ah_txEolInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
		ahp->ah_txUrnInterruptMask |= 1 << q;
	else
		ahp->ah_txUrnInterruptMask &= ~(1 << q);
	setTxQInterrupts(ah, qi);

	return AH_TRUE;
}
484
485 /*
486 * Get the TXDP for the specified queue
487 */
uint32_t
ar5212GetTxDP(struct ath_hal *ah, u_int q)
{
	/* NB: assert-only validation; callers must pass a valid queue. */
	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
	return OS_REG_READ(ah, AR_QTXDP(q));
}
494
495 /*
496 * Set the TxDP for the specified queue
497 */
HAL_BOOL
ar5212SetTxDP(struct ath_hal *ah, u_int q, uint32_t txdp)
{
	/* NB: assert-only validation; callers must pass an active queue. */
	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	/*
	 * Make sure that TXE is deasserted before setting the TXDP.  If TXE
	 * is still asserted, setting TXDP will have no effect.
	 */
	HALASSERT((OS_REG_READ(ah, AR_Q_TXE) & (1 << q)) == 0);

	OS_REG_WRITE(ah, AR_QTXDP(q), txdp);

	/* Always succeeds (assertions aside). */
	return AH_TRUE;
}
514
515 /*
516 * Set Transmit Enable bits for the specified queue
517 */
HAL_BOOL
ar5212StartTxDma(struct ath_hal *ah, u_int q)
{
	/* NB: assert-only validation; callers must pass an active queue. */
	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);

	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

	/* Check to be sure we're not enabling a q that has its TXD bit set. */
	HALASSERT((OS_REG_READ(ah, AR_Q_TXD) & (1 << q)) == 0);

	/* NB: AR_Q_TXE is write-one-to-set; other queues are unaffected. */
	OS_REG_WRITE(ah, AR_Q_TXE, 1 << q);
	return AH_TRUE;
}
533
534 /*
535 * Return the number of pending frames or 0 if the specified
536 * queue is stopped.
537 */
uint32_t
ar5212NumTxPending(struct ath_hal *ah, u_int q)
{
	uint32_t npend;

	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	/* Hardware count of frames still queued on this QCU. */
	npend = OS_REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		/*
		 * Pending frame count (PFC) can momentarily go to zero
		 * while TXE remains asserted.  In other words a PFC of
		 * zero is not sufficient to say that the queue has stopped.
		 */
		if (OS_REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;		/* arbitrarily return 1 */
	}
	return npend;
}
558
559 /*
560 * Stop transmit on the specified queue
561 */
HAL_BOOL
ar5212StopTxDma(struct ath_hal *ah, u_int q)
{
	u_int i;
	u_int wait;

	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);

	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	/* Request DMA stop, then poll (up to ~100ms) for it to drain. */
	OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
	for (i = 1000; i != 0; i--) {
		if (ar5212NumTxPending(ah, q) == 0)
			break;
		OS_DELAY(100);		/* XXX get actual value */
	}
#ifdef AH_DEBUG
	if (i == 0) {
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: queue %u DMA did not stop in 100 msec\n", __func__, q);
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n", __func__,
		    OS_REG_READ(ah, AR_QSTS(q)), OS_REG_READ(ah, AR_Q_TXE),
		    OS_REG_READ(ah, AR_Q_TXD), OS_REG_READ(ah, AR_QCBRCFG(q)));
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
		    __func__, OS_REG_READ(ah, AR_QMISC(q)),
		    OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
		    OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
	}
#endif /* AH_DEBUG */

	/* 2413+ and up can kill packets at the PCU level */
	if (ar5212NumTxPending(ah, q) &&
	    (IS_2413(ah) || IS_5413(ah) || IS_2425(ah) || IS_2417(ah))) {
		uint32_t tsfLow, j;

		HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
		    "%s: Num of pending TX Frames %d on Q %d\n",
		    __func__, ar5212NumTxPending(ah, q), q);

		/* Kill last PCU Tx Frame */
		/* TODO - save off and restore current values of Q1/Q2? */
		/*
		 * Schedule a quiet period starting at the current TSF.
		 * Retried once in case the TSF rolls past the programmed
		 * start between the read and the write.
		 */
		for (j = 0; j < 2; j++) {
			tsfLow = OS_REG_READ(ah, AR_TSF_L32);
			OS_REG_WRITE(ah, AR_QUIET2, SM(100, AR_QUIET2_QUIET_PER) |
			    SM(10, AR_QUIET2_QUIET_DUR));
			OS_REG_WRITE(ah, AR_QUIET1, AR_QUIET1_QUIET_ENABLE |
			    SM(tsfLow >> 10, AR_QUIET1_NEXT_QUIET));
			if ((OS_REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10)) {
				break;
			}
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: TSF moved while trying to set quiet time "
			    "TSF: 0x%08x\n", __func__, tsfLow);
			HALASSERT(j < 1); /* TSF shouldn't count twice or reg access is taking forever */
		}

		/* Force the channel-idle indication during the quiet time. */
		OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);

		/* Allow the quiet mechanism to do its work */
		OS_DELAY(200);
		OS_REG_CLR_BIT(ah, AR_QUIET1, AR_QUIET1_QUIET_ENABLE);

		/* Give at least 1 millisec more to wait */
		wait = 100;

		/* Verify all transmit is dead */
		while (ar5212NumTxPending(ah, q)) {
			if ((--wait) == 0) {
				/* NOTE(review): wait is 0 here, so the %d in
				 * this message always prints 0 — presumably
				 * it was meant to report the total wait. */
				HALDEBUG(ah, HAL_DEBUG_ANY,
				    "%s: Failed to stop Tx DMA in %d msec after killing last frame\n",
				    __func__, wait);
				break;
			}
			OS_DELAY(10);
		}

		OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
	}

	/* Clear the stop request for all queues. */
	OS_REG_WRITE(ah, AR_Q_TXD, 0);
	/* NB: TRUE only if the initial polling loop saw the queue drain. */
	return (i != 0);
}
646
647 /*
648 * Descriptor Access Functions
649 */
650
/*
 * Validation bitmasks used by the HALASSERTs below: bit (1<<v) is set
 * for each acceptable value v of a packet type or h/w rate code.
 */
#define	VALID_PKT_TYPES \
	((1<<HAL_PKT_TYPE_NORMAL)|(1<<HAL_PKT_TYPE_ATIM)|\
	 (1<<HAL_PKT_TYPE_PSPOLL)|(1<<HAL_PKT_TYPE_PROBE_RESP)|\
	 (1<<HAL_PKT_TYPE_BEACON))
#define	isValidPktType(_t)	((1<<(_t)) & VALID_PKT_TYPES)
#define	VALID_TX_RATES \
	((1<<0x0b)|(1<<0x0f)|(1<<0x0a)|(1<<0x0e)|(1<<0x09)|(1<<0x0d)|\
	 (1<<0x08)|(1<<0x0c)|(1<<0x1b)|(1<<0x1a)|(1<<0x1e)|(1<<0x19)|\
	 (1<<0x1d)|(1<<0x18)|(1<<0x1c))
#define	isValidTxRate(_r)	((1<<(_r)) & VALID_TX_RATES)
661
/*
 * Fill in the first four control words of a tx descriptor for a frame:
 * length/power/flags (ctl0), type/key/compression (ctl1), series-0 try
 * count (ctl2) and series-0 rate (ctl3).  RTS/CTS fields are added only
 * when one of those flags is set.  Returns AH_FALSE on a bad rts/cts
 * rate; all other validation is assert-only.
 */
HAL_BOOL
ar5212SetupTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int pktLen,
	u_int hdrLen,
	HAL_PKT_TYPE type,
	u_int txPower,
	u_int txRate0, u_int txTries0,
	u_int keyIx,
	u_int antMode,
	u_int flags,
	u_int rtsctsRate,
	u_int rtsctsDuration,
	u_int compicvLen,
	u_int compivLen,
	u_int comp)
{
#define	RTSCTS	(HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
	struct ar5212_desc *ads = AR5212DESC(ds);
	struct ath_hal_5212 *ahp = AH5212(ah);

	/* hdrLen is unused on this chip generation. */
	(void) hdrLen;

	HALASSERT(txTries0 != 0);
	HALASSERT(isValidPktType(type));
	HALASSERT(isValidTxRate(txRate0));
	/* RTS and CTS are mutually exclusive. */
	HALASSERT((flags & RTSCTS) != RTSCTS);
	/* XXX validate antMode */

	/* Bias by the per-chip offset and clamp to the 6-bit field max. */
	txPower = (txPower + ahp->ah_txPowerIndexOffset );
	if(txPower > 63)	txPower=63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		     | (txPower << AR_XmitPower_S)
		     | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
		     | (flags & HAL_TXDESC_CLRDMASK ? AR_ClearDestMask : 0)
		     | SM(antMode, AR_AntModeXmit)
		     | (flags & HAL_TXDESC_INTREQ ? AR_TxInterReq : 0)
		     ;
	ads->ds_ctl1 = (type << AR_FrmType_S)
		     | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
		     | (comp << AR_CompProc_S)
		     | (compicvLen << AR_CompICVLen_S)
		     | (compivLen << AR_CompIVLen_S)
		     ;
	ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0)
		     | (flags & HAL_TXDESC_DURENA ? AR_DurUpdateEna : 0)
		     ;
	ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S)
		     ;
	if (keyIx != HAL_TXKEYIX_INVALID) {
		/* XXX validate key index */
		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
		ads->ds_ctl0 |= AR_DestIdxValid;
	}
	if (flags & RTSCTS) {
		if (!isValidTxRate(rtsctsRate)) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: invalid rts/cts rate 0x%x\n",
			    __func__, rtsctsRate);
			return AH_FALSE;
		}
		/* XXX validate rtsctsDuration */
		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
			     | (flags & HAL_TXDESC_RTSENA ? AR_RTSCTSEnable : 0)
			     ;
		ads->ds_ctl2 |= SM(rtsctsDuration, AR_RTSCTSDuration);
		ads->ds_ctl3 |= (rtsctsRate << AR_RTSCTSRate_S);
	}
	return AH_TRUE;
#undef RTSCTS
}
733
/*
 * Add the multi-rate retry series (1-3) to a descriptor previously
 * prepared by ar5212SetupTxDesc.  A series with a zero try count is
 * skipped entirely.  NB: enabling any extra series also forces h/w
 * duration update (AR_DurUpdateEna).
 */
HAL_BOOL
ar5212SetupXTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int txRate1, u_int txTries1,
	u_int txRate2, u_int txTries2,
	u_int txRate3, u_int txTries3)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

	if (txTries1) {
		HALASSERT(isValidTxRate(txRate1));
		ads->ds_ctl2 |= SM(txTries1, AR_XmitDataTries1)
			     | AR_DurUpdateEna
			     ;
		ads->ds_ctl3 |= (txRate1 << AR_XmitRate1_S);
	}
	if (txTries2) {
		HALASSERT(isValidTxRate(txRate2));
		ads->ds_ctl2 |= SM(txTries2, AR_XmitDataTries2)
			     | AR_DurUpdateEna
			     ;
		ads->ds_ctl3 |= (txRate2 << AR_XmitRate2_S);
	}
	if (txTries3) {
		HALASSERT(isValidTxRate(txRate3));
		ads->ds_ctl2 |= SM(txTries3, AR_XmitDataTries3)
			     | AR_DurUpdateEna
			     ;
		ads->ds_ctl3 |= (txRate3 << AR_XmitRate3_S);
	}
	return AH_TRUE;
}
765
/*
 * Request a tx interrupt on completion of this descriptor by setting
 * AR_TxInterReq in ctl0.  On big-endian builds the descriptor lives in
 * little-endian DMA memory, so the bit must be byte-swapped to land in
 * the right position.
 */
void
ar5212IntrReqTxDesc(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

#ifdef AH_NEED_DESC_SWAP
	ads->ds_ctl0 |= __bswap32(AR_TxInterReq);
#else
	ads->ds_ctl0 |= AR_TxInterReq;
#endif
}
777
/*
 * Fill in the per-segment fields of a tx descriptor: buffer length,
 * the "more" chaining bit, and — for the last segment of a multi-
 * segment frame — a copy of the rate/retry words from the first
 * descriptor so completion processing can see them.
 */
HAL_BOOL
ar5212FillTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int segLen, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
	const struct ath_desc *ds0)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

	/* Segment length must fit the AR_BufLen field. */
	HALASSERT((segLen &~ AR_BufLen) == 0);

	if (firstSeg) {
		/*
		 * First descriptor, don't clobber xmit control data
		 * setup by ar5212SetupTxDesc.
		 */
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_More);
	} else if (lastSeg) {		/* !firstSeg && lastSeg */
		/*
		 * Last descriptor in a multi-descriptor frame,
		 * copy the multi-rate transmit parameters from
		 * the first frame for processing on completion.
		 */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen;
#ifdef AH_NEED_DESC_SWAP
		/* ds0 is in host order; swap to descriptor (LE) order. */
		ads->ds_ctl2 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl2);
		ads->ds_ctl3 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl3);
#else
		ads->ds_ctl2 = AR5212DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5212DESC_CONST(ds0)->ds_ctl3;
#endif
	} else {			/* !firstSeg && !lastSeg */
		/*
		 * Intermediate descriptor in a multi-descriptor frame.
		 */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen | AR_More;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	/* Clear status so AR_Done starts deasserted for this frame. */
	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	return AH_TRUE;
}
820
#ifdef AH_NEED_DESC_SWAP
/* Swap transmit descriptor */
/*
 * Byte-swap every 32-bit word of a completed tx descriptor in place,
 * converting between descriptor (little-endian) and host order on
 * big-endian platforms.  NB: ds_link is not swapped here.
 */
static __inline void
ar5212SwapTxDesc(struct ath_desc *ds)
{
	ds->ds_data = __bswap32(ds->ds_data);
	ds->ds_ctl0 = __bswap32(ds->ds_ctl0);
	ds->ds_ctl1 = __bswap32(ds->ds_ctl1);
	ds->ds_hw[0] = __bswap32(ds->ds_hw[0]);
	ds->ds_hw[1] = __bswap32(ds->ds_hw[1]);
	ds->ds_hw[2] = __bswap32(ds->ds_hw[2]);
	ds->ds_hw[3] = __bswap32(ds->ds_hw[3]);
}
#endif
835
836 /*
837 * Processing of HW TX descriptor.
838 */
HAL_STATUS
ar5212ProcTxDesc(struct ath_hal *ah,
	struct ath_desc *ds, struct ath_tx_status *ts)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

	/*
	 * The hardware sets AR_Done when it finishes with the descriptor;
	 * until then the status words are invalid.  On big-endian builds
	 * the test must use a swapped constant, and the whole descriptor
	 * is swapped to host order once done.
	 */
#ifdef AH_NEED_DESC_SWAP
	if ((ads->ds_txstatus1 & __bswap32(AR_Done)) == 0)
		return HAL_EINPROGRESS;

	ar5212SwapTxDesc(ds);
#else
	if ((ads->ds_txstatus1 & AR_Done) == 0)
		return HAL_EINPROGRESS;
#endif

	/* Update software copies of the HW status */
	ts->ts_seqnum = MS(ads->ds_txstatus1, AR_SeqNum);
	ts->ts_tstamp = MS(ads->ds_txstatus0, AR_SendTimestamp);
	ts->ts_status = 0;
	if ((ads->ds_txstatus0 & AR_FrmXmitOK) == 0) {
		/* Map h/w failure bits to HAL error flags. */
		if (ads->ds_txstatus0 & AR_ExcessiveRetries)
			ts->ts_status |= HAL_TXERR_XRETRY;
		if (ads->ds_txstatus0 & AR_Filtered)
			ts->ts_status |= HAL_TXERR_FILT;
		if (ads->ds_txstatus0 & AR_FIFOUnderrun)
			ts->ts_status |= HAL_TXERR_FIFO;
	}
	/*
	 * Extract the transmit rate used and mark the rate as
	 * ``alternate'' if it wasn't the series 0 rate.
	 */
	ts->ts_finaltsi = MS(ads->ds_txstatus1, AR_FinalTSIndex);
	switch (ts->ts_finaltsi) {
	case 0:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate1) |
			HAL_TXSTAT_ALTRATE;
		break;
	case 2:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate2) |
			HAL_TXSTAT_ALTRATE;
		break;
	case 3:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate3) |
			HAL_TXSTAT_ALTRATE;
		break;
	}
	ts->ts_rssi = MS(ads->ds_txstatus1, AR_AckSigStrength);
	ts->ts_shortretry = MS(ads->ds_txstatus0, AR_RTSFailCnt);
	ts->ts_longretry = MS(ads->ds_txstatus0, AR_DataFailCnt);
	/*
	 * The retry count has the number of un-acked tries for the
	 * final series used.  When doing multi-rate retry we must
	 * fixup the retry count by adding in the try counts for
	 * each series that was fully-processed.  Beware that this
	 * takes values from the try counts in the final descriptor.
	 * These are not required by the hardware.  We assume they
	 * are placed there by the driver as otherwise we have no
	 * access and the driver can't do the calculation because it
	 * doesn't know the descriptor format.
	 */
	switch (ts->ts_finaltsi) {
	case 3: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries2);
		/* FALLTHRU */
	case 2: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries1);
		/* FALLTHRU */
	case 1: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries0);
	}
	ts->ts_virtcol = MS(ads->ds_txstatus0, AR_VirtCollCnt);
	/* NB: AR_XmitAtenna [sic] is the h/w bit name for the tx antenna. */
	ts->ts_antenna = (ads->ds_txstatus1 & AR_XmitAtenna ? 2 : 1);

	return HAL_OK;
}
915
916 /*
917 * Determine which tx queues need interrupt servicing.
918 */
919 void
ar5212GetTxIntrQueue(struct ath_hal * ah,uint32_t * txqs)920 ar5212GetTxIntrQueue(struct ath_hal *ah, uint32_t *txqs)
921 {
922 struct ath_hal_5212 *ahp = AH5212(ah);
923 *txqs &= ahp->ah_intrTxqs;
924 ahp->ah_intrTxqs &= ~(*txqs);
925 }
926