1 /*-
2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 #if defined(__DragonFly__)
34 #define CTLFLAG_RWTUN CTLFLAG_RW
35 #endif
36
37 /*
38 * Driver for the Atheros Wireless LAN controller.
39 *
40 * This software is derived from work of Atsushi Onoe; his contribution
41 * is greatly appreciated.
42 */
43
44 #include "opt_inet.h"
45 #include "opt_ath.h"
46 /*
47 * This is needed for register operations which are performed
48 * by the driver - eg, calls to ath_hal_gettsf32().
49 *
50 * It's also required for any AH_DEBUG checks in here, eg the
51 * module dependencies.
52 */
53 #include "opt_ah.h"
54 #include "opt_wlan.h"
55
56 #include <sys/param.h>
57 #include <sys/systm.h>
58 #include <sys/sysctl.h>
59 #include <sys/mbuf.h>
60 #include <sys/malloc.h>
61 #include <sys/lock.h>
62 #include <sys/kernel.h>
63 #include <sys/socket.h>
64 #include <sys/sockio.h>
65 #include <sys/errno.h>
66 #include <sys/callout.h>
67 #include <sys/bus.h>
68 #include <sys/endian.h>
69 #include <sys/kthread.h>
70 #include <sys/taskqueue.h>
71 #include <sys/caps.h>
72 #include <sys/module.h>
73 #include <sys/ktr.h>
74
75 #if defined(__DragonFly__)
76 /* empty */
77 #else
78 #include <sys/smp.h> /* for mp_ncpus */
79 #include <machine/bus.h>
80 #endif
81
82 #include <net/if.h>
83 #include <net/if_var.h>
84 #include <net/if_dl.h>
85 #include <net/if_media.h>
86 #include <net/if_types.h>
87 #include <net/if_arp.h>
88 #include <net/ethernet.h>
89 #include <net/if_llc.h>
90 #if defined(__DragonFly__)
91 #include <net/ifq_var.h>
92 #endif
93
94 #include <netproto/802_11/ieee80211_var.h>
95 #include <netproto/802_11/ieee80211_regdomain.h>
96 #ifdef IEEE80211_SUPPORT_SUPERG
97 #include <netproto/802_11/ieee80211_superg.h>
98 #endif
99 #ifdef IEEE80211_SUPPORT_TDMA
100 #include <netproto/802_11/ieee80211_tdma.h>
101 #endif
102
103 #include <net/bpf.h>
104
105 #ifdef INET
106 #include <netinet/in.h>
107 #include <netinet/if_ether.h>
108 #endif
109
110 #include <dev/netif/ath/ath/if_athvar.h>
111 #include <dev/netif/ath/ath_hal/ah_devid.h> /* XXX for softled */
112 #include <dev/netif/ath/ath_hal/ah_diagcodes.h>
113
114 #include <dev/netif/ath/ath/if_ath_debug.h>
115 #include <dev/netif/ath/ath/if_ath_misc.h>
116 #include <dev/netif/ath/ath/if_ath_tsf.h>
117 #include <dev/netif/ath/ath/if_ath_tx.h>
118 #include <dev/netif/ath/ath/if_ath_sysctl.h>
119 #include <dev/netif/ath/ath/if_ath_led.h>
120 #include <dev/netif/ath/ath/if_ath_keycache.h>
121 #include <dev/netif/ath/ath/if_ath_rx.h>
122 #include <dev/netif/ath/ath/if_ath_rx_edma.h>
123 #include <dev/netif/ath/ath/if_ath_tx_edma.h>
124 #include <dev/netif/ath/ath/if_ath_beacon.h>
125 #include <dev/netif/ath/ath/if_ath_btcoex.h>
126 #include <dev/netif/ath/ath/if_ath_spectral.h>
127 #include <dev/netif/ath/ath/if_ath_lna_div.h>
128 #include <dev/netif/ath/ath/if_athdfs.h>
129 #include <dev/netif/ath/ath/if_ath_ioctl.h>
130 #include <dev/netif/ath/ath/if_ath_descdma.h>
131
132 #ifdef ATH_TX99_DIAG
133 #include <dev/netif/ath/ath/ath_tx99/ath_tx99.h>
134 #endif
135
136 #ifdef ATH_DEBUG_ALQ
137 #include <dev/netif/ath/ath/if_ath_alq.h>
138 #endif
139
140 /*
141 * Only enable this if you're working on PS-POLL support.
142 */
143 #define ATH_SW_PSQ
144
145 /*
146 * ATH_BCBUF determines the number of vaps that can transmit
147 * beacons and also (currently) the number of vaps that can
148 * have unique mac addresses/bssids. When staggering beacons
149 * 4 is probably a good max as otherwise the beacons become
150 * very closely spaced and there is limited time for cab q traffic
151 * to go out. You can burst beacons instead but that is not good
152 * for stations in power save and at some point you really want
153 * another radio (and channel).
154 *
155 * The limit on the number of mac addresses is tied to our use of
156 * the U/L bit and tracking addresses in a byte; it would be
157 * worthwhile to allow more for applications like proxy sta.
158 */
159 CTASSERT(ATH_BCBUF <= 8);
160
161 static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
162 const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
163 const uint8_t [IEEE80211_ADDR_LEN],
164 const uint8_t [IEEE80211_ADDR_LEN]);
165 static void ath_vap_delete(struct ieee80211vap *);
166 static int ath_init(struct ath_softc *);
167 static void ath_stop(struct ath_softc *);
168 static int ath_reset_vap(struct ieee80211vap *, u_long);
169 static int ath_transmit(struct ieee80211com *, struct mbuf *);
170 static int ath_media_change(struct ifnet *);
171 static void ath_watchdog(void *);
172 static void ath_parent(struct ieee80211com *);
173 static void ath_fatal_proc(void *, int);
174 static void ath_bmiss_vap(struct ieee80211vap *);
175 static void ath_bmiss_proc(void *, int);
176 static void ath_key_update_begin(struct ieee80211vap *);
177 static void ath_key_update_end(struct ieee80211vap *);
178 static void ath_update_mcast_hw(struct ath_softc *);
179 static void ath_update_mcast(struct ieee80211com *);
180 static void ath_update_promisc(struct ieee80211com *);
181 static void ath_updateslot(struct ieee80211com *);
182 static void ath_bstuck_proc(void *, int);
183 static void ath_reset_proc(void *, int);
184 static int ath_desc_alloc(struct ath_softc *);
185 static void ath_desc_free(struct ath_softc *);
186 static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
187 const uint8_t [IEEE80211_ADDR_LEN]);
188 static void ath_node_cleanup(struct ieee80211_node *);
189 static void ath_node_free(struct ieee80211_node *);
190 static void ath_node_getsignal(const struct ieee80211_node *,
191 int8_t *, int8_t *);
192 static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
193 static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
194 static int ath_tx_setup(struct ath_softc *, int, int);
195 static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
196 static void ath_tx_cleanup(struct ath_softc *);
197 static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq,
198 int dosched);
199 static void ath_tx_proc_q0(void *, int);
200 static void ath_tx_proc_q0123(void *, int);
201 static void ath_tx_proc(void *, int);
202 static void ath_txq_sched_tasklet(void *, int);
203 static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
204 static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
205 static void ath_scan_start(struct ieee80211com *);
206 static void ath_scan_end(struct ieee80211com *);
207 static void ath_set_channel(struct ieee80211com *);
208 #ifdef ATH_ENABLE_11N
209 static void ath_update_chw(struct ieee80211com *);
210 #endif /* ATH_ENABLE_11N */
211 static void ath_calibrate(void *);
212 static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
213 static void ath_setup_stationkey(struct ieee80211_node *);
214 static void ath_newassoc(struct ieee80211_node *, int);
215 static int ath_setregdomain(struct ieee80211com *,
216 struct ieee80211_regdomain *, int,
217 struct ieee80211_channel []);
218 static void ath_getradiocaps(struct ieee80211com *, int, int *,
219 struct ieee80211_channel []);
220 static int ath_getchannels(struct ath_softc *);
221
222 static int ath_rate_setup(struct ath_softc *, u_int mode);
223 static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
224
225 static void ath_announce(struct ath_softc *);
226
227 static void ath_dfs_tasklet(void *, int);
228 static void ath_node_powersave(struct ieee80211_node *, int);
229 static int ath_node_set_tim(struct ieee80211_node *, int);
230 static void ath_node_recv_pspoll(struct ieee80211_node *, struct mbuf *);
231
232 #ifdef IEEE80211_SUPPORT_TDMA
233 #include <dev/netif/ath/ath/if_ath_tdma.h>
234 #endif
235
236 #if defined(__DragonFly__)
237 extern const char* ath_hal_ether_sprintf(const u_int8_t *mac);
238 #endif
239
240 SYSCTL_DECL(_hw_ath);
241
242 /* XXX validate sysctl values */
243 static int ath_longcalinterval = 30; /* long cals every 30 secs */
244 SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
245 0, "long chip calibration interval (secs)");
246 static int ath_shortcalinterval = 100; /* short cals every 100 ms */
247 SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
248 0, "short chip calibration interval (msecs)");
249 static int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */
250 SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
251 0, "reset chip calibration results (secs)");
252 static int ath_anicalinterval = 100; /* ANI calibration - 100 msec */
253 SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
254 0, "ANI calibration (msecs)");
255
256 int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */
257 SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &ath_rxbuf,
258 0, "rx buffers allocated");
259 #if defined(__DragonFly__)
260 TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
261 #endif
262
263 int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */
264 SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RWTUN, &ath_txbuf,
265 0, "tx buffers allocated");
266 #if defined(__DragonFly__)
267 TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
268 #endif
269
270 int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */
271 SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RWTUN, &ath_txbuf_mgmt,
272 0, "tx (mgmt) buffers allocated");
273 #if defined(__DragonFly__)
274 TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);
275 #endif
276
277 int ath_bstuck_threshold = 4; /* max missed beacons */
278 SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
279 0, "max missed beacon xmits before chip reset");
280
281 MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");
282
283 void
284 ath_legacy_attach_comp_func(struct ath_softc *sc)
285 {
286
287 /*
288 * Special case certain configurations. Note the
289 * CAB queue is handled specially by these, so don't
290 * include it when checking the txq setup mask.
291 */
292 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
293 case 0x01:
294 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
295 break;
296 case 0x0f:
297 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
298 break;
299 default:
300 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
301 break;
302 }
303 }
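/*
 * Worked example of the mask check above (editor's sketch; queue
 * numbers are assumed for illustration): if data queues 0..3 are set
 * up and the CAB queue is h/w queue 8, sc_txqsetup is 0x10f.  Clearing
 * the CAB bit with &~ (1 << 8) leaves 0x0f, so the q0123 completion
 * handler is chosen; a single data queue would leave 0x01 and pick the
 * q0 handler.  Any other layout falls through to the generic
 * ath_tx_proc.
 */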
304
305 /*
306 * Set the target power mode.
307 *
308 * If this is called during a point in time where
309 * the hardware is being programmed elsewhere, it will
310 * simply store it away and update it when all current
311 * uses of the hardware are completed.
312 */
313 void
314 _ath_power_setpower(struct ath_softc *sc, int power_state, const char *file, int line)
315 {
316 ATH_LOCK_ASSERT(sc);
317
318 sc->sc_target_powerstate = power_state;
319
320 DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
321 __func__,
322 file,
323 line,
324 power_state,
325 sc->sc_powersave_refcnt);
326
327 if (sc->sc_powersave_refcnt == 0 &&
328 power_state != sc->sc_cur_powerstate) {
329 sc->sc_cur_powerstate = power_state;
330 ath_hal_setpower(sc->sc_ah, power_state);
331
332 /*
333 * If the NIC is force-awake, then set the
334 * self-gen frame state appropriately.
335 *
336 * If the NIC is in network sleep or full-sleep,
337 * we let the above call leave the self-gen
338 * state as "sleep".
339 */
340 if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
341 sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
342 ath_hal_setselfgenpower(sc->sc_ah,
343 sc->sc_target_selfgen_state);
344 }
345 }
346 }
347
348 /*
349 * Set the current self-generated frames state.
350 *
351 * This is separate from the target power mode. The chip may be
352 * awake but the desired state is "sleep", so frames sent to the
353 * destination have PWRMGT=1 in the 802.11 header. The NIC also
354 * needs to know to set PWRMGT=1 in self-generated frames.
355 */
356 void
357 _ath_power_set_selfgen(struct ath_softc *sc, int power_state, const char *file, int line)
358 {
359
360 ATH_LOCK_ASSERT(sc);
361
362 DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
363 __func__,
364 file,
365 line,
366 power_state,
367 sc->sc_target_selfgen_state);
368
369 sc->sc_target_selfgen_state = power_state;
370
371 /*
372 * If the NIC is force-awake, then set the self-gen power state.
373 * Network-sleep and full-sleep will already transition it to
374 * mark self-gen frames as sleeping - and we can't
375 * guarantee the NIC is awake to program the self-gen frame
376 * setting anyway.
377 */
378 if (sc->sc_cur_powerstate == HAL_PM_AWAKE) {
379 ath_hal_setselfgenpower(sc->sc_ah, power_state);
380 }
381 }
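/*
 * Example (editor's note): a station that is nominally asleep but
 * temporarily force-awake to program the hardware should still send
 * ACKs with PWRMGT=1.  Setting the self-gen target to network-sleep
 * here keeps that bit correct even while sc_cur_powerstate is AWAKE.
 */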
382
383 /*
384 * Set the hardware power mode and take a reference.
385 *
386 * This doesn't update the target power mode in the driver;
387 * it just updates the hardware power state.
388 *
389 * XXX it should only ever force the hardware awake; it should
390 * never be called to set it asleep.
391 */
392 void
393 _ath_power_set_power_state(struct ath_softc *sc, int power_state, const char *file, int line)
394 {
395 ATH_LOCK_ASSERT(sc);
396
397 DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
398 __func__,
399 file,
400 line,
401 power_state,
402 sc->sc_powersave_refcnt);
403
404 sc->sc_powersave_refcnt++;
405
406 if (power_state != sc->sc_cur_powerstate) {
407 ath_hal_setpower(sc->sc_ah, power_state);
408 sc->sc_cur_powerstate = power_state;
409
410 /*
411 * Adjust the self-gen powerstate if appropriate.
412 */
413 if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
414 sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
415 ath_hal_setselfgenpower(sc->sc_ah,
416 sc->sc_target_selfgen_state);
417 }
418
419 }
420 }
421
422 /*
423 * Restore the power save mode to what it once was.
424 *
425 * This will decrement the reference counter and once it hits
426 * zero, it'll restore the powersave state.
427 */
428 void
429 _ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line)
430 {
431
432 ATH_LOCK_ASSERT(sc);
433
434 DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) refcnt=%d, target state=%d\n",
435 __func__,
436 file,
437 line,
438 sc->sc_powersave_refcnt,
439 sc->sc_target_powerstate);
440
441 if (sc->sc_powersave_refcnt == 0)
442 device_printf(sc->sc_dev, "%s: refcnt=0?\n", __func__);
443 else
444 sc->sc_powersave_refcnt--;
445
446 if (sc->sc_powersave_refcnt == 0 &&
447 sc->sc_target_powerstate != sc->sc_cur_powerstate) {
448 sc->sc_cur_powerstate = sc->sc_target_powerstate;
449 ath_hal_setpower(sc->sc_ah, sc->sc_target_powerstate);
450 }
451
452 /*
453 * Adjust the self-gen powerstate if appropriate.
454 */
455 if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
456 sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
457 ath_hal_setselfgenpower(sc->sc_ah,
458 sc->sc_target_selfgen_state);
459 }
460
461 }
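/*
 * Typical wake/restore usage of the two calls above (editor's sketch;
 * it mirrors the pattern used by ath_detach() and ath_vap_delete()
 * later in this file):
 */
#if 0
	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);	/* refcnt++, wake */
	ATH_UNLOCK(sc);

	/* ... touch hardware registers here ... */

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);	/* refcnt--, maybe sleep */
	ATH_UNLOCK(sc);
#endif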
462
463 /*
464 * Configure the initial HAL configuration values based on bus
465 * specific parameters.
466 *
467 * Some PCI IDs and other information may need tweaking.
468 *
469 * XXX TODO: ath9k and the Atheros HAL only program comm2g_switch_enable
470 * if BT antenna diversity isn't enabled.
471 *
472 * So, let's also figure out how to enable BT diversity for AR9485.
473 */
474 static void
475 ath_setup_hal_config(struct ath_softc *sc, HAL_OPS_CONFIG *ah_config)
476 {
477 /* XXX TODO: only for PCI devices? */
478
479 if (sc->sc_pci_devinfo & (ATH_PCI_CUS198 | ATH_PCI_CUS230)) {
480 ah_config->ath_hal_ext_lna_ctl_gpio = 0x200; /* bit 9 */
481 ah_config->ath_hal_ext_atten_margin_cfg = AH_TRUE;
482 ah_config->ath_hal_min_gainidx = AH_TRUE;
483 ah_config->ath_hal_ant_ctrl_comm2g_switch_enable = 0x000bbb88;
484 /* XXX low_rssi_thresh */
485 /* XXX fast_div_bias */
486 device_printf(sc->sc_dev, "configuring for %s\n",
487 (sc->sc_pci_devinfo & ATH_PCI_CUS198) ?
488 "CUS198" : "CUS230");
489 }
490
491 if (sc->sc_pci_devinfo & ATH_PCI_CUS217)
492 device_printf(sc->sc_dev, "CUS217 card detected\n");
493
494 if (sc->sc_pci_devinfo & ATH_PCI_CUS252)
495 device_printf(sc->sc_dev, "CUS252 card detected\n");
496
497 if (sc->sc_pci_devinfo & ATH_PCI_AR9565_1ANT)
498 device_printf(sc->sc_dev, "WB335 1-ANT card detected\n");
499
500 if (sc->sc_pci_devinfo & ATH_PCI_AR9565_2ANT)
501 device_printf(sc->sc_dev, "WB335 2-ANT card detected\n");
502
503 if (sc->sc_pci_devinfo & ATH_PCI_KILLER)
504 device_printf(sc->sc_dev, "Killer Wireless card detected\n");
505
506 #if 0
507 /*
508 * Some WB335 cards do not support antenna diversity. Since
509 * we use a hardcoded value for AR9565 instead of using the
510 * EEPROM/OTP data, remove the combining feature from
511 * the HW capabilities bitmap.
512 */
513 if (sc->sc_pci_devinfo & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
514 if (!(sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV))
515 pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
516 }
517
518 if (sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV) {
519 pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
520 device_printf(sc->sc_dev, "Set BT/WLAN RX diversity capability\n");
521 }
522 #endif
523
524 if (sc->sc_pci_devinfo & ATH_PCI_D3_L1_WAR) {
525 ah_config->ath_hal_pcie_waen = 0x0040473b;
526 device_printf(sc->sc_dev, "Enable WAR for ASPM D3/L1\n");
527 }
528
529 #if 0
530 if (sc->sc_pci_devinfo & ATH9K_PCI_NO_PLL_PWRSAVE) {
531 ah->config.no_pll_pwrsave = true;
532 device_printf(sc->sc_dev, "Disable PLL PowerSave\n");
533 }
534 #endif
535
536 }
537
538 /*
539 * Attempt to fetch the MAC address from the kernel environment.
540 *
541 * Returns 0 and fills in macaddr if successful; -1 otherwise.
542 */
543 static int
544 ath_fetch_mac_kenv(struct ath_softc *sc, uint8_t *macaddr)
545 {
546 char devid_str[32];
547 int local_mac = 0;
548 char *local_macstr;
549
550 /*
551 * Fetch from the kenv rather than using hints.
552 *
553 * Hints would be nice but the transition to dynamic
554 * hints/kenv doesn't happen early enough for this
555 * to work reliably (e.g., on anything embedded).
556 */
557 ksnprintf(devid_str, 32, "hint.%s.%d.macaddr",
558 device_get_name(sc->sc_dev),
559 device_get_unit(sc->sc_dev));
560
561 #if defined(__DragonFly__)
562 if ((local_macstr = kgetenv(devid_str)) != NULL) {
563 #else
564 if ((local_macstr = kern_getenv(devid_str)) != NULL) {
565 #endif
566 uint32_t tmpmac[ETHER_ADDR_LEN];
567 int count;
568 int i;
569
570 /* Have a MAC address; should use it */
571 device_printf(sc->sc_dev,
572 "Overriding MAC address from environment: '%s'\n",
573 local_macstr);
574
575 /* Extract out the MAC address */
576 count = ksscanf(local_macstr, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
577 &tmpmac[0], &tmpmac[1],
578 &tmpmac[2], &tmpmac[3],
579 &tmpmac[4], &tmpmac[5]);
580 if (count == 6) {
581 /* Valid! */
582 local_mac = 1;
583 for (i = 0; i < ETHER_ADDR_LEN; i++)
584 macaddr[i] = tmpmac[i];
585 }
586 /* Done! */
587 kfreeenv(local_macstr);
588 local_macstr = NULL;
589 }
590
591 if (local_mac)
592 return (0);
593 return (-1);
594 }
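/*
 * Example override (editor's note): setting the tunable below in
 * loader.conf / kenv makes this routine win over the EEPROM address:
 *
 *	hint.ath.0.macaddr="00:11:22:33:44:55"
 *
 * Any single-character separator accepted by the %x%*c scan above
 * (':', '-', etc.) will do; exactly six hex octets must parse for
 * the override to be used.
 */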
595
596 #define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
597 #define HAL_MODE_HT40 \
598 (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
599 HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
600 int
601 ath_attach(u_int16_t devid, struct ath_softc *sc)
602 {
603 struct ieee80211com *ic = &sc->sc_ic;
604 struct ath_hal *ah = NULL;
605 HAL_STATUS status;
606 int error = 0, i;
607 u_int wmodes;
608 int rx_chainmask, tx_chainmask;
609 HAL_OPS_CONFIG ah_config;
610
611 DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
612
613 #if defined(__DragonFly__)
614 wlan_serialize_enter();
615 #endif
616 ic->ic_softc = sc;
617 ic->ic_name = device_get_nameunit(sc->sc_dev);
618
619 /*
620 * Set up the initial configuration data.
621 *
622 * This is stuff that may be needed early during attach
623 * rather than done via configuration calls later.
624 */
625 bzero(&ah_config, sizeof(ah_config));
626 ath_setup_hal_config(sc, &ah_config);
627
628 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
629 sc->sc_eepromdata, &ah_config, &status);
630 if (ah == NULL) {
631 device_printf(sc->sc_dev,
632 "unable to attach hardware; HAL status %u\n", status);
633 error = ENXIO;
634 goto bad;
635 }
636 sc->sc_ah = ah;
637 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */
638 #ifdef ATH_DEBUG
639 sc->sc_debug = ath_debug;
640 #endif
641
642 /*
643 * Setup the DMA/EDMA functions based on the current
644 * hardware support.
645 *
646 * This is required before the descriptors are allocated.
647 */
648 if (ath_hal_hasedma(sc->sc_ah)) {
649 sc->sc_isedma = 1;
650 ath_recv_setup_edma(sc);
651 ath_xmit_setup_edma(sc);
652 } else {
653 ath_recv_setup_legacy(sc);
654 ath_xmit_setup_legacy(sc);
655 }
656
657 if (ath_hal_hasmybeacon(sc->sc_ah)) {
658 sc->sc_do_mybeacon = 1;
659 }
660
661 /*
662 * Check if the MAC has multi-rate retry support.
663 * We do this by trying to setup a fake extended
664 * descriptor. MACs that don't have support will
665 * return false w/o doing anything. MACs that do
666 * support it will return true w/o doing anything.
667 */
668 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);
669
670 /*
671 * Check if the device has hardware counters for PHY
672 * errors. If so we need to enable the MIB interrupt
673 * so we can act on stat triggers.
674 */
675 if (ath_hal_hwphycounters(ah))
676 sc->sc_needmib = 1;
677
678 /*
679 * Get the hardware key cache size.
680 */
681 sc->sc_keymax = ath_hal_keycachesize(ah);
682 if (sc->sc_keymax > ATH_KEYMAX) {
683 device_printf(sc->sc_dev,
684 "Warning, using only %u of %u key cache slots\n",
685 ATH_KEYMAX, sc->sc_keymax);
686 sc->sc_keymax = ATH_KEYMAX;
687 }
688 /*
689 * Reset the key cache since some parts do not
690 * reset the contents on initial power up.
691 */
692 for (i = 0; i < sc->sc_keymax; i++)
693 ath_hal_keyreset(ah, i);
694
695 /*
696 * Collect the default channel list.
697 */
698 error = ath_getchannels(sc);
699 if (error != 0)
700 goto bad;
701
702 /*
703 * Setup rate tables for all potential media types.
704 */
705 ath_rate_setup(sc, IEEE80211_MODE_11A);
706 ath_rate_setup(sc, IEEE80211_MODE_11B);
707 ath_rate_setup(sc, IEEE80211_MODE_11G);
708 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
709 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
710 ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
711 ath_rate_setup(sc, IEEE80211_MODE_11NA);
712 ath_rate_setup(sc, IEEE80211_MODE_11NG);
713 ath_rate_setup(sc, IEEE80211_MODE_HALF);
714 ath_rate_setup(sc, IEEE80211_MODE_QUARTER);
715
716 /* NB: setup here so ath_rate_update is happy */
717 ath_setcurmode(sc, IEEE80211_MODE_11A);
718
719 /*
720 * Allocate TX descriptors and populate the lists.
721 */
722 error = ath_desc_alloc(sc);
723 if (error != 0) {
724 device_printf(sc->sc_dev,
725 "failed to allocate TX descriptors: %d\n", error);
726 goto bad;
727 }
728 error = ath_txdma_setup(sc);
729 if (error != 0) {
730 device_printf(sc->sc_dev,
731 "failed to allocate TX descriptors: %d\n", error);
732 goto bad;
733 }
734
735 /*
736 * Allocate RX descriptors and populate the lists.
737 */
738 error = ath_rxdma_setup(sc);
739 if (error != 0) {
740 device_printf(sc->sc_dev,
741 "failed to allocate RX descriptors: %d\n", error);
742 goto bad;
743 }
744
745 #if defined(__DragonFly__)
746 callout_init_lk(&sc->sc_cal_ch, &sc->sc_mtx);
747 callout_init_lk(&sc->sc_wd_ch, &sc->sc_mtx);
748 #else
749 callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx);
750 callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx);
751 #endif
752
753 ATH_TXBUF_LOCK_INIT(sc);
754
755 #if defined(__DragonFly__)
756 sc->sc_tq = taskqueue_create("ath_taskq", M_INTWAIT,
757 taskqueue_thread_enqueue, &sc->sc_tq);
758 taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON, -1,
759 "%s taskq", device_get_nameunit(sc->sc_dev));
760 #else
761 sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
762 taskqueue_thread_enqueue, &sc->sc_tq);
763 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
764 device_get_nameunit(sc->sc_dev));
765 #endif
766
767 TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
768 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
769 TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
770 TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc);
771 TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc);
772 TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc);
773
774 /*
775 * Allocate hardware transmit queues: one queue for
776 * beacon frames and one data queue for each QoS
777 * priority. Note that the hal handles resetting
778 * these queues at the needed time.
779 *
780 * XXX PS-Poll
781 */
782 sc->sc_bhalq = ath_beaconq_setup(sc);
783 if (sc->sc_bhalq == (u_int) -1) {
784 device_printf(sc->sc_dev,
785 "unable to setup a beacon xmit queue!\n");
786 error = EIO;
787 goto bad2;
788 }
789 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
790 if (sc->sc_cabq == NULL) {
791 device_printf(sc->sc_dev, "unable to setup CAB xmit queue!\n");
792 error = EIO;
793 goto bad2;
794 }
795 /* NB: ensure BK queue is the lowest priority h/w queue */
796 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
797 device_printf(sc->sc_dev,
798 "unable to setup xmit queue for %s traffic!\n",
799 ieee80211_wme_acnames[WME_AC_BK]);
800 error = EIO;
801 goto bad2;
802 }
803 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
804 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
805 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
806 /*
807 * Not enough hardware tx queues to properly do WME;
808 * just punt and assign them all to the same h/w queue.
809 * We could do a better job of this if, for example,
810 * we allocate queues when we switch from station to
811 * AP mode.
812 */
813 if (sc->sc_ac2q[WME_AC_VI] != NULL)
814 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
815 if (sc->sc_ac2q[WME_AC_BE] != NULL)
816 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
817 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
818 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
819 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
820 }
821
822 /*
823 * Attach the TX completion function.
824 *
825 * The non-EDMA chips may have some special case optimisations;
826 * this method gives everyone a chance to attach cleanly.
827 */
828 sc->sc_tx.xmit_attach_comp_func(sc);
829
830 /*
831 * Setup rate control. Some rate control modules
832 * call back to change the antenna state so expose
833 * the necessary entry points.
834 * XXX maybe belongs in struct ath_ratectrl?
835 */
836 sc->sc_setdefantenna = ath_setdefantenna;
837 sc->sc_rc = ath_rate_attach(sc);
838 if (sc->sc_rc == NULL) {
839 error = EIO;
840 goto bad2;
841 }
842
843 /* Attach DFS module */
844 if (! ath_dfs_attach(sc)) {
845 device_printf(sc->sc_dev,
846 "%s: unable to attach DFS\n", __func__);
847 error = EIO;
848 goto bad2;
849 }
850
851 /* Attach spectral module */
852 if (ath_spectral_attach(sc) < 0) {
853 device_printf(sc->sc_dev,
854 "%s: unable to attach spectral\n", __func__);
855 error = EIO;
856 goto bad2;
857 }
858
859 /* Attach bluetooth coexistence module */
860 if (ath_btcoex_attach(sc) < 0) {
861 device_printf(sc->sc_dev,
862 "%s: unable to attach bluetooth coexistence\n", __func__);
863 error = EIO;
864 goto bad2;
865 }
866
867 /* Attach LNA diversity module */
868 if (ath_lna_div_attach(sc) < 0) {
869 device_printf(sc->sc_dev,
870 "%s: unable to attach LNA diversity\n", __func__);
871 error = EIO;
872 goto bad2;
873 }
874
875 /* Start DFS processing tasklet */
876 TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);
877
878 /* Configure LED state */
879 sc->sc_blinking = 0;
880 sc->sc_ledstate = 1;
881 sc->sc_ledon = 0; /* low true */
882 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */
883 #if defined(__DragonFly__)
884 callout_init_mp(&sc->sc_ledtimer);
885 #else
886 callout_init(&sc->sc_ledtimer, 1);
887 #endif
888
889 /*
890 * Don't setup hardware-based blinking.
891 *
892 * Although some NICs may have this configured in the
893 * default reset register values, the user may wish
894 * to alter which pins have which function.
895 *
896 * The reference driver attaches the MAC network LED to GPIO1 and
897 * the MAC power LED to GPIO2. However, the DWA-552 cardbus
898 * NIC has these reversed.
899 */
900 sc->sc_hardled = (1 == 0);
901 sc->sc_led_net_pin = -1;
902 sc->sc_led_pwr_pin = -1;
903 /*
904 * Auto-enable soft led processing for IBM cards and for
905 * 5211 minipci cards. Users can also manually enable/disable
906 * support with a sysctl.
907 */
908 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
909 ath_led_config(sc);
910 ath_hal_setledstate(ah, HAL_LED_INIT);
911
912 /* XXX not right but it's not used anywhere important */
913 ic->ic_phytype = IEEE80211_T_OFDM;
914 ic->ic_opmode = IEEE80211_M_STA;
915 ic->ic_caps =
916 IEEE80211_C_STA /* station mode */
917 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
918 | IEEE80211_C_HOSTAP /* hostap mode */
919 | IEEE80211_C_MONITOR /* monitor mode */
920 | IEEE80211_C_AHDEMO /* adhoc demo mode */
921 | IEEE80211_C_WDS /* 4-address traffic works */
922 | IEEE80211_C_MBSS /* mesh point link mode */
923 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
924 | IEEE80211_C_SHSLOT /* short slot time supported */
925 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */
926 #ifndef ATH_ENABLE_11N
927 | IEEE80211_C_BGSCAN /* capable of bg scanning */
928 #endif
929 | IEEE80211_C_TXFRAG /* handle tx frags */
930 #ifdef ATH_ENABLE_DFS
931 | IEEE80211_C_DFS /* Enable radar detection */
932 #endif
933 | IEEE80211_C_PMGT /* Station side power mgmt */
934 | IEEE80211_C_SWSLEEP
935 ;
936 /*
937 * Query the hal to figure out h/w crypto support.
938 */
939 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
940 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
941 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
942 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
943 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
944 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
945 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
946 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
947 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
948 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
949 /*
950 * Check if h/w does the MIC and/or whether the
951 * separate key cache entries are required to
952 * handle both tx+rx MIC keys.
953 */
954 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
955 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
956 /*
957 * If the h/w supports storing tx+rx MIC keys
958 * in one cache slot automatically enable use.
959 */
960 if (ath_hal_hastkipsplit(ah) ||
961 !ath_hal_settkipsplit(ah, AH_FALSE))
962 sc->sc_splitmic = 1;
963 /*
964 * If the h/w can do TKIP MIC together with WME then
965 * we use it; otherwise we force the MIC to be done
966 * in software by the net80211 layer.
967 */
968 if (ath_hal_haswmetkipmic(ah))
969 sc->sc_wmetkipmic = 1;
970 }
971 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
972 /*
973 * Check for multicast key search support.
974 */
975 if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
976 !ath_hal_getmcastkeysearch(sc->sc_ah)) {
977 ath_hal_setmcastkeysearch(sc->sc_ah, 1);
978 }
979 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
980 /*
981 * Mark key cache slots associated with global keys
982 * as in use. If we knew TKIP was not to be used we
983 * could leave the +32, +64, and +32+64 slots free.
984 */
985 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
986 setbit(sc->sc_keymap, i);
987 setbit(sc->sc_keymap, i+64);
988 if (sc->sc_splitmic) {
989 setbit(sc->sc_keymap, i+32);
990 setbit(sc->sc_keymap, i+32+64);
991 }
992 }
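/*
 * Resulting layout (editor's sketch): for global key index i the loop
 * above reserves slot i for the key, slot i+64 for its MIC, and - when
 * sc_splitmic is set - slots i+32 and i+32+64 (i.e. i+96) so the tx
 * and rx TKIP MIC halves can live in separate cache entries.
 */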
993 /*
994 * TPC support can be done either with a global cap or
995 * per-packet support. The latter is not available on
996 * all parts. We're a bit pedantic here as all parts
997 * support a global cap.
998 */
999 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
1000 ic->ic_caps |= IEEE80211_C_TXPMGT;
1001
1002 /*
1003 * Mark WME capability only if we have sufficient
1004 * hardware queues to do proper priority scheduling.
1005 */
1006 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
1007 ic->ic_caps |= IEEE80211_C_WME;
1008 /*
1009 * Check for misc other capabilities.
1010 */
1011 if (ath_hal_hasbursting(ah))
1012 ic->ic_caps |= IEEE80211_C_BURST;
1013 sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
1014 sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
1015 sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
1016 sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
1017 sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
1018 sc->sc_hasenforcetxop = ath_hal_hasenforcetxop(ah);
1019 sc->sc_rx_lnamixer = ath_hal_hasrxlnamixer(ah);
1020 sc->sc_hasdivcomb = ath_hal_hasdivantcomb(ah);
1021
1022 if (ath_hal_hasfastframes(ah))
1023 ic->ic_caps |= IEEE80211_C_FF;
1024 wmodes = ath_hal_getwirelessmodes(ah);
1025 if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
1026 ic->ic_caps |= IEEE80211_C_TURBOP;
1027 #ifdef IEEE80211_SUPPORT_TDMA
1028 if (ath_hal_macversion(ah) > 0x78) {
1029 ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
1030 ic->ic_tdma_update = ath_tdma_update;
1031 }
1032 #endif
1033
1034 /*
1035 * TODO: enforce that at least this many frames are available
1036 * in the txbuf list before allowing data frames (raw or
1037 * otherwise) to be transmitted.
1038 */
1039 sc->sc_txq_data_minfree = 10;
1040 /*
1041 * Leave this as default to maintain legacy behaviour.
1042 * Shortening the cabq/mcastq may end up causing some
1043 * undesirable behaviour.
1044 */
1045 sc->sc_txq_mcastq_maxdepth = ath_txbuf;
1046
1047 /*
1048 * How deep can the node software TX queue get whilst it's asleep.
1049 */
1050 sc->sc_txq_node_psq_maxdepth = 16;
1051
1052 /*
1053 * Default the maximum queue depth for a given node
1054 * to 1/4 of the TX buffers, or 64, whichever
1055 * is larger.
1056 */
1057 sc->sc_txq_node_maxdepth = MAX(64, ath_txbuf / 4);
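/*
 * Editor's example: with the stock ath_txbuf of 512 buffers (assumed;
 * see the ATH_TXBUF default above) this yields MAX(64, 512 / 4) = 128
 * frames per node; only when fewer than 256 tx buffers are configured
 * does the 64-frame floor kick in.
 */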
1058
1059 /* Enable CABQ by default */
1060 sc->sc_cabq_enable = 1;
1061
1062 /*
1063 * Allow the TX and RX chainmasks to be overridden by
1064 * environment variables and/or device.hints.
1065 *
1066 * This must be done early - before the hardware is
1067 * calibrated or before the 802.11n stream calculation
1068 * is done.
1069 */
1070 if (resource_int_value(device_get_name(sc->sc_dev),
1071 device_get_unit(sc->sc_dev), "rx_chainmask",
1072 &rx_chainmask) == 0) {
1073 device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
1074 rx_chainmask);
1075 (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
1076 }
1077 if (resource_int_value(device_get_name(sc->sc_dev),
1078 device_get_unit(sc->sc_dev), "tx_chainmask",
1079 &tx_chainmask) == 0) {
1080 device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
1081 tx_chainmask);
1082 (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
1083 }
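/*
 * Editor's example: the overrides above are consumed as device hints,
 * e.g. in loader.conf (values are illustrative):
 *
 *	hint.ath.0.rx_chainmask=0x3
 *	hint.ath.0.tx_chainmask=0x3
 *
 * which would restrict a 3-chain part to its first two chains before
 * the 802.11n stream count is derived below.
 */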
1084
1085 /*
1086 * Query the TX/RX chainmask configuration.
1087 *
1088 * This is only relevant for 11n devices.
1089 */
1090 ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
1091 ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);
1092
1093 /*
1094 * Disable MRR with protected frames by default.
1095 * Only 802.11n series NICs can handle this.
1096 */
1097 sc->sc_mrrprot = 0; /* XXX should be a capability */
1098
1099 /*
1100 * Query the enterprise mode information from the HAL.
1101 */
1102 if (ath_hal_getcapability(ah, HAL_CAP_ENTERPRISE_MODE, 0,
1103 &sc->sc_ent_cfg) == HAL_OK)
1104 sc->sc_use_ent = 1;
1105
1106 #ifdef ATH_ENABLE_11N
1107 /*
1108 * Query HT capabilities
1109 */
1110 if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
1111 (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
1112 uint32_t rxs, txs;
1113 uint32_t ldpc;
1114
1115 device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
1116
1117 sc->sc_mrrprot = 1; /* XXX should be a capability */
1118
1119 ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */
1120 | IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */
1121 | IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */
1122 | IEEE80211_HTCAP_MAXAMSDU_3839
1123 /* max A-MSDU length */
1124 | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */
1125
1126 /*
1127 * Enable short-GI for HT20 only if the hardware
1128 * advertises support.
1129 * Notably, anything earlier than the AR9287 doesn't.
1130 */
1131 if ((ath_hal_getcapability(ah,
1132 HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
1133 (wmodes & HAL_MODE_HT20)) {
1134 device_printf(sc->sc_dev,
1135 "[HT] enabling short-GI in 20MHz mode\n");
1136 ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
1137 }
1138
1139 if (wmodes & HAL_MODE_HT40)
1140 ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
1141 | IEEE80211_HTCAP_SHORTGI40;
1142
1143 /*
1144 * TX/RX streams need to be taken into account when
1145 * negotiating which MCS rates it'll receive and
1146 * what MCS rates are available for TX.
1147 */
1148 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
1149 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);
1150 ic->ic_txstream = txs;
1151 ic->ic_rxstream = rxs;
1152
1153 /*
1154 * Setup TX and RX STBC based on what the HAL allows and
1155 * the currently configured chainmask set.
1156 * Ie - don't enable STBC TX if only one chain is enabled.
1157 * STBC RX is fine on a single RX chain; it just won't
1158 * provide any real benefit.
1159 */
1160 if (ath_hal_getcapability(ah, HAL_CAP_RX_STBC, 0,
1161 NULL) == HAL_OK) {
1162 sc->sc_rx_stbc = 1;
1163 device_printf(sc->sc_dev,
1164 "[HT] 1 stream STBC receive enabled\n");
1165 ic->ic_htcaps |= IEEE80211_HTCAP_RXSTBC_1STREAM;
1166 }
1167 if (txs > 1 && ath_hal_getcapability(ah, HAL_CAP_TX_STBC, 0,
1168 NULL) == HAL_OK) {
1169 sc->sc_tx_stbc = 1;
1170 device_printf(sc->sc_dev,
1171 "[HT] 1 stream STBC transmit enabled\n");
1172 ic->ic_htcaps |= IEEE80211_HTCAP_TXSTBC;
1173 }
1174
1175 (void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
1176 &sc->sc_rts_aggr_limit);
1177 if (sc->sc_rts_aggr_limit != (64 * 1024))
1178 device_printf(sc->sc_dev,
1179 "[HT] RTS aggregates limited to %d KiB\n",
1180 sc->sc_rts_aggr_limit / 1024);
1181
1182 /*
1183 * LDPC
1184 */
1185 if ((ath_hal_getcapability(ah, HAL_CAP_LDPC, 0, &ldpc))
1186 == HAL_OK && (ldpc == 1)) {
1187 sc->sc_has_ldpc = 1;
1188 device_printf(sc->sc_dev,
1189 "[HT] LDPC transmit/receive enabled\n");
1190 ic->ic_htcaps |= IEEE80211_HTCAP_LDPC;
1191 }
1192
1193
1194 device_printf(sc->sc_dev,
1195 "[HT] %d RX streams; %d TX streams\n", rxs, txs);
1196 }
1197 #endif
1198
1199 /*
1200 * Initial aggregation settings.
1201 */
1202 sc->sc_hwq_limit_aggr = ATH_AGGR_MIN_QDEPTH;
1203 sc->sc_hwq_limit_nonaggr = ATH_NONAGGR_MIN_QDEPTH;
1204 sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
1205 sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;
1206 sc->sc_aggr_limit = ATH_AGGR_MAXSIZE;
1207 sc->sc_delim_min_pad = 0;
1208
1209 /*
1210 * Check if the hardware requires PCI register serialisation.
1211 * Some of the Owl based MACs require this.
1212 */
1213 #if defined(__DragonFly__)
1214 if (ncpus > 1 &&
1215 #else
1216 if (mp_ncpus > 1 &&
1217 #endif
1218 ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
1219 0, NULL) == HAL_OK) {
1220 sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
1221 device_printf(sc->sc_dev,
1222 "Enabling register serialisation\n");
1223 }
1224
1225 /*
1226 * Initialise the deferred completed RX buffer list.
1227 */
1228 TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
1229 TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
1230
1231 /*
1232 * Indicate we need the 802.11 header padded to a
1233 * 32-bit boundary for 4-address and QoS frames.
1234 */
1235 ic->ic_flags |= IEEE80211_F_DATAPAD;
1236
1237 /*
1238 * Query the hal about antenna support.
1239 */
1240 sc->sc_defant = ath_hal_getdefantenna(ah);
1241
1242 /*
1243 * Not all chips have the VEOL support we want to
1244 * use with IBSS beacons; check here for it.
1245 */
1246 sc->sc_hasveol = ath_hal_hasveol(ah);
1247
1248 /* get mac address from kenv first, then hardware */
1249 if (ath_fetch_mac_kenv(sc, ic->ic_macaddr) == 0) {
1250 /* Tell the HAL now about the new MAC */
1251 ath_hal_setmac(ah, ic->ic_macaddr);
1252 } else {
1253 ath_hal_getmac(ah, ic->ic_macaddr);
1254 }
1255
1256 if (sc->sc_hasbmask)
1257 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);
1258
1259 /* NB: used to size node table key mapping array */
1260 ic->ic_max_keyix = sc->sc_keymax;
1261 /* call MI attach routine. */
1262 ieee80211_ifattach(ic);
1263 ic->ic_setregdomain = ath_setregdomain;
1264 ic->ic_getradiocaps = ath_getradiocaps;
1265 sc->sc_opmode = HAL_M_STA;
1266
1267 /* override default methods */
1268 ic->ic_ioctl = ath_ioctl;
1269 ic->ic_parent = ath_parent;
1270 ic->ic_transmit = ath_transmit;
1271 ic->ic_newassoc = ath_newassoc;
1272 ic->ic_updateslot = ath_updateslot;
1273 ic->ic_wme.wme_update = ath_wme_update;
1274 ic->ic_vap_create = ath_vap_create;
1275 ic->ic_vap_delete = ath_vap_delete;
1276 ic->ic_raw_xmit = ath_raw_xmit;
1277 ic->ic_update_mcast = ath_update_mcast;
1278 ic->ic_update_promisc = ath_update_promisc;
1279 ic->ic_node_alloc = ath_node_alloc;
1280 sc->sc_node_free = ic->ic_node_free;
1281 ic->ic_node_free = ath_node_free;
1282 sc->sc_node_cleanup = ic->ic_node_cleanup;
1283 ic->ic_node_cleanup = ath_node_cleanup;
1284 ic->ic_node_getsignal = ath_node_getsignal;
1285 ic->ic_scan_start = ath_scan_start;
1286 ic->ic_scan_end = ath_scan_end;
1287 ic->ic_set_channel = ath_set_channel;
1288 #ifdef ATH_ENABLE_11N
1289 /* 802.11n specific - but just override anyway */
1290 sc->sc_addba_request = ic->ic_addba_request;
1291 sc->sc_addba_response = ic->ic_addba_response;
1292 sc->sc_addba_stop = ic->ic_addba_stop;
1293 sc->sc_bar_response = ic->ic_bar_response;
1294 sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;
1295
1296 ic->ic_addba_request = ath_addba_request;
1297 ic->ic_addba_response = ath_addba_response;
1298 ic->ic_addba_response_timeout = ath_addba_response_timeout;
1299 ic->ic_addba_stop = ath_addba_stop;
1300 ic->ic_bar_response = ath_bar_response;
1301
1302 ic->ic_update_chw = ath_update_chw;
1303 #endif /* ATH_ENABLE_11N */
1304
1305 #ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT
1306 /*
1307 * There's one vendor bitmap entry in the RX radiotap
1308 * header; make sure that's taken into account.
1309 */
1310 ieee80211_radiotap_attachv(ic,
1311 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0,
1312 ATH_TX_RADIOTAP_PRESENT,
1313 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1,
1314 ATH_RX_RADIOTAP_PRESENT);
1315 #else
1316 /*
1317 * No vendor bitmap/extensions are present.
1318 */
1319 ieee80211_radiotap_attach(ic,
1320 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
1321 ATH_TX_RADIOTAP_PRESENT,
1322 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
1323 ATH_RX_RADIOTAP_PRESENT);
1324 #endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
1325
1326 /*
1327 * Setup the ALQ logging if required
1328 */
1329 #ifdef ATH_DEBUG_ALQ
1330 if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev));
1331 if_ath_alq_setcfg(&sc->sc_alq,
1332 sc->sc_ah->ah_macVersion,
1333 sc->sc_ah->ah_macRev,
1334 sc->sc_ah->ah_phyRev,
1335 sc->sc_ah->ah_magic);
1336 #endif
1337
1338 /*
1339 * Setup dynamic sysctl's now that country code and
1340 * regdomain are available from the hal.
1341 */
1342 ath_sysctlattach(sc);
1343 ath_sysctl_stats_attach(sc);
1344 ath_sysctl_hal_attach(sc);
1345
1346 if (bootverbose)
1347 ieee80211_announce(ic);
1348 ath_announce(sc);
1349
1350 /*
1351 * Put it to sleep for now.
1352 */
1353 ATH_LOCK(sc);
1354 ath_power_setpower(sc, HAL_PM_FULL_SLEEP);
1355 ATH_UNLOCK(sc);
1356
1357 #if defined(__DragonFly__)
1358 wlan_serialize_exit();
1359 #endif
1360
1361 return 0;
1362 bad2:
1363 ath_tx_cleanup(sc);
1364 ath_desc_free(sc);
1365 ath_txdma_teardown(sc);
1366 ath_rxdma_teardown(sc);
1367 bad:
1368 if (ah)
1369 ath_hal_detach(ah);
1370
1371 #if defined(__DragonFly__)
1372 /*
1373 * To work around scoping issues with CURVNET_SET/CURVNET_RESTORE..
1374 */
1375 sc->sc_invalid = 1;
1376 wlan_serialize_exit();
1377 #else
1378 sc->sc_invalid = 1;
1379 #endif
1380
1381 return error;
1382 }
1383
1384 int
1385 ath_detach(struct ath_softc *sc)
1386 {
1387
1388 /*
1389 * NB: the order of these is important:
1390 * o stop the chip so no more interrupts will fire
1391 * o call the 802.11 layer before detaching the hal to
1392 * ensure callbacks into the driver to delete global
1393 * key cache entries can be handled
1394 * o free the taskqueue which drains any pending tasks
1395 * o reclaim the tx queue data structures after calling
1396 * the 802.11 layer as we'll get called back to reclaim
1397 * node state and potentially want to use them
1398 * o to cleanup the tx queues the hal is called, so detach
1399 * it last
1400 * Other than that, it's straightforward...
1401 */
1402
1403 /*
1404 * XXX Wake the hardware up first. ath_stop() will still
1405 * wake it up first, but I'd rather do it here just to
1406 * ensure it's awake.
1407 */
1408 ATH_LOCK(sc);
1409 ath_power_set_power_state(sc, HAL_PM_AWAKE);
1410 ath_power_setpower(sc, HAL_PM_AWAKE);
1411
1412 /*
1413 * Stop things cleanly.
1414 */
1415 ath_stop(sc);
1416 ATH_UNLOCK(sc);
1417
1418 ieee80211_ifdetach(&sc->sc_ic);
1419 taskqueue_free(sc->sc_tq);
1420 #ifdef ATH_TX99_DIAG
1421 if (sc->sc_tx99 != NULL)
1422 sc->sc_tx99->detach(sc->sc_tx99);
1423 #endif
1424 ath_rate_detach(sc->sc_rc);
1425 #ifdef ATH_DEBUG_ALQ
1426 if_ath_alq_tidyup(&sc->sc_alq);
1427 #endif
1428 ath_lna_div_detach(sc);
1429 ath_btcoex_detach(sc);
1430 ath_spectral_detach(sc);
1431 ath_dfs_detach(sc);
1432 ath_desc_free(sc);
1433 ath_txdma_teardown(sc);
1434 ath_rxdma_teardown(sc);
1435 ath_tx_cleanup(sc);
1436 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */
1437
1438 return 0;
1439 }
1440
1441 /*
1442 * MAC address handling for multiple BSS on the same radio.
1443 * The first vap uses the MAC address from the EEPROM. For
1444 * subsequent vaps we set the U/L bit (bit 1) in the MAC
1445 * address and use the next six bits as an index.
1446 */
1447 static void
1448 assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
1449 {
1450 int i;
1451
1452 if (clone && sc->sc_hasbmask) {
1453 /* NB: we only do this if h/w supports multiple bssid */
1454 for (i = 0; i < 8; i++)
1455 if ((sc->sc_bssidmask & (1<<i)) == 0)
1456 break;
1457 if (i != 0)
1458 mac[0] |= (i << 2)|0x2;
1459 } else
1460 i = 0;
1461 sc->sc_bssidmask |= 1<<i;
1462 sc->sc_hwbssidmask[0] &= ~mac[0];
1463 if (i == 0)
1464 sc->sc_nbssid0++;
1465 }
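/*
 * Worked example (editor's note): with sc_bssidmask = 0x0b (slots 0,
 * 1 and 3 busy) the loop picks i = 2, so mac[0] |= (2 << 2) | 0x2,
 * turning a base address of 00:03:7f:aa:bb:cc into 0a:03:7f:aa:bb:cc:
 * the U/L bit is set and the slot index rides in the next bits, which
 * the cleared sc_hwbssidmask bits then tell the hardware to ignore
 * when matching the BSSID.
 */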
1466
1467 static void
1468 reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
1469 {
1470 int i = mac[0] >> 2;
1471 uint8_t mask;
1472
1473 if (i != 0 || --sc->sc_nbssid0 == 0) {
1474 sc->sc_bssidmask &= ~(1<<i);
1475 /* recalculate bssid mask from remaining addresses */
1476 mask = 0xff;
1477 for (i = 1; i < 8; i++)
1478 if (sc->sc_bssidmask & (1<<i))
1479 mask &= ~((i<<2)|0x2);
1480 sc->sc_hwbssidmask[0] |= mask;
1481 }
1482 }
1483
1484 /*
1485 * Assign a beacon xmit slot. We try to space out
1486 * assignments so when beacons are staggered the
1487 * traffic coming out of the cab q has maximal time
1488 * to go out before the next beacon is scheduled.
1489 */
1490 static int
1491 assign_bslot(struct ath_softc *sc)
1492 {
1493 u_int slot, free;
1494
1495 free = 0;
1496 for (slot = 0; slot < ATH_BCBUF; slot++)
1497 if (sc->sc_bslot[slot] == NULL) {
1498 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
1499 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
1500 return slot;
1501 free = slot;
1502 /* NB: keep looking for a double slot */
1503 }
1504 return free;
1505 }
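/*
 * Editor's example: with ATH_BCBUF = 4 and only slot 0 occupied, the
 * scan above returns slot 2 - both of its neighbours (1 and 3) are
 * free - giving the CAB traffic of each beacon the longest possible
 * gap.  If no such "double" slot exists, the last free slot seen is
 * returned instead.
 */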
1506
1507 static struct ieee80211vap *
1508 ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
1509 enum ieee80211_opmode opmode, int flags,
1510 const uint8_t bssid[IEEE80211_ADDR_LEN],
1511 const uint8_t mac0[IEEE80211_ADDR_LEN])
1512 {
1513 struct ath_softc *sc = ic->ic_softc;
1514 struct ath_vap *avp;
1515 struct ieee80211vap *vap;
1516 uint8_t mac[IEEE80211_ADDR_LEN];
1517 int needbeacon, error;
1518 enum ieee80211_opmode ic_opmode;
1519
1520 avp = kmalloc(sizeof(struct ath_vap), M_80211_VAP, M_WAITOK | M_ZERO);
1521 needbeacon = 0;
1522 IEEE80211_ADDR_COPY(mac, mac0);
1523
1524 ATH_LOCK(sc);
1525 ic_opmode = opmode; /* default to opmode of new vap */
1526 switch (opmode) {
1527 case IEEE80211_M_STA:
1528 if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */
1529 device_printf(sc->sc_dev, "only 1 sta vap supported\n");
1530 goto bad;
1531 }
1532 if (sc->sc_nvaps) {
1533 /*
1534 * With multiple vaps we must fall back
1535 * to s/w beacon miss handling.
1536 */
1537 flags |= IEEE80211_CLONE_NOBEACONS;
1538 }
1539 if (flags & IEEE80211_CLONE_NOBEACONS) {
1540 /*
1541 * Station mode w/o beacons are implemented w/ AP mode.
1542 */
1543 ic_opmode = IEEE80211_M_HOSTAP;
1544 }
1545 break;
1546 case IEEE80211_M_IBSS:
1547 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */
1548 device_printf(sc->sc_dev,
1549 "only 1 ibss vap supported\n");
1550 goto bad;
1551 }
1552 needbeacon = 1;
1553 break;
1554 case IEEE80211_M_AHDEMO:
1555 #ifdef IEEE80211_SUPPORT_TDMA
1556 if (flags & IEEE80211_CLONE_TDMA) {
1557 if (sc->sc_nvaps != 0) {
1558 device_printf(sc->sc_dev,
1559 "only 1 tdma vap supported\n");
1560 goto bad;
1561 }
1562 needbeacon = 1;
1563 flags |= IEEE80211_CLONE_NOBEACONS;
1564 }
1565 /* fall thru... */
1566 #endif
1567 case IEEE80211_M_MONITOR:
1568 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
1569 /*
1570 * Adopt existing mode. Adding a monitor or ahdemo
1571 * vap to an existing configuration is of dubious
1572 * value but should be ok.
1573 */
1574 /* XXX not right for monitor mode */
1575 ic_opmode = ic->ic_opmode;
1576 }
1577 break;
1578 case IEEE80211_M_HOSTAP:
1579 case IEEE80211_M_MBSS:
1580 needbeacon = 1;
1581 break;
1582 case IEEE80211_M_WDS:
1583 if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
1584 device_printf(sc->sc_dev,
1585 "wds not supported in sta mode\n");
1586 goto bad;
1587 }
1588 /*
1589 * Silently remove any request for a unique
1590 * bssid; WDS vaps always share the local
1591 * mac address.
1592 */
1593 flags &= ~IEEE80211_CLONE_BSSID;
1594 if (sc->sc_nvaps == 0)
1595 ic_opmode = IEEE80211_M_HOSTAP;
1596 else
1597 ic_opmode = ic->ic_opmode;
1598 break;
1599 default:
1600 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
1601 goto bad;
1602 }
1603 /*
1604 * Check that a beacon buffer is available; the code below assumes it.
1605 */
1606 if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
1607 device_printf(sc->sc_dev, "no beacon buffer available\n");
1608 goto bad;
1609 }
1610
1611 /* STA, AHDEMO? */
1612 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
1613 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
1614 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1615 }
1616
1617 vap = &avp->av_vap;
1618 /* XXX can't hold mutex across if_alloc */
1619 ATH_UNLOCK(sc);
1620 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
1621 ATH_LOCK(sc);
1622 if (error != 0) {
1623 device_printf(sc->sc_dev, "%s: error %d creating vap\n",
1624 __func__, error);
1625 goto bad2;
1626 }
1627
1628 /* h/w crypto support */
1629 vap->iv_key_alloc = ath_key_alloc;
1630 vap->iv_key_delete = ath_key_delete;
1631 vap->iv_key_set = ath_key_set;
1632 vap->iv_key_update_begin = ath_key_update_begin;
1633 vap->iv_key_update_end = ath_key_update_end;
1634
1635 /* override various methods */
1636 avp->av_recv_mgmt = vap->iv_recv_mgmt;
1637 vap->iv_recv_mgmt = ath_recv_mgmt;
1638 vap->iv_reset = ath_reset_vap;
1639 vap->iv_update_beacon = ath_beacon_update;
1640 avp->av_newstate = vap->iv_newstate;
1641 vap->iv_newstate = ath_newstate;
1642 avp->av_bmiss = vap->iv_bmiss;
1643 vap->iv_bmiss = ath_bmiss_vap;
1644
1645 avp->av_node_ps = vap->iv_node_ps;
1646 vap->iv_node_ps = ath_node_powersave;
1647
1648 avp->av_set_tim = vap->iv_set_tim;
1649 vap->iv_set_tim = ath_node_set_tim;
1650
1651 avp->av_recv_pspoll = vap->iv_recv_pspoll;
1652 vap->iv_recv_pspoll = ath_node_recv_pspoll;
1653
1654 /* Set default parameters */
1655
1656 /*
1657 * Anything earlier than some AR9300 series MACs doesn't
1658 * support a smaller MPDU density.
1659 */
1660 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
1661 /*
1662 * All NICs can handle the maximum size; however,
1663 * AR5416 based MACs can only TX aggregates w/ RTS
1664 * protection when the total aggregate size is <= 8k.
1665 * However, for now that's enforced by the TX path.
1666 */
1667 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
1668
1669 avp->av_bslot = -1;
1670 if (needbeacon) {
1671 /*
1672 * Allocate beacon state and setup the q for buffered
1673 * multicast frames. We know a beacon buffer is
1674 * available because we checked above.
1675 */
1676 avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
1677 TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
1678 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
1679 /*
1680 * Assign the vap to a beacon xmit slot. As above
1681 * this cannot fail to find a free one.
1682 */
1683 avp->av_bslot = assign_bslot(sc);
1684 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
1685 ("beacon slot %u not empty", avp->av_bslot));
1686 sc->sc_bslot[avp->av_bslot] = vap;
1687 sc->sc_nbcnvaps++;
1688 }
1689 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
1690 /*
1691 * Multiple vaps are to transmit beacons and we
1692 * have h/w support for TSF adjusting; enable
1693 * use of staggered beacons.
1694 */
1695 sc->sc_stagbeacons = 1;
1696 }
1697 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
1698 }
1699
1700 ic->ic_opmode = ic_opmode;
1701 if (opmode != IEEE80211_M_WDS) {
1702 sc->sc_nvaps++;
1703 if (opmode == IEEE80211_M_STA)
1704 sc->sc_nstavaps++;
1705 if (opmode == IEEE80211_M_MBSS)
1706 sc->sc_nmeshvaps++;
1707 }
1708 switch (ic_opmode) {
1709 case IEEE80211_M_IBSS:
1710 sc->sc_opmode = HAL_M_IBSS;
1711 break;
1712 case IEEE80211_M_STA:
1713 sc->sc_opmode = HAL_M_STA;
1714 break;
1715 case IEEE80211_M_AHDEMO:
1716 #ifdef IEEE80211_SUPPORT_TDMA
1717 if (vap->iv_caps & IEEE80211_C_TDMA) {
1718 sc->sc_tdma = 1;
1719 /* NB: disable tsf adjust */
1720 sc->sc_stagbeacons = 0;
1721 }
1722 /*
1723 * NB: adhoc demo mode is a pseudo mode; to the hal it's
1724 * just ap mode.
1725 */
1726 /* fall thru... */
1727 #endif
1728 case IEEE80211_M_HOSTAP:
1729 case IEEE80211_M_MBSS:
1730 sc->sc_opmode = HAL_M_HOSTAP;
1731 break;
1732 case IEEE80211_M_MONITOR:
1733 sc->sc_opmode = HAL_M_MONITOR;
1734 break;
1735 default:
1736 /* XXX should not happen */
1737 break;
1738 }
1739 if (sc->sc_hastsfadd) {
1740 /*
1741 * Configure whether or not TSF adjust should be done.
1742 */
1743 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
1744 }
1745 if (flags & IEEE80211_CLONE_NOBEACONS) {
1746 /*
1747 * Enable s/w beacon miss handling.
1748 */
1749 sc->sc_swbmiss = 1;
1750 }
1751 ATH_UNLOCK(sc);
1752
1753 /* complete setup */
1754 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status,
1755 mac);
1756 return vap;
1757 bad2:
1758 reclaim_address(sc, mac);
1759 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1760 bad:
1761 kfree(avp, M_80211_VAP);
1762 ATH_UNLOCK(sc);
1763 return NULL;
1764 }
1765
1766 static void
1767 ath_vap_delete(struct ieee80211vap *vap)
1768 {
1769 struct ieee80211com *ic = vap->iv_ic;
1770 struct ath_softc *sc = ic->ic_softc;
1771 struct ath_hal *ah = sc->sc_ah;
1772 struct ath_vap *avp = ATH_VAP(vap);
1773
1774 ATH_LOCK(sc);
1775 ath_power_set_power_state(sc, HAL_PM_AWAKE);
1776 ATH_UNLOCK(sc);
1777
1778 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
1779 if (sc->sc_running) {
1780 /*
1781 * Quiesce the hardware while we remove the vap. In
1782 * particular we need to reclaim all references to
1783 * the vap state by any frames pending on the tx queues.
1784 */
1785 ath_hal_intrset(ah, 0); /* disable interrupts */
1786 /* XXX Do all frames from all vaps/nodes need draining here? */
1787 ath_stoprecv(sc, 1); /* stop recv side */
1788 ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */
1789 }
1790
1791 /* .. leave the hardware awake for now. */
1792
1793 ieee80211_vap_detach(vap);
1794
1795 /*
1796 * XXX Danger Will Robinson! Danger!
1797 *
 * Because ieee80211_vap_detach() can queue a frame (the station
 * disassociate message?) after we've drained the TXQ and
1800 * flushed the software TXQ, we will end up with a frame queued
1801 * to a node whose vap is about to be freed.
1802 *
1803 * To work around this, flush the hardware/software again.
1804 * This may be racy - the ath task may be running and the packet
1805 * may be being scheduled between sw->hw txq. Tsk.
1806 *
1807 * TODO: figure out why a new node gets allocated somewhere around
1808 * here (after the ath_tx_swq() call; and after an ath_stop()
1809 * call!)
1810 */
1811
1812 ath_draintxq(sc, ATH_RESET_DEFAULT);
1813
1814 ATH_LOCK(sc);
1815 /*
1816 * Reclaim beacon state. Note this must be done before
1817 * the vap instance is reclaimed as we may have a reference
1818 * to it in the buffer for the beacon frame.
1819 */
1820 if (avp->av_bcbuf != NULL) {
1821 if (avp->av_bslot != -1) {
1822 sc->sc_bslot[avp->av_bslot] = NULL;
1823 sc->sc_nbcnvaps--;
1824 }
1825 ath_beacon_return(sc, avp->av_bcbuf);
1826 avp->av_bcbuf = NULL;
1827 if (sc->sc_nbcnvaps == 0) {
1828 sc->sc_stagbeacons = 0;
1829 if (sc->sc_hastsfadd)
1830 ath_hal_settsfadjust(sc->sc_ah, 0);
1831 }
1832 /*
1833 * Reclaim any pending mcast frames for the vap.
1834 */
1835 ath_tx_draintxq(sc, &avp->av_mcastq);
1836 }
1837 /*
1838 * Update bookkeeping.
1839 */
1840 if (vap->iv_opmode == IEEE80211_M_STA) {
1841 sc->sc_nstavaps--;
1842 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
1843 sc->sc_swbmiss = 0;
1844 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
1845 vap->iv_opmode == IEEE80211_M_MBSS) {
1846 reclaim_address(sc, vap->iv_myaddr);
1847 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
1848 if (vap->iv_opmode == IEEE80211_M_MBSS)
1849 sc->sc_nmeshvaps--;
1850 }
1851 if (vap->iv_opmode != IEEE80211_M_WDS)
1852 sc->sc_nvaps--;
1853 #ifdef IEEE80211_SUPPORT_TDMA
1854 /* TDMA operation ceases when the last vap is destroyed */
1855 if (sc->sc_tdma && sc->sc_nvaps == 0) {
1856 sc->sc_tdma = 0;
1857 sc->sc_swbmiss = 0;
1858 }
1859 #endif
1860 kfree(avp, M_80211_VAP);
1861
1862 if (sc->sc_running) {
1863 /*
1864 * Restart rx+tx machines if still running (RUNNING will
1865 * be reset if we just destroyed the last vap).
1866 */
1867 if (ath_startrecv(sc) != 0)
1868 device_printf(sc->sc_dev,
1869 "%s: unable to restart recv logic\n", __func__);
1870 if (sc->sc_beacons) { /* restart beacons */
1871 #ifdef IEEE80211_SUPPORT_TDMA
1872 if (sc->sc_tdma)
1873 ath_tdma_config(sc, NULL);
1874 else
1875 #endif
1876 ath_beacon_config(sc, NULL);
1877 }
1878 ath_hal_intrset(ah, sc->sc_imask);
1879 }
1880
/* Ok, let the hardware go back to sleep. */
1882 ath_power_restore_power_state(sc);
1883 ATH_UNLOCK(sc);
1884 }
1885
1886 void
1887 ath_suspend(struct ath_softc *sc)
1888 {
1889 struct ieee80211com *ic = &sc->sc_ic;
1890
1891 sc->sc_resume_up = ic->ic_nrunning != 0;
1892
1893 ieee80211_suspend_all(ic);
1894 /*
1895 * NB: don't worry about putting the chip in low power
1896 * mode; pci will power off our socket on suspend and
1897 * CardBus detaches the device.
1898 *
1899 * XXX TODO: well, that's great, except for non-cardbus
1900 * devices!
1901 */
1902
/*
 * XXX This doesn't wait until all pending taskqueue
 * items and parallel transmit/receive/other threads
 * have finished running!
 */
1908 ath_hal_intrset(sc->sc_ah, 0);
1909 taskqueue_block(sc->sc_tq);
1910
1911 ATH_LOCK(sc);
1912 #if defined(__DragonFly__)
1913 callout_cancel(&sc->sc_cal_ch);
1914 #else
1915 callout_stop(&sc->sc_cal_ch);
1916 #endif
1917 ATH_UNLOCK(sc);
1918
1919 /*
1920 * XXX ensure sc_invalid is 1
1921 */
1922
1923 /* Disable the PCIe PHY, complete with workarounds */
1924 ath_hal_enablepcie(sc->sc_ah, 1, 1);
1925 }
1926
/*
 * Reset the key cache since some parts do not reset the
 * contents on resume. First we clear all entries, then
 * re-load keys that the 802.11 layer assumes are set up
 * in h/w.
 */
1933 static void
1934 ath_reset_keycache(struct ath_softc *sc)
1935 {
1936 struct ieee80211com *ic = &sc->sc_ic;
1937 struct ath_hal *ah = sc->sc_ah;
1938 int i;
1939
1940 ATH_LOCK(sc);
1941 ath_power_set_power_state(sc, HAL_PM_AWAKE);
1942 for (i = 0; i < sc->sc_keymax; i++)
1943 ath_hal_keyreset(ah, i);
1944 ath_power_restore_power_state(sc);
1945 ATH_UNLOCK(sc);
1946 ieee80211_crypto_reload_keys(ic);
1947 }
1948
/*
 * Update the current chainmask configuration based on the
 * current operating channel and options.
 */
1953 static void
1954 ath_update_chainmasks(struct ath_softc *sc, struct ieee80211_channel *chan)
1955 {
1956
/*
 * Set TX chainmask to the currently configured chainmask;
 * the TX chainmask depends upon the operating channel -
 * legacy (non-HT) channels transmit on a single chain.
 */
1961 sc->sc_cur_rxchainmask = sc->sc_rxchainmask;
1962 if (IEEE80211_IS_CHAN_HT(chan)) {
1963 sc->sc_cur_txchainmask = sc->sc_txchainmask;
1964 } else {
1965 sc->sc_cur_txchainmask = 1;
1966 }
1967
1968 DPRINTF(sc, ATH_DEBUG_RESET,
1969 "%s: TX chainmask is now 0x%x, RX is now 0x%x\n",
1970 __func__,
1971 sc->sc_cur_txchainmask,
1972 sc->sc_cur_rxchainmask);
1973 }
1974
1975 void
1976 ath_resume(struct ath_softc *sc)
1977 {
1978 struct ieee80211com *ic = &sc->sc_ic;
1979 struct ath_hal *ah = sc->sc_ah;
1980 HAL_STATUS status;
1981
1982 ath_hal_enablepcie(ah, 0, 0);
1983
1984 /*
1985 * Must reset the chip before we reload the
1986 * keycache as we were powered down on suspend.
1987 */
1988 ath_update_chainmasks(sc,
1989 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan);
1990 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
1991 sc->sc_cur_rxchainmask);
1992
1993 /* Ensure we set the current power state to on */
1994 ATH_LOCK(sc);
1995 ath_power_setselfgen(sc, HAL_PM_AWAKE);
1996 ath_power_set_power_state(sc, HAL_PM_AWAKE);
1997 ath_power_setpower(sc, HAL_PM_AWAKE);
1998 ATH_UNLOCK(sc);
1999
2000 ath_hal_reset(ah, sc->sc_opmode,
2001 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
2002 AH_FALSE, HAL_RESET_NORMAL, &status);
2003 ath_reset_keycache(sc);
2004
2005 ATH_RX_LOCK(sc);
2006 sc->sc_rx_stopped = 1;
2007 sc->sc_rx_resetted = 1;
2008 ATH_RX_UNLOCK(sc);
2009
2010 /* Let DFS at it in case it's a DFS channel */
2011 ath_dfs_radar_enable(sc, ic->ic_curchan);
2012
/* Let spectral at it in case spectral is enabled */
2014 ath_spectral_enable(sc, ic->ic_curchan);
2015
/*
 * Let bluetooth coexistence at it in case it's needed for this channel
 */
2019 ath_btcoex_enable(sc, ic->ic_curchan);
2020
2021 /*
2022 * If we're doing TDMA, enforce the TXOP limitation for chips that
2023 * support it.
2024 */
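/*
 * NB: in TDMA it's the TXOP limit that keeps transmissions
 * from running past their assigned slot, so enforce it in
 * hardware when the chip is able to.
 */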
2025 if (sc->sc_hasenforcetxop && sc->sc_tdma)
2026 ath_hal_setenforcetxop(sc->sc_ah, 1);
2027 else
2028 ath_hal_setenforcetxop(sc->sc_ah, 0);
2029
2030 /* Restore the LED configuration */
2031 ath_led_config(sc);
2032 ath_hal_setledstate(ah, HAL_LED_INIT);
2033
2034 if (sc->sc_resume_up)
2035 ieee80211_resume_all(ic);
2036
2037 ATH_LOCK(sc);
2038 ath_power_restore_power_state(sc);
2039 ATH_UNLOCK(sc);
2040
2041 /* XXX beacons ? */
2042 }
2043
2044 void
2045 ath_shutdown(struct ath_softc *sc)
2046 {
2047
2048 ATH_LOCK(sc);
2049 ath_stop(sc);
2050 ATH_UNLOCK(sc);
2051 /* NB: no point powering down chip as we're about to reboot */
2052 }
2053
2054 /*
2055 * Interrupt handler. Most of the actual processing is deferred.
2056 */
2057 void
2058 ath_intr(void *arg)
2059 {
2060 struct ath_softc *sc = arg;
2061 struct ath_hal *ah = sc->sc_ah;
2062 HAL_INT status = 0;
2063 uint32_t txqs;
2064
2065 /*
2066 * If we're inside a reset path, just print a warning and
2067 * clear the ISR. The reset routine will finish it for us.
2068 */
2069 ATH_PCU_LOCK(sc);
2070 if (sc->sc_inreset_cnt) {
2071 HAL_INT status;
2072 ath_hal_getisr(ah, &status); /* clear ISR */
2073 ath_hal_intrset(ah, 0); /* disable further intr's */
2074 DPRINTF(sc, ATH_DEBUG_ANY,
2075 "%s: in reset, ignoring: status=0x%x\n",
2076 __func__, status);
2077 ATH_PCU_UNLOCK(sc);
2078 return;
2079 }
2080
2081 if (sc->sc_invalid) {
2082 /*
2083 * The hardware is not ready/present, don't touch anything.
2084 * Note this can happen early on if the IRQ is shared.
2085 */
2086 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
2087 ATH_PCU_UNLOCK(sc);
2088 return;
2089 }
2090 if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */
2091 ATH_PCU_UNLOCK(sc);
2092 return;
2093 }
2094
2095 ATH_LOCK(sc);
2096 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2097 ATH_UNLOCK(sc);
2098
2099 if (sc->sc_ic.ic_nrunning == 0 && sc->sc_running == 0) {
2100 HAL_INT status;
2101
2102 DPRINTF(sc, ATH_DEBUG_ANY, "%s: ic_nrunning %d sc_running %d\n",
2103 __func__, sc->sc_ic.ic_nrunning, sc->sc_running);
2104 ath_hal_getisr(ah, &status); /* clear ISR */
2105 ath_hal_intrset(ah, 0); /* disable further intr's */
2106 ATH_PCU_UNLOCK(sc);
2107
2108 ATH_LOCK(sc);
2109 ath_power_restore_power_state(sc);
2110 ATH_UNLOCK(sc);
2111 return;
2112 }
2113
/*
 * Figure out the reason(s) for the interrupt. Note
 * that the hal returns a pseudo-ISR that may include
 * bits we haven't explicitly enabled so we mask the
 * value to ensure we only process bits we requested.
 */
2120 ath_hal_getisr(ah, &status); /* NB: clears ISR too */
2121 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
2122 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status);
2123 #ifdef ATH_DEBUG_ALQ
2124 if_ath_alq_post_intr(&sc->sc_alq, status, ah->ah_intrstate,
2125 ah->ah_syncstate);
2126 #endif /* ATH_DEBUG_ALQ */
2127 #ifdef ATH_KTR_INTR_DEBUG
2128 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5,
2129 "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
2130 ah->ah_intrstate[0],
2131 ah->ah_intrstate[1],
2132 ah->ah_intrstate[2],
2133 ah->ah_intrstate[3],
2134 ah->ah_intrstate[6]);
2135 #endif
2136
2137 /* Squirrel away SYNC interrupt debugging */
2138 if (ah->ah_syncstate != 0) {
2139 int i;
2140 for (i = 0; i < 32; i++)
if (ah->ah_syncstate & (1 << i))
2142 sc->sc_intr_stats.sync_intr[i]++;
2143 }
2144
2145 status &= sc->sc_imask; /* discard unasked for bits */
2146
2147 /* Short-circuit un-handled interrupts */
2148 if (status == 0x0) {
2149 ATH_PCU_UNLOCK(sc);
2150
2151 ATH_LOCK(sc);
2152 ath_power_restore_power_state(sc);
2153 ATH_UNLOCK(sc);
2154
2155 return;
2156 }
2157
2158 /*
2159 * Take a note that we're inside the interrupt handler, so
2160 * the reset routines know to wait.
2161 */
2162 sc->sc_intr_cnt++;
2163 ATH_PCU_UNLOCK(sc);
2164
2165 /*
2166 * Handle the interrupt. We won't run concurrent with the reset
2167 * or channel change routines as they'll wait for sc_intr_cnt
2168 * to be 0 before continuing.
2169 */
2170 if (status & HAL_INT_FATAL) {
2171 sc->sc_stats.ast_hardware++;
2172 ath_hal_intrset(ah, 0); /* disable intr's until reset */
2173 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
2174 } else {
2175 if (status & HAL_INT_SWBA) {
2176 /*
2177 * Software beacon alert--time to send a beacon.
2178 * Handle beacon transmission directly; deferring
2179 * this is too slow to meet timing constraints
2180 * under load.
2181 */
2182 #ifdef IEEE80211_SUPPORT_TDMA
2183 if (sc->sc_tdma) {
2184 if (sc->sc_tdmaswba == 0) {
2185 struct ieee80211com *ic = &sc->sc_ic;
2186 struct ieee80211vap *vap =
2187 TAILQ_FIRST(&ic->ic_vaps);
2188 ath_tdma_beacon_send(sc, vap);
2189 sc->sc_tdmaswba =
2190 vap->iv_tdma->tdma_bintval;
2191 } else
2192 sc->sc_tdmaswba--;
2193 } else
2194 #endif
2195 {
2196 ath_beacon_proc(sc, 0);
2197 #ifdef IEEE80211_SUPPORT_SUPERG
2198 /*
2199 * Schedule the rx taskq in case there's no
2200 * traffic so any frames held on the staging
2201 * queue are aged and potentially flushed.
2202 */
2203 sc->sc_rx.recv_sched(sc, 1);
2204 #endif
2205 }
2206 }
2207 if (status & HAL_INT_RXEOL) {
2208 int imask;
2209 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL");
2210 if (! sc->sc_isedma) {
2211 ATH_PCU_LOCK(sc);
2212 /*
2213 * NB: the hardware should re-read the link when
2214 * RXE bit is written, but it doesn't work at
2215 * least on older hardware revs.
2216 */
2217 sc->sc_stats.ast_rxeol++;
2218 /*
2219 * Disable RXEOL/RXORN - prevent an interrupt
2220 * storm until the PCU logic can be reset.
2221 * In case the interface is reset some other
2222 * way before "sc_kickpcu" is called, don't
2223 * modify sc_imask - that way if it is reset
2224 * by a call to ath_reset() somehow, the
2225 * interrupt mask will be correctly reprogrammed.
2226 */
2227 imask = sc->sc_imask;
2228 imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
2229 ath_hal_intrset(ah, imask);
2230 /*
2231 * Only blank sc_rxlink if we've not yet kicked
2232 * the PCU.
2233 *
2234 * This isn't entirely correct - the correct solution
2235 * would be to have a PCU lock and engage that for
2236 * the duration of the PCU fiddling; which would include
2237 * running the RX process. Otherwise we could end up
2238 * messing up the RX descriptor chain and making the
2239 * RX desc list much shorter.
2240 */
2241 if (! sc->sc_kickpcu)
2242 sc->sc_rxlink = NULL;
2243 sc->sc_kickpcu = 1;
2244 ATH_PCU_UNLOCK(sc);
2245 }
2246 /*
2247 * Enqueue an RX proc to handle whatever
2248 * is in the RX queue.
2249 * This will then kick the PCU if required.
2250 */
2251 sc->sc_rx.recv_sched(sc, 1);
2252 }
2253 if (status & HAL_INT_TXURN) {
2254 sc->sc_stats.ast_txurn++;
2255 /* bump tx trigger level */
2256 ath_hal_updatetxtriglevel(ah, AH_TRUE);
2257 }
2258 /*
2259 * Handle both the legacy and RX EDMA interrupt bits.
2260 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC.
2261 */
2262 if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) {
2263 sc->sc_stats.ast_rx_intr++;
2264 sc->sc_rx.recv_sched(sc, 1);
2265 }
2266 if (status & HAL_INT_TX) {
2267 sc->sc_stats.ast_tx_intr++;
2268 /*
2269 * Grab all the currently set bits in the HAL txq bitmap
2270 * and blank them. This is the only place we should be
2271 * doing this.
2272 */
2273 if (! sc->sc_isedma) {
2274 ATH_PCU_LOCK(sc);
2275 txqs = 0xffffffff;
2276 ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
2277 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3,
2278 "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x",
2279 txqs,
2280 sc->sc_txq_active,
2281 sc->sc_txq_active | txqs);
2282 sc->sc_txq_active |= txqs;
2283 ATH_PCU_UNLOCK(sc);
2284 }
2285 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
2286 }
2287 if (status & HAL_INT_BMISS) {
2288 sc->sc_stats.ast_bmiss++;
2289 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
2290 }
2291 if (status & HAL_INT_GTT)
2292 sc->sc_stats.ast_tx_timeout++;
2293 if (status & HAL_INT_CST)
2294 sc->sc_stats.ast_tx_cst++;
2295 if (status & HAL_INT_MIB) {
2296 sc->sc_stats.ast_mib++;
2297 ATH_PCU_LOCK(sc);
2298 /*
2299 * Disable interrupts until we service the MIB
2300 * interrupt; otherwise it will continue to fire.
2301 */
2302 ath_hal_intrset(ah, 0);
2303 /*
2304 * Let the hal handle the event. We assume it will
2305 * clear whatever condition caused the interrupt.
2306 */
2307 ath_hal_mibevent(ah, &sc->sc_halstats);
2308 /*
2309 * Don't reset the interrupt if we've just
2310 * kicked the PCU, or we may get a nested
2311 * RXEOL before the rxproc has had a chance
2312 * to run.
2313 */
2314 if (sc->sc_kickpcu == 0)
2315 ath_hal_intrset(ah, sc->sc_imask);
2316 ATH_PCU_UNLOCK(sc);
2317 }
2318 if (status & HAL_INT_RXORN) {
2319 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
2320 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN");
2321 sc->sc_stats.ast_rxorn++;
2322 }
2323 if (status & HAL_INT_TSFOOR) {
2324 device_printf(sc->sc_dev, "%s: TSFOOR\n", __func__);
2325 sc->sc_syncbeacon = 1;
2326 }
2327 }
2328 ATH_PCU_LOCK(sc);
2329 sc->sc_intr_cnt--;
2330 ATH_PCU_UNLOCK(sc);
2331
2332 ATH_LOCK(sc);
2333 ath_power_restore_power_state(sc);
2334 ATH_UNLOCK(sc);
2335 }
2336
2337 static void
2338 ath_fatal_proc(void *arg, int pending)
2339 {
2340 struct ath_softc *sc = arg;
2341 u_int32_t *state;
2342 u_int32_t len;
2343 void *sp;
2344
2345 if (sc->sc_invalid)
2346 return;
2347
2348 device_printf(sc->sc_dev, "hardware error; resetting\n");
2349 /*
2350 * Fatal errors are unrecoverable. Typically these
2351 * are caused by DMA errors. Collect h/w state from
2352 * the hal so we can diagnose what's going on.
2353 */
2354 #if defined(__DragonFly__)
2355 wlan_serialize_enter();
2356 #endif
2357 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
2358 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
2359 state = sp;
2360 device_printf(sc->sc_dev,
2361 "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
state[0], state[1], state[2], state[3],
2363 state[4], state[5]);
2364 }
2365 ath_reset(sc, ATH_RESET_NOLOSS);
2366 #if defined(__DragonFly__)
2367 wlan_serialize_exit();
2368 #endif
2369 }
2370
2371 static void
2372 ath_bmiss_vap(struct ieee80211vap *vap)
2373 {
2374 struct ath_softc *sc = vap->iv_ic->ic_softc;
2375
/*
 * Work around phantom bmiss interrupts by sanity-checking
 * the time of our last rx'd frame. If it is within the
 * beacon miss interval then ignore the interrupt. If it's
 * truly a bmiss we'll get another interrupt soon and that'll
 * be dispatched up for processing. Note this applies only
 * for h/w beacon miss events.
 */
2384
2385 /*
2386 * XXX TODO: Just read the TSF during the interrupt path;
2387 * that way we don't have to wake up again just to read it
2388 * again.
2389 */
2390 ATH_LOCK(sc);
2391 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2392 ATH_UNLOCK(sc);
2393
2394 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
2395 u_int64_t lastrx = sc->sc_lastrx;
2396 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
2397 /* XXX should take a locked ref to iv_bss */
2398 u_int bmisstimeout =
2399 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;
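/*
 * NB: ni_intval is the beacon interval in TU (1024us),
 * so the scale by 1024 above yields microseconds, the
 * same units as the TSF.
 */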
2400
2401 DPRINTF(sc, ATH_DEBUG_BEACON,
2402 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
2403 __func__, (unsigned long long) tsf,
2404 (unsigned long long)(tsf - lastrx),
2405 (unsigned long long) lastrx, bmisstimeout);
2406
2407 if (tsf - lastrx <= bmisstimeout) {
2408 sc->sc_stats.ast_bmiss_phantom++;
2409
2410 ATH_LOCK(sc);
2411 ath_power_restore_power_state(sc);
2412 ATH_UNLOCK(sc);
2413
2414 return;
2415 }
2416 }
2417
2418 /*
2419 * There's no need to keep the hardware awake during the call
2420 * to av_bmiss().
2421 */
2422 ATH_LOCK(sc);
2423 ath_power_restore_power_state(sc);
2424 ATH_UNLOCK(sc);
2425
2426 /*
2427 * Attempt to force a beacon resync.
2428 */
2429 sc->sc_syncbeacon = 1;
2430
2431 ATH_VAP(vap)->av_bmiss(vap);
2432 }
2433
2434 /* XXX this needs a force wakeup! */
2435 int
2436 ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
2437 {
2438 uint32_t rsize;
2439 void *sp;
2440
2441 if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
2442 return 0;
2443 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
2444 *hangs = *(uint32_t *)sp;
2445 return 1;
2446 }
2447
2448 static void
2449 ath_bmiss_proc(void *arg, int pending)
2450 {
2451 struct ath_softc *sc = arg;
2452 uint32_t hangs;
2453
2454 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
2455
2456 ATH_LOCK(sc);
2457 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2458 ATH_UNLOCK(sc);
2459
2460 ath_beacon_miss(sc);
2461
/*
 * Do a reset upon any beacon miss event.
 *
 * It may be a non-recognised RX clear hang which needs a reset
 * to clear.
 */
2468 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
2469 ath_reset(sc, ATH_RESET_NOLOSS);
2470 device_printf(sc->sc_dev,
2471 "bb hang detected (0x%x), resetting\n", hangs);
2472 } else {
2473 ath_reset(sc, ATH_RESET_NOLOSS);
2474 ieee80211_beacon_miss(&sc->sc_ic);
2475 }
2476
2477 /* Force a beacon resync, in case they've drifted */
2478 sc->sc_syncbeacon = 1;
2479
2480 ATH_LOCK(sc);
2481 ath_power_restore_power_state(sc);
2482 ATH_UNLOCK(sc);
2483 }
2484
/*
 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
 * calcs together with WME. If necessary disable the crypto
 * hardware and mark the 802.11 state so keys will be set up
 * with the MIC work done in software.
 */
2491 static void
2492 ath_settkipmic(struct ath_softc *sc)
2493 {
2494 struct ieee80211com *ic = &sc->sc_ic;
2495
2496 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
2497 if (ic->ic_flags & IEEE80211_F_WME) {
2498 ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
2499 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
2500 } else {
2501 ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
2502 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
2503 }
2504 }
2505 }
2506
2507 static int
2508 ath_init(struct ath_softc *sc)
2509 {
2510 struct ieee80211com *ic = &sc->sc_ic;
2511 struct ath_hal *ah = sc->sc_ah;
2512 HAL_STATUS status;
2513
2514 ATH_LOCK_ASSERT(sc);
2515
2516 /*
2517 * Force the sleep state awake.
2518 */
2519 ath_power_setselfgen(sc, HAL_PM_AWAKE);
2520 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2521 ath_power_setpower(sc, HAL_PM_AWAKE);
2522
/*
 * Stop anything previously set up. This is safe
 * whether this is the first time through or not.
 */
2527 ath_stop(sc);
2528
2529 /*
2530 * The basic interface to setting the hardware in a good
2531 * state is ``reset''. On return the hardware is known to
2532 * be powered up and with interrupts disabled. This must
2533 * be followed by initialization of the appropriate bits
2534 * and then setup of the interrupt mask.
2535 */
2536 ath_settkipmic(sc);
2537 ath_update_chainmasks(sc, ic->ic_curchan);
2538 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
2539 sc->sc_cur_rxchainmask);
2540
2541 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE,
2542 HAL_RESET_NORMAL, &status)) {
2543 device_printf(sc->sc_dev,
2544 "unable to reset hardware; hal status %u\n", status);
2545 return (ENODEV);
2546 }
2547
2548 ATH_RX_LOCK(sc);
2549 sc->sc_rx_stopped = 1;
2550 sc->sc_rx_resetted = 1;
2551 ATH_RX_UNLOCK(sc);
2552
2553 ath_chan_change(sc, ic->ic_curchan);
2554
2555 /* Let DFS at it in case it's a DFS channel */
2556 ath_dfs_radar_enable(sc, ic->ic_curchan);
2557
/* Let spectral at it in case spectral is enabled */
2559 ath_spectral_enable(sc, ic->ic_curchan);
2560
/*
 * Let bluetooth coexistence at it in case it's needed for this channel
 */
2564 ath_btcoex_enable(sc, ic->ic_curchan);
2565
2566 /*
2567 * If we're doing TDMA, enforce the TXOP limitation for chips that
2568 * support it.
2569 */
2570 if (sc->sc_hasenforcetxop && sc->sc_tdma)
2571 ath_hal_setenforcetxop(sc->sc_ah, 1);
2572 else
2573 ath_hal_setenforcetxop(sc->sc_ah, 0);
2574
2575 /*
2576 * Likewise this is set during reset so update
2577 * state cached in the driver.
2578 */
2579 sc->sc_diversity = ath_hal_getdiversity(ah);
2580 sc->sc_lastlongcal = ticks;
2581 sc->sc_resetcal = 1;
2582 sc->sc_lastcalreset = 0;
2583 sc->sc_lastani = ticks;
2584 sc->sc_lastshortcal = ticks;
2585 sc->sc_doresetcal = AH_FALSE;
2586 /*
2587 * Beacon timers were cleared here; give ath_newstate()
2588 * a hint that the beacon timers should be poked when
2589 * things transition to the RUN state.
2590 */
2591 sc->sc_beacons = 0;
2592
2593 /*
2594 * Setup the hardware after reset: the key cache
2595 * is filled as needed and the receive engine is
2596 * set going. Frame transmit is handled entirely
2597 * in the frame output path; there's nothing to do
2598 * here except setup the interrupt mask.
2599 */
2600 if (ath_startrecv(sc) != 0) {
2601 device_printf(sc->sc_dev, "unable to start recv logic\n");
2602 ath_power_restore_power_state(sc);
2603 return (ENODEV);
2604 }
2605
2606 /*
2607 * Enable interrupts.
2608 */
2609 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
2610 | HAL_INT_RXORN | HAL_INT_TXURN
2611 | HAL_INT_FATAL | HAL_INT_GLOBAL;
2612
2613 /*
2614 * Enable RX EDMA bits. Note these overlap with
2615 * HAL_INT_RX and HAL_INT_RXDESC respectively.
2616 */
2617 if (sc->sc_isedma)
2618 sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP);
2619
2620 /*
2621 * If we're an EDMA NIC, we don't care about RXEOL.
2622 * Writing a new descriptor in will simply restart
2623 * RX DMA.
2624 */
2625 if (! sc->sc_isedma)
2626 sc->sc_imask |= HAL_INT_RXEOL;
2627
2628 /*
2629 * Enable MIB interrupts when there are hardware phy counters.
2630 * Note we only do this (at the moment) for station mode.
2631 */
2632 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
2633 sc->sc_imask |= HAL_INT_MIB;
2634
2635 /*
2636 * XXX add capability for this.
2637 *
2638 * If we're in STA mode (and maybe IBSS?) then register for
2639 * TSFOOR interrupts.
2640 */
2641 if (ic->ic_opmode == IEEE80211_M_STA)
2642 sc->sc_imask |= HAL_INT_TSFOOR;
2643
2644 /* Enable global TX timeout and carrier sense timeout if available */
2645 if (ath_hal_gtxto_supported(ah))
2646 sc->sc_imask |= HAL_INT_GTT;
2647
2648 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
2649 __func__, sc->sc_imask);
2650
2651 sc->sc_running = 1;
2652 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
2653 ath_hal_intrset(ah, sc->sc_imask);
2654
2655 ath_power_restore_power_state(sc);
2656
2657 return (0);
2658 }
2659
2660 static void
2661 ath_stop(struct ath_softc *sc)
2662 {
2663 struct ath_hal *ah = sc->sc_ah;
2664
2665 ATH_LOCK_ASSERT(sc);
2666
2667 /*
2668 * Wake the hardware up before fiddling with it.
2669 */
2670 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2671
2672 if (sc->sc_running) {
2673 /*
2674 * Shutdown the hardware and driver:
2675 * reset 802.11 state machine
2676 * turn off timers
2677 * disable interrupts
2678 * turn off the radio
2679 * clear transmit machinery
2680 * clear receive machinery
2681 * drain and release tx queues
2682 * reclaim beacon resources
2683 * power down hardware
2684 *
2685 * Note that some of this work is not possible if the
2686 * hardware is gone (invalid).
2687 */
2688 #ifdef ATH_TX99_DIAG
2689 if (sc->sc_tx99 != NULL)
2690 sc->sc_tx99->stop(sc->sc_tx99);
2691 #endif
2692 #if defined(__DragonFly__)
2693 callout_cancel(&sc->sc_wd_ch);
2694 #else
2695 callout_stop(&sc->sc_wd_ch);
2696 #endif
2697 sc->sc_wd_timer = 0;
2698 /* ifp->if_flags &= ~IFF_RUNNING; */
2699 sc->sc_running = 0;
2700 if (!sc->sc_invalid) {
2701 if (sc->sc_softled) {
2702 #if defined(__DragonFly__)
2703 callout_cancel(&sc->sc_ledtimer);
2704 #else
2705 callout_stop(&sc->sc_ledtimer);
2706 #endif
2707 ath_hal_gpioset(ah, sc->sc_ledpin,
2708 !sc->sc_ledon);
2709 sc->sc_blinking = 0;
2710 }
2711 ath_hal_intrset(ah, 0);
2712 }
2713 /* XXX we should stop RX regardless of whether it's valid */
2714 if (!sc->sc_invalid) {
2715 ath_stoprecv(sc, 1);
2716 ath_hal_phydisable(ah);
2717 } else
2718 sc->sc_rxlink = NULL;
2719 ath_draintxq(sc, ATH_RESET_DEFAULT);
2720 ath_beacon_free(sc); /* XXX not needed */
2721 }
2722
2723 /* And now, restore the current power state */
2724 ath_power_restore_power_state(sc);
2725 }
2726
2727 /*
2728 * Wait until all pending TX/RX has completed.
2729 *
2730 * This waits until all existing transmit, receive and interrupts
2731 * have completed. It's assumed that the caller has first
2732 * grabbed the reset lock so it doesn't try to do overlapping
2733 * chip resets.
2734 */
2735 #define MAX_TXRX_ITERATIONS 100
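/* NB: at ~10ms per sleep below this bounds the wait at roughly one second */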
2736 static void
2737 ath_txrx_stop_locked(struct ath_softc *sc)
2738 {
2739 int i = MAX_TXRX_ITERATIONS;
2740
2741 ATH_UNLOCK_ASSERT(sc);
2742 ATH_PCU_LOCK_ASSERT(sc);
2743
2744 /*
2745 * Sleep until all the pending operations have completed.
2746 *
2747 * The caller must ensure that reset has been incremented
2748 * or the pending operations may continue being queued.
2749 */
2750 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
2751 sc->sc_txstart_cnt || sc->sc_intr_cnt) {
2752 if (i <= 0)
2753 break;
2754 #if defined(__DragonFly__)
2755 if (wlan_is_serialized()) {
2756 wlan_serialize_exit();
2757 lksleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop",
2758 msecs_to_ticks(10));
2759 wlan_serialize_enter();
2760 } else {
2761 lksleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop",
2762 msecs_to_ticks(10));
2763 }
2764 #else
2765 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop",
2766 msecs_to_ticks(10));
2767 #endif
2768 i--;
2769 }
2770
2771 if (i <= 0)
2772 device_printf(sc->sc_dev,
2773 "%s: didn't finish after %d iterations\n",
2774 __func__, MAX_TXRX_ITERATIONS);
2775 }
2776 #undef MAX_TXRX_ITERATIONS
2777
2778 #if 0
2779 static void
2780 ath_txrx_stop(struct ath_softc *sc)
2781 {
2782 ATH_UNLOCK_ASSERT(sc);
2783 ATH_PCU_UNLOCK_ASSERT(sc);
2784
2785 ATH_PCU_LOCK(sc);
2786 ath_txrx_stop_locked(sc);
2787 ATH_PCU_UNLOCK(sc);
2788 }
2789 #endif
2790
2791 static void
2792 ath_txrx_start(struct ath_softc *sc)
2793 {
2794
2795 taskqueue_unblock(sc->sc_tq);
2796 }
2797
2798 /*
2799 * Grab the reset lock, and wait around until no one else
2800 * is trying to do anything with it.
2801 *
2802 * This is totally horrible but we can't hold this lock for
2803 * long enough to do TX/RX or we end up with net80211/ip stack
2804 * LORs and eventual deadlock.
2805 *
2806 * "dowait" signals whether to spin, waiting for the reset
2807 * lock count to reach 0. This should (for now) only be used
2808 * during the reset path, as the rest of the code may not
2809 * be locking-reentrant enough to behave correctly.
2810 *
2811 * Another, cleaner way should be found to serialise all of
2812 * these operations.
2813 */
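/*
 * A sketch of the pattern ath_reset() below uses with this lock:
 *
 *	ATH_PCU_LOCK(sc);
 *	ath_reset_grablock(sc, 1);	... bumps sc_inreset_cnt
 *	ath_hal_intrset(ah, 0);
 *	ath_txrx_stop_locked(sc);
 *	ATH_PCU_UNLOCK(sc);
 *	... do the actual chip reset ...
 *	ATH_PCU_LOCK(sc);
 *	sc->sc_inreset_cnt--;
 *	ath_hal_intrset(ah, sc->sc_imask);
 *	ATH_PCU_UNLOCK(sc);
 */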
2814 #define MAX_RESET_ITERATIONS 25
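/* NB: each sleep below is ~10ms (DragonFly) or 100ms (FreeBSD), bounding the wait at a few seconds */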
2815 static int
2816 ath_reset_grablock(struct ath_softc *sc, int dowait)
2817 {
2818 int w = 0;
2819 int i = MAX_RESET_ITERATIONS;
2820
2821 ATH_PCU_LOCK_ASSERT(sc);
2822 do {
2823 if (sc->sc_inreset_cnt == 0) {
2824 w = 1;
2825 break;
2826 }
2827 if (dowait == 0) {
2828 w = 0;
2829 break;
2830 }
2831 ATH_PCU_UNLOCK(sc);
2832 /*
2833 * 1 tick is likely not enough time for long calibrations
2834 * to complete. So we should wait quite a while.
2835 */
2836 #if defined(__DragonFly__)
2837 tsleep(&sc->sc_inreset_cnt, 0,
2838 "ath_reset_grablock", (hz + 99) / 100);
2839 #else
2840 pause("ath_reset_grablock", msecs_to_ticks(100));
2841 #endif
2842 i--;
2843 ATH_PCU_LOCK(sc);
2844 } while (i > 0);
2845
/*
 * We always increment the refcounter, regardless
 * of whether we succeeded in getting it exclusively.
 */
2851 sc->sc_inreset_cnt++;
2852
2853 if (i <= 0)
2854 device_printf(sc->sc_dev,
2855 "%s: didn't finish after %d iterations\n",
2856 __func__, MAX_RESET_ITERATIONS);
2857
2858 if (w == 0)
2859 device_printf(sc->sc_dev,
2860 "%s: warning, recursive reset path!\n",
2861 __func__);
2862
2863 return w;
2864 }
2865 #undef MAX_RESET_ITERATIONS
2866
2867 /*
2868 * Reset the hardware w/o losing operational state. This is
2869 * basically a more efficient way of doing ath_stop, ath_init,
2870 * followed by state transitions to the current 802.11
2871 * operational state. Used to recover from various errors and
2872 * to reset or reload hardware state.
2873 */
2874 int
2875 ath_reset(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
2876 {
2877 struct ieee80211com *ic = &sc->sc_ic;
2878 struct ath_hal *ah = sc->sc_ah;
2879 HAL_STATUS status;
2880 int i;
2881
2882 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
2883
2884 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
2885 ATH_PCU_UNLOCK_ASSERT(sc);
2886 ATH_UNLOCK_ASSERT(sc);
2887
/* Try to stop any further TX/RX from occurring */
2889 taskqueue_block(sc->sc_tq);
2890
2891 /*
2892 * Wake the hardware up.
2893 */
2894 ATH_LOCK(sc);
2895 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2896 ATH_UNLOCK(sc);
2897
2898 ATH_PCU_LOCK(sc);
2899
2900 /*
2901 * Grab the reset lock before TX/RX is stopped.
2902 *
2903 * This is needed to ensure that when the TX/RX actually does finish,
2904 * no further TX/RX/reset runs in parallel with this.
2905 */
2906 if (ath_reset_grablock(sc, 1) == 0) {
2907 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
2908 __func__);
2909 }
2910
2911 /* disable interrupts */
2912 ath_hal_intrset(ah, 0);
2913
2914 /*
2915 * Now, ensure that any in progress TX/RX completes before we
2916 * continue.
2917 */
2918 ath_txrx_stop_locked(sc);
2919
2920 ATH_PCU_UNLOCK(sc);
2921
2922 /*
2923 * Regardless of whether we're doing a no-loss flush or
2924 * not, stop the PCU and handle what's in the RX queue.
2925 * That way frames aren't dropped which shouldn't be.
2926 */
2927 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
2928 ath_rx_flush(sc);
2929
2930 /*
2931 * Should now wait for pending TX/RX to complete
2932 * and block future ones from occurring. This needs to be
2933 * done before the TX queue is drained.
2934 */
2935 ath_draintxq(sc, reset_type); /* stop xmit side */
2936
2937 ath_settkipmic(sc); /* configure TKIP MIC handling */
2938 /* NB: indicate channel change so we do a full reset */
2939 ath_update_chainmasks(sc, ic->ic_curchan);
2940 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
2941 sc->sc_cur_rxchainmask);
2942 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE,
2943 HAL_RESET_NORMAL, &status))
2944 device_printf(sc->sc_dev,
2945 "%s: unable to reset hardware; hal status %u\n",
2946 __func__, status);
2947 sc->sc_diversity = ath_hal_getdiversity(ah);
2948
2949 ATH_RX_LOCK(sc);
2950 sc->sc_rx_stopped = 1;
2951 sc->sc_rx_resetted = 1;
2952 ATH_RX_UNLOCK(sc);
2953
2954 /* Let DFS at it in case it's a DFS channel */
2955 ath_dfs_radar_enable(sc, ic->ic_curchan);
2956
/* Let spectral at it in case spectral is enabled */
2958 ath_spectral_enable(sc, ic->ic_curchan);
2959
/*
 * Let bluetooth coexistence at it in case it's needed for this channel
 */
2963 ath_btcoex_enable(sc, ic->ic_curchan);
2964
2965 /*
2966 * If we're doing TDMA, enforce the TXOP limitation for chips that
2967 * support it.
2968 */
2969 if (sc->sc_hasenforcetxop && sc->sc_tdma)
2970 ath_hal_setenforcetxop(sc->sc_ah, 1);
2971 else
2972 ath_hal_setenforcetxop(sc->sc_ah, 0);
2973
2974 if (ath_startrecv(sc) != 0) /* restart recv */
2975 device_printf(sc->sc_dev,
2976 "%s: unable to start recv logic\n", __func__);
2977 /*
2978 * We may be doing a reset in response to an ioctl
2979 * that changes the channel so update any state that
2980 * might change as a result.
2981 */
2982 ath_chan_change(sc, ic->ic_curchan);
2983 if (sc->sc_beacons) { /* restart beacons */
2984 #ifdef IEEE80211_SUPPORT_TDMA
2985 if (sc->sc_tdma)
2986 ath_tdma_config(sc, NULL);
2987 else
2988 #endif
2989 ath_beacon_config(sc, NULL);
2990 }
2991
2992 /*
2993 * Release the reset lock and re-enable interrupts here.
2994 * If an interrupt was being processed in ath_intr(),
2995 * it would disable interrupts at this point. So we have
2996 * to atomically enable interrupts and decrement the
2997 * reset counter - this way ath_intr() doesn't end up
2998 * disabling interrupts without a corresponding enable
 * in the reset or channel change path.
3000 *
3001 * Grab the TX reference in case we need to transmit.
 * That way a parallel transmit doesn't sneak in mid-reset.
3003 */
3004 ATH_PCU_LOCK(sc);
3005 sc->sc_inreset_cnt--;
3006 sc->sc_txstart_cnt++;
3007 /* XXX only do this if sc_inreset_cnt == 0? */
3008 ath_hal_intrset(ah, sc->sc_imask);
3009 ATH_PCU_UNLOCK(sc);
3010
3011 /*
3012 * TX and RX can be started here. If it were started with
3013 * sc_inreset_cnt > 0, the TX and RX path would abort.
3014 * Thus if this is a nested call through the reset or
3015 * channel change code, TX completion will occur but
3016 * RX completion and ath_start / ath_tx_start will not
3017 * run.
3018 */
3019
3020 /* Restart TX/RX as needed */
3021 ath_txrx_start(sc);
3022
3023 /* XXX TODO: we need to hold the tx refcount here! */
3024
3025 /* Restart TX completion and pending TX */
3026 if (reset_type == ATH_RESET_NOLOSS) {
3027 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
3028 if (ATH_TXQ_SETUP(sc, i)) {
3029 ATH_TXQ_LOCK(&sc->sc_txq[i]);
3030 ath_txq_restart_dma(sc, &sc->sc_txq[i]);
3031 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
3032
3033 ATH_TX_LOCK(sc);
3034 ath_txq_sched(sc, &sc->sc_txq[i]);
3035 ATH_TX_UNLOCK(sc);
3036 }
3037 }
3038 }
3039
3040 ATH_LOCK(sc);
3041 ath_power_restore_power_state(sc);
3042 ATH_UNLOCK(sc);
3043
3044 ATH_PCU_LOCK(sc);
3045 sc->sc_txstart_cnt--;
3046 ATH_PCU_UNLOCK(sc);
3047
3048 /* Handle any frames in the TX queue */
3049 /*
3050 * XXX should this be done by the caller, rather than
3051 * ath_reset() ?
3052 */
3053 ath_tx_kick(sc); /* restart xmit */
3054 return 0;
3055 }
3056
3057 static int
3058 ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
3059 {
3060 struct ieee80211com *ic = vap->iv_ic;
3061 struct ath_softc *sc = ic->ic_softc;
3062 struct ath_hal *ah = sc->sc_ah;
3063
3064 switch (cmd) {
3065 case IEEE80211_IOC_TXPOWER:
3066 /*
3067 * If per-packet TPC is enabled, then we have nothing
3068 * to do; otherwise we need to force the global limit.
3069 * All this can happen directly; no need to reset.
3070 */
3071 if (!ath_hal_gettpc(ah))
3072 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
3073 return 0;
3074 }
3075 /* XXX? Full or NOLOSS? */
3076 return ath_reset(sc, ATH_RESET_FULL);
3077 }
3078
3079 struct ath_buf *
3080 _ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
3081 {
3082 struct ath_buf *bf;
3083
3084 ATH_TXBUF_LOCK_ASSERT(sc);
3085
3086 if (btype == ATH_BUFTYPE_MGMT)
3087 bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt);
3088 else
3089 bf = TAILQ_FIRST(&sc->sc_txbuf);
3090
3091 if (bf == NULL) {
3092 sc->sc_stats.ast_tx_getnobuf++;
3093 } else {
3094 if (bf->bf_flags & ATH_BUF_BUSY) {
3095 sc->sc_stats.ast_tx_getbusybuf++;
3096 bf = NULL;
3097 }
3098 }
3099
3100 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
3101 if (btype == ATH_BUFTYPE_MGMT)
3102 TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
3103 else {
3104 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
3105 sc->sc_txbuf_cnt--;
3106
/*
 * This shouldn't happen; however just to be
 * safe print a warning and fudge the txbuf
 * count.
 */
3112 if (sc->sc_txbuf_cnt < 0) {
3113 device_printf(sc->sc_dev,
3114 "%s: sc_txbuf_cnt < 0?\n",
3115 __func__);
3116 sc->sc_txbuf_cnt = 0;
3117 }
3118 }
3119 } else
3120 bf = NULL;
3121
3122 if (bf == NULL) {
3123 /* XXX should check which list, mgmt or otherwise */
3124 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
3125 TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
3126 "out of xmit buffers" : "xmit buffer busy");
3127 return NULL;
3128 }
3129
3130 /* XXX TODO: should do this at buffer list initialisation */
3131 /* XXX (then, ensure the buffer has the right flag set) */
3132 bf->bf_flags = 0;
3133 if (btype == ATH_BUFTYPE_MGMT)
3134 bf->bf_flags |= ATH_BUF_MGMT;
3135 else
3136 bf->bf_flags &= (~ATH_BUF_MGMT);
3137
3138 /* Valid bf here; clear some basic fields */
3139 bf->bf_next = NULL; /* XXX just to be sure */
3140 bf->bf_last = NULL; /* XXX again, just to be sure */
3141 bf->bf_comp = NULL; /* XXX again, just to be sure */
3142 bzero(&bf->bf_state, sizeof(bf->bf_state));
3143
3144 /*
3145 * Track the descriptor ID only if doing EDMA
3146 */
3147 if (sc->sc_isedma) {
3148 bf->bf_descid = sc->sc_txbuf_descid;
3149 sc->sc_txbuf_descid++;
3150 }
3151
3152 return bf;
3153 }
3154
3155 /*
3156 * When retrying a software frame, buffers marked ATH_BUF_BUSY
3157 * can't be thrown back on the queue as they could still be
3158 * in use by the hardware.
3159 *
3160 * This duplicates the buffer, or returns NULL.
3161 *
3162 * The descriptor is also copied but the link pointers and
3163 * the DMA segments aren't copied; this frame should thus
3164 * be again passed through the descriptor setup/chain routines
3165 * so the link is correct.
3166 *
3167 * The caller must free the buffer using ath_freebuf().
3168 */
3169 struct ath_buf *
3170 ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf)
3171 {
3172 struct ath_buf *tbf;
3173
3174 tbf = ath_getbuf(sc,
3175 (bf->bf_flags & ATH_BUF_MGMT) ?
3176 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL);
3177 if (tbf == NULL)
3178 return NULL; /* XXX failure? Why? */
3179
3180 /* Copy basics */
3181 tbf->bf_next = NULL;
3182 tbf->bf_nseg = bf->bf_nseg;
3183 tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE;
3184 tbf->bf_status = bf->bf_status;
3185 tbf->bf_m = bf->bf_m;
3186 tbf->bf_node = bf->bf_node;
3187 KASSERT((bf->bf_node != NULL), ("%s: bf_node=NULL!", __func__));
3188 /* will be setup by the chain/setup function */
3189 tbf->bf_lastds = NULL;
3190 /* for now, last == self */
3191 tbf->bf_last = tbf;
3192 tbf->bf_comp = bf->bf_comp;
3193
3194 /* NOTE: DMA segments will be setup by the setup/chain functions */
3195
3196 /* The caller has to re-init the descriptor + links */
3197
3198 /*
3199 * Free the DMA mapping here, before we NULL the mbuf.
3200 * We must only call bus_dmamap_unload() once per mbuf chain
3201 * or behaviour is undefined.
3202 */
3203 if (bf->bf_m != NULL) {
3204 /*
3205 * XXX is this POSTWRITE call required?
3206 */
3207 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3208 BUS_DMASYNC_POSTWRITE);
3209 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3210 }
3211
3212 bf->bf_m = NULL;
3213 bf->bf_node = NULL;
3214
3215 /* Copy state */
3216 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state));
3217
3218 return tbf;
3219 }
3220
3221 struct ath_buf *
3222 ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype)
3223 {
3224 struct ath_buf *bf;
3225
3226 ATH_TXBUF_LOCK(sc);
3227 bf = _ath_getbuf_locked(sc, btype);
3228 /*
3229 * If a mgmt buffer was requested but we're out of those,
3230 * try requesting a normal one.
3231 */
3232 if (bf == NULL && btype == ATH_BUFTYPE_MGMT)
3233 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
3234 ATH_TXBUF_UNLOCK(sc);
3235 if (bf == NULL) {
3236 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
3237 sc->sc_stats.ast_tx_qstop++;
3238 }
3239 return bf;
3240 }
3241
3242 /*
3243 * Transmit a single frame.
3244 *
3245 * net80211 will free the node reference if the transmit
3246 * fails, so don't free the node reference here.
3247 */
3248 static int
3249 ath_transmit(struct ieee80211com *ic, struct mbuf *m)
3250 {
3251 struct ath_softc *sc = ic->ic_softc;
3252 struct ieee80211_node *ni;
3253 struct mbuf *next;
3254 struct ath_buf *bf;
3255 ath_bufhead frags;
3256 int retval = 0;
3257
3258 /*
3259 * Tell the reset path that we're currently transmitting.
3260 */
3261 ATH_PCU_LOCK(sc);
3262 if (sc->sc_inreset_cnt > 0) {
3263 DPRINTF(sc, ATH_DEBUG_XMIT,
3264 "%s: sc_inreset_cnt > 0; bailing\n", __func__);
3265 ATH_PCU_UNLOCK(sc);
3266 sc->sc_stats.ast_tx_qstop++;
3267 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish");
3268 /* mbuf left intact on error */
3269 return (ENOBUFS); /* XXX should be EINVAL or? */
3270 }
3271 sc->sc_txstart_cnt++;
3272 ATH_PCU_UNLOCK(sc);
3273
3274 /* Wake the hardware up already */
3275 ATH_LOCK(sc);
3276 ath_power_set_power_state(sc, HAL_PM_AWAKE);
3277 ATH_UNLOCK(sc);
3278
3279 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: start");
3280 /*
3281 * Grab the TX lock - it's ok to do this here; we haven't
3282 * yet started transmitting.
3283 */
3284 ATH_TX_LOCK(sc);
3285
3286 /*
3287 * Node reference, if there's one.
3288 */
3289 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
3290
3291 /*
3292 * Enforce how deep a node queue can get.
3293 *
3294 * XXX it would be nicer if we kept an mbuf queue per
3295 * node and only whacked them into ath_bufs when we
3296 * are ready to schedule some traffic from them.
3297 * .. that may come later.
3298 *
3299 * XXX we should also track the per-node hardware queue
3300 * depth so it is easy to limit the _SUM_ of the swq and
3301 * hwq frames. Since we only schedule two HWQ frames
3302 * at a time, this should be OK for now.
3303 */
3304 if ((!(m->m_flags & M_EAPOL)) &&
3305 (ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) {
3306 sc->sc_stats.ast_tx_nodeq_overflow++;
3307 retval = ENOBUFS;
3308 goto finish;
3309 }
3310
3311 /*
3312 * Check how many TX buffers are available.
3313 *
3314 * If this is for non-EAPOL traffic, just leave some
3315 * space free in order for buffer cloning and raw
3316 * frame transmission to occur.
3317 *
3318 * If it's for EAPOL traffic, ignore this for now.
3319 * Management traffic will be sent via the raw transmit
3320 * method which bypasses this check.
3321 *
3322 * This is needed to ensure that EAPOL frames during
3323 * (re) keying have a chance to go out.
3324 *
3325 * See kern/138379 for more information.
3326 */
3327 if ((!(m->m_flags & M_EAPOL)) &&
3328 (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) {
3329 sc->sc_stats.ast_tx_nobuf++;
3330 retval = ENOBUFS;
3331 goto finish;
3332 }
3333
3334 /*
3335 * Grab a TX buffer and associated resources.
3336 *
3337 * If it's an EAPOL frame, allocate a MGMT ath_buf.
 * That way temporary buffer exhaustion in the data path
 * doesn't leave us without the ability to transmit
 * management frames.
3341 *
3342 * Otherwise allocate a normal buffer.
3343 */
3344 if (m->m_flags & M_EAPOL)
3345 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
3346 else
3347 bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
3348
3349 if (bf == NULL) {
3350 /*
3351 * If we failed to allocate a buffer, fail.
3352 *
3353 * We shouldn't fail normally, due to the check
3354 * above.
3355 */
3356 sc->sc_stats.ast_tx_nobuf++;
3357 retval = ENOBUFS;
3358 goto finish;
3359 }
3360
3361 /*
3362 * At this point we have a buffer; so we need to free it
3363 * if we hit any error conditions.
3364 */
3365
3366 /*
3367 * Check for fragmentation. If this frame
3368 * has been broken up verify we have enough
3369 * buffers to send all the fragments so all
3370 * go out or none...
3371 */
3372 TAILQ_INIT(&frags);
3373 if ((m->m_flags & M_FRAG) &&
3374 !ath_txfrag_setup(sc, &frags, m, ni)) {
3375 DPRINTF(sc, ATH_DEBUG_XMIT,
3376 "%s: out of txfrag buffers\n", __func__);
3377 sc->sc_stats.ast_tx_nofrag++;
3378 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
3379 /*
3380 * XXXGL: is mbuf valid after ath_txfrag_setup? If yes,
3381 * we shouldn't free it but return back.
3382 */
3383 ieee80211_free_mbuf(m);
3384 m = NULL;
3385 goto bad;
3386 }
3387
3388 /*
3389 * At this point if we have any TX fragments, then we will
3390 * have bumped the node reference once for each of those.
3391 */
3392
3393 /*
3394 * XXX Is there anything actually _enforcing_ that the
3395 * fragments are being transmitted in one hit, rather than
3396 * being interleaved with other transmissions on that
3397 * hardware queue?
3398 *
3399 * The ATH TX output lock is the only thing serialising this
3400 * right now.
3401 */
3402
3403 /*
3404 * Calculate the "next fragment" length field in ath_buf
3405 * in order to let the transmit path know enough about
3406 * what to next write to the hardware.
3407 */
3408 if (m->m_flags & M_FRAG) {
3409 struct ath_buf *fbf = bf;
3410 struct ath_buf *n_fbf = NULL;
3411 struct mbuf *fm = m->m_nextpkt;
3412
3413 /*
3414 * We need to walk the list of fragments and set
3415 * the next size to the following buffer.
3416 * However, the first buffer isn't in the frag
3417 * list, so we have to do some gymnastics here.
3418 */
3419 TAILQ_FOREACH(n_fbf, &frags, bf_list) {
3420 fbf->bf_nextfraglen = fm->m_pkthdr.len;
3421 fbf = n_fbf;
3422 fm = fm->m_nextpkt;
3423 }
3424 }
3425
3426 nextfrag:
3427 /*
3428 * Pass the frame to the h/w for transmission.
3429 * Fragmented frames have each frag chained together
3430 * with m_nextpkt. We know there are sufficient ath_buf's
3431 * to send all the frags because of work done by
3432 * ath_txfrag_setup. We leave m_nextpkt set while
3433 * calling ath_tx_start so it can use it to extend the
 * tx duration to cover the subsequent frag and
3435 * so it can reclaim all the mbufs in case of an error;
3436 * ath_tx_start clears m_nextpkt once it commits to
3437 * handing the frame to the hardware.
3438 *
3439 * Note: if this fails, then the mbufs are freed but
3440 * not the node reference.
3441 *
3442 * So, we now have to free the node reference ourselves here
3443 * and return OK up to the stack.
3444 */
3445 next = m->m_nextpkt;
3446 if (ath_tx_start(sc, ni, bf, m)) {
3447 bad:
3448 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
3449 reclaim:
3450 bf->bf_m = NULL;
3451 bf->bf_node = NULL;
3452 ATH_TXBUF_LOCK(sc);
3453 ath_returnbuf_head(sc, bf);
3454 /*
3455 * Free the rest of the node references and
3456 * buffers for the fragment list.
3457 */
3458 ath_txfrag_cleanup(sc, &frags, ni);
3459 ATH_TXBUF_UNLOCK(sc);
3460
3461 /*
3462 * XXX: And free the node/return OK; ath_tx_start() may have
3463 * modified the buffer. We currently have no way to
3464 * signify that the mbuf was freed but there was an error.
3465 */
3466 ieee80211_free_node(ni);
3467 retval = 0;
3468 goto finish;
3469 }
3470
3471 /*
3472 * Check here if the node is in power save state.
3473 */
3474 ath_tx_update_tim(sc, ni, 1);
3475
3476 if (next != NULL) {
3477 /*
3478 * Beware of state changing between frags.
3479 * XXX check sta power-save state?
3480 */
3481 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
3482 DPRINTF(sc, ATH_DEBUG_XMIT,
3483 "%s: flush fragmented packet, state %s\n",
3484 __func__,
3485 ieee80211_state_name[ni->ni_vap->iv_state]);
3486 /* XXX dmamap */
3487 ieee80211_free_mbuf(next);
3488 goto reclaim;
3489 }
3490 m = next;
3491 bf = TAILQ_FIRST(&frags);
3492 KASSERT(bf != NULL, ("no buf for txfrag"));
3493 TAILQ_REMOVE(&frags, bf, bf_list);
3494 goto nextfrag;
3495 }
3496
3497 /*
3498 * Bump watchdog timer.
3499 */
3500 sc->sc_wd_timer = 5;
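/* NB: the watchdog ticks once a second (see ath_init), so this gives TX ~5s to complete */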
3501
3502 finish:
3503 ATH_TX_UNLOCK(sc);
3504
3505 /*
3506 * Finished transmitting!
3507 */
3508 ATH_PCU_LOCK(sc);
3509 sc->sc_txstart_cnt--;
3510 ATH_PCU_UNLOCK(sc);
3511
3512 /* Sleep the hardware if required */
3513 ATH_LOCK(sc);
3514 ath_power_restore_power_state(sc);
3515 ATH_UNLOCK(sc);
3516
3517 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: finished");
3518
3519 return (retval);
3520 }
3521
3522 static int
3523 ath_media_change(struct ifnet *ifp)
3524 {
3525 int error = ieee80211_media_change(ifp);
3526 /* NB: only the fixed rate can change and that doesn't need a reset */
3527 return (error == ENETRESET ? 0 : error);
3528 }
3529
3530 /*
3531 * Block/unblock tx+rx processing while a key change is done.
3532 * We assume the caller serializes key management operations
3533 * so we only need to worry about synchronization with other
3534 * uses that originate in the driver.
3535 */
3536 static void
3537 ath_key_update_begin(struct ieee80211vap *vap)
3538 {
3539 struct ath_softc *sc = vap->iv_ic->ic_softc;
3540
3541 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
3542 taskqueue_block(sc->sc_tq);
3543 }
3544
3545 static void
3546 ath_key_update_end(struct ieee80211vap *vap)
3547 {
3548 struct ath_softc *sc = vap->iv_ic->ic_softc;
3549
3550 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
3551 taskqueue_unblock(sc->sc_tq);
3552 }
3553
3554 static void
3555 ath_update_promisc(struct ieee80211com *ic)
3556 {
3557 struct ath_softc *sc = ic->ic_softc;
3558 u_int32_t rfilt;
3559
3560 /* configure rx filter */
3561 ATH_LOCK(sc);
3562 ath_power_set_power_state(sc, HAL_PM_AWAKE);
3563 rfilt = ath_calcrxfilter(sc);
3564 ath_hal_setrxfilter(sc->sc_ah, rfilt);
3565 ath_power_restore_power_state(sc);
3566 ATH_UNLOCK(sc);
3567
3568 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
3569 }
3570
3571 /*
3572 * Driver-internal mcast update call.
3573 *
3574 * Assumes the hardware is already awake.
3575 */
3576 static void
3577 ath_update_mcast_hw(struct ath_softc *sc)
3578 {
3579 struct ieee80211com *ic = &sc->sc_ic;
3580 u_int32_t mfilt[2];
3581
3582 /* calculate and install multicast filter */
3583 if (ic->ic_allmulti == 0) {
3584 struct ieee80211vap *vap;
3585 struct ifnet *ifp;
3586 struct ifmultiaddr *ifma;
3587
3588 /*
3589 * Merge multicast addresses to form the hardware filter.
3590 */
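/*
 * NB: each 48-bit address is folded into a 6-bit hash
 * below; that hash picks one of 64 filter bits spread
 * across the two 32-bit mfilt words.
 */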
3591 mfilt[0] = mfilt[1] = 0;
3592 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
3593 ifp = vap->iv_ifp;
3594 #if defined(__DragonFly__)
3595 /* nothing */
3596 #else
3597 if_maddr_rlock(ifp);
3598 #endif
3599 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3600 caddr_t dl;
3601 u_int32_t val;
3602 u_int8_t pos;
3603
/* calculate XOR of eight 6-bit values */
3605 dl = LLADDR((struct sockaddr_dl *)
3606 ifma->ifma_addr);
3607 val = le32dec(dl + 0);
3608 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^
3609 val;
3610 val = le32dec(dl + 3);
3611 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^
3612 val;
3613 pos &= 0x3f;
3614 mfilt[pos / 32] |= (1 << (pos % 32));
3615 }
3616 #if defined(__DragonFly__)
3617 /* nothing */
3618 #else
3619 if_maddr_runlock(ifp);
3620 #endif
3621 }
3622 } else
3623 mfilt[0] = mfilt[1] = ~0;
3624
3625 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
3626
3627 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
3628 __func__, mfilt[0], mfilt[1]);
3629 }
3630
3631 /*
3632 * Called from the net80211 layer - force the hardware
3633 * awake before operating.
3634 */
3635 static void
3636 ath_update_mcast(struct ieee80211com *ic)
3637 {
3638 struct ath_softc *sc = ic->ic_softc;
3639
3640 ATH_LOCK(sc);
3641 ath_power_set_power_state(sc, HAL_PM_AWAKE);
3642 ATH_UNLOCK(sc);
3643
3644 ath_update_mcast_hw(sc);
3645
3646 ATH_LOCK(sc);
3647 ath_power_restore_power_state(sc);
3648 ATH_UNLOCK(sc);
3649 }
3650
3651 void
3652 ath_mode_init(struct ath_softc *sc)
3653 {
3654 struct ieee80211com *ic = &sc->sc_ic;
3655 struct ath_hal *ah = sc->sc_ah;
3656 u_int32_t rfilt;
3657
3658 /* configure rx filter */
3659 rfilt = ath_calcrxfilter(sc);
3660 ath_hal_setrxfilter(ah, rfilt);
3661
3662 /* configure operational mode */
3663 ath_hal_setopmode(ah);
3664
3665 /* handle any link-level address change */
3666 ath_hal_setmac(ah, ic->ic_macaddr);
3667
3668 /* calculate and install multicast filter */
3669 ath_update_mcast_hw(sc);
3670 }
3671
3672 /*
3673 * Set the slot time based on the current setting.
3674 */
3675 void
3676 ath_setslottime(struct ath_softc *sc)
3677 {
3678 struct ieee80211com *ic = &sc->sc_ic;
3679 struct ath_hal *ah = sc->sc_ah;
3680 u_int usec;
3681
3682 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
3683 usec = 13;
3684 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
3685 usec = 21;
3686 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
3687 /* honor short/long slot time only in 11g */
3688 /* XXX shouldn't honor on pure g or turbo g channel */
3689 if (ic->ic_flags & IEEE80211_F_SHSLOT)
3690 usec = HAL_SLOT_TIME_9;
3691 else
3692 usec = HAL_SLOT_TIME_20;
3693 } else
3694 usec = HAL_SLOT_TIME_9;
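/* NB: everything else (e.g. 11a) gets the 9us short slot */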
3695
3696 DPRINTF(sc, ATH_DEBUG_RESET,
3697 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
3698 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
3699 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
3700
3701 /* Wake up the hardware first before updating the slot time */
3702 ATH_LOCK(sc);
3703 ath_power_set_power_state(sc, HAL_PM_AWAKE);
3704 ath_hal_setslottime(ah, usec);
3705 ath_power_restore_power_state(sc);
3706 sc->sc_updateslot = OK;
3707 ATH_UNLOCK(sc);
3708 }
3709
3710 /*
3711 * Callback from the 802.11 layer to update the
3712 * slot time based on the current setting.
3713 */
3714 static void
3715 ath_updateslot(struct ieee80211com *ic)
3716 {
3717 struct ath_softc *sc = ic->ic_softc;
3718
3719 /*
3720 * When not coordinating the BSS, change the hardware
3721 * immediately. For other operation we defer the change
3722 * until beacon updates have propagated to the stations.
3723 *
3724 * XXX sc_updateslot isn't changed behind a lock?
3725 */
3726 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
3727 ic->ic_opmode == IEEE80211_M_MBSS)
3728 sc->sc_updateslot = UPDATE;
3729 else
3730 ath_setslottime(sc);
3731 }
3732
3733 /*
3734 * Append the contents of src to dst; both queues
3735 * are assumed to be locked.
3736 */
3737 void
3738 ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
3739 {
3740
3741 ATH_TXQ_LOCK_ASSERT(src);
3742 ATH_TXQ_LOCK_ASSERT(dst);
3743
3744 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
3745 dst->axq_link = src->axq_link;
3746 src->axq_link = NULL;
3747 dst->axq_depth += src->axq_depth;
3748 dst->axq_aggr_depth += src->axq_aggr_depth;
3749 src->axq_depth = 0;
3750 src->axq_aggr_depth = 0;
3751 }
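
/*
 * A sketch of how a caller splices queues with ath_txqmove(); both
 * locks must be held across the move (the names src/dst here are
 * illustrative only):
 *
 *	ATH_TXQ_LOCK(src);
 *	ATH_TXQ_LOCK(dst);
 *	ath_txqmove(dst, src);	// src is left empty; dst owns the frames
 *	ATH_TXQ_UNLOCK(dst);
 *	ATH_TXQ_UNLOCK(src);
 */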
3752
3753 /*
3754  * Reset the hardware without losing queued frames (a "no-loss" reset).
3755  *
3756  * This can't be used for a general-case reset.
3757 */
3758 static void
3759 ath_reset_proc(void *arg, int pending)
3760 {
3761 struct ath_softc *sc = arg;
3762
3763 #if 0
3764 device_printf(sc->sc_dev, "%s: resetting\n", __func__);
3765 #endif
3766 #if defined(__DragonFly__)
3767 wlan_serialize_enter();
3768 #endif
3769 ath_reset(sc, ATH_RESET_NOLOSS);
3770 #if defined(__DragonFly__)
3771 wlan_serialize_exit();
3772 #endif
3773 }
3774
3775 /*
3776 * Reset the hardware after detecting beacons have stopped.
3777 */
3778 static void
3779 ath_bstuck_proc(void *arg, int pending)
3780 {
3781 struct ath_softc *sc = arg;
3782 uint32_t hangs = 0;
3783
3784 #if defined(__DragonFly__)
3785 wlan_serialize_enter();
3786 #endif
3787 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
3788 device_printf(sc->sc_dev, "bb hang detected (0x%x)\n", hangs);
3789
3790 #ifdef ATH_DEBUG_ALQ
3791 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_STUCK_BEACON))
3792 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_STUCK_BEACON, 0, NULL);
3793 #endif
3794
3795 device_printf(sc->sc_dev, "stuck beacon; resetting (bmiss count %u)\n",
3796 sc->sc_bmisscount);
3797 sc->sc_stats.ast_bstuck++;
3798 /*
3799 	 * This assumes that there's no simultaneous channel mode change.
3800 */
3801 ath_reset(sc, ATH_RESET_NOLOSS);
3802 #if defined(__DragonFly__)
3803 wlan_serialize_exit();
3804 #endif
3805 }
3806
3807 static int
3808 ath_desc_alloc(struct ath_softc *sc)
3809 {
3810 int error;
3811
3812 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3813 "tx", sc->sc_tx_desclen, ath_txbuf, ATH_MAX_SCATTER);
3814 if (error != 0) {
3815 return error;
3816 }
3817 sc->sc_txbuf_cnt = ath_txbuf;
3818
3819 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt,
3820 "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt,
3821 ATH_TXDESC);
3822 if (error != 0) {
3823 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3824 return error;
3825 }
3826
3827 /*
3828 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the
3829 * flag doesn't have to be set in ath_getbuf_locked().
3830 */
3831
3832 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3833 "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1);
3834 if (error != 0) {
3835 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3836 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
3837 &sc->sc_txbuf_mgmt);
3838 return error;
3839 }
3840 return 0;
3841 }
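
/*
 * Note the unwind shape of ath_desc_alloc() above: each failing
 * ath_descdma_setup() tears down only the allocations that preceded
 * it, so ath_desc_free() is never needed on the error path.  In
 * general form (a sketch; setup/cleanup are placeholders):
 *
 *	if (setup(A) != 0)
 *		return error;
 *	if (setup(B) != 0) {
 *		cleanup(A);
 *		return error;
 *	}
 *	if (setup(C) != 0) {
 *		cleanup(A);
 *		cleanup(B);
 *		return error;
 *	}
 */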
3842
3843 static void
3844 ath_desc_free(struct ath_softc *sc)
3845 {
3846
3847 if (sc->sc_bdma.dd_desc_len != 0)
3848 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
3849 if (sc->sc_txdma.dd_desc_len != 0)
3850 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3851 if (sc->sc_txdma_mgmt.dd_desc_len != 0)
3852 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
3853 &sc->sc_txbuf_mgmt);
3854 }
3855
3856 static struct ieee80211_node *
3857 ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3858 {
3859 struct ieee80211com *ic = vap->iv_ic;
3860 struct ath_softc *sc = ic->ic_softc;
3861 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3862 struct ath_node *an;
3863
3864 an = kmalloc(space, M_80211_NODE, M_INTWAIT | M_ZERO);
3865 if (an == NULL) {
3866 /* XXX stat+msg */
3867 return NULL;
3868 }
3869 ath_rate_node_init(sc, an);
3870
3871 	/* Setup the mutex; there's no associd yet, so name the node by its pointer */
3872 ksnprintf(an->an_name, sizeof(an->an_name), "%s: node %p",
3873 device_get_nameunit(sc->sc_dev), an);
3874 #if defined(__DragonFly__)
3875 lockinit(&an->an_mtx, an->an_name, 0, 0);
3876 #else
3877 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF);
3878 #endif
3879
3880 /* XXX setup ath_tid */
3881 ath_tx_tid_init(sc, an);
3882
3883 #if defined(__DragonFly__)
3884 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: an %p\n", __func__,
3885 ath_hal_ether_sprintf(mac), an);
3886 #else
3887 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, mac, ":", an);
3888 #endif
3889 return &an->an_node;
3890 }
3891
3892 static void
3893 ath_node_cleanup(struct ieee80211_node *ni)
3894 {
3895 struct ieee80211com *ic = ni->ni_ic;
3896 struct ath_softc *sc = ic->ic_softc;
3897
3898 #if defined(__DragonFly__)
3899 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: an %p\n", __func__,
3900 ath_hal_ether_sprintf(ni->ni_macaddr), ATH_NODE(ni));
3901 #else
3902 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__,
3903 ni->ni_macaddr, ":", ATH_NODE(ni));
3904 #endif
3905
3906 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */
3907 ath_tx_node_flush(sc, ATH_NODE(ni));
3908 ath_rate_node_cleanup(sc, ATH_NODE(ni));
3909 sc->sc_node_cleanup(ni);
3910 }
3911
3912 static void
3913 ath_node_free(struct ieee80211_node *ni)
3914 {
3915 struct ieee80211com *ic = ni->ni_ic;
3916 struct ath_softc *sc = ic->ic_softc;
3917
3918 #if defined(__DragonFly__)
3919 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: an %p\n", __func__,
3920 ath_hal_ether_sprintf(ni->ni_macaddr), ATH_NODE(ni));
3921 #else
3922 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__,
3923 ni->ni_macaddr, ":", ATH_NODE(ni));
3924 #endif
3925 #if defined(__DragonFly__)
3926 lockuninit(&ATH_NODE(ni)->an_mtx);
3927 #else
3928 mtx_destroy(&ATH_NODE(ni)->an_mtx);
3929 #endif
3930 sc->sc_node_free(ni);
3931 }
3932
3933 static void
3934 ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3935 {
3936 struct ieee80211com *ic = ni->ni_ic;
3937 struct ath_softc *sc = ic->ic_softc;
3938 struct ath_hal *ah = sc->sc_ah;
3939
3940 *rssi = ic->ic_node_getrssi(ni);
3941 if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3942 *noise = ath_hal_getchannoise(ah, ni->ni_chan);
3943 else
3944 *noise = -95; /* nominally correct */
3945 }
3946
3947 /*
3948 * Set the default antenna.
3949 */
3950 void
3951 ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3952 {
3953 struct ath_hal *ah = sc->sc_ah;
3954
3955 /* XXX block beacon interrupts */
3956 ath_hal_setdefantenna(ah, antenna);
3957 if (sc->sc_defant != antenna)
3958 sc->sc_stats.ast_ant_defswitch++;
3959 sc->sc_defant = antenna;
3960 sc->sc_rxotherant = 0;
3961 }
3962
3963 static void
3964 ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
3965 {
3966 txq->axq_qnum = qnum;
3967 txq->axq_ac = 0;
3968 txq->axq_depth = 0;
3969 txq->axq_aggr_depth = 0;
3970 txq->axq_intrcnt = 0;
3971 txq->axq_link = NULL;
3972 txq->axq_softc = sc;
3973 TAILQ_INIT(&txq->axq_q);
3974 TAILQ_INIT(&txq->axq_tidq);
3975 TAILQ_INIT(&txq->fifo.axq_q);
3976 ATH_TXQ_LOCK_INIT(sc, txq);
3977 }
3978
3979 /*
3980 * Setup a h/w transmit queue.
3981 */
3982 static struct ath_txq *
3983 ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
3984 {
3985 struct ath_hal *ah = sc->sc_ah;
3986 HAL_TXQ_INFO qi;
3987 int qnum;
3988
3989 memset(&qi, 0, sizeof(qi));
3990 qi.tqi_subtype = subtype;
3991 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
3992 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
3993 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
3994 /*
3995 * Enable interrupts only for EOL and DESC conditions.
3996 	 * We mark tx descriptors to receive a DESC interrupt
3997 	 * when a tx queue gets deep; otherwise we wait for the
3998 	 * EOL interrupt to reap descriptors. Note that this is
3999 	 * done to reduce interrupt load and it only defers
4000 	 * reaping descriptors, never transmitting frames. Aside
4001 	 * from reducing interrupts this also permits more
4002 	 * concurrency. The only potential downside is if the tx
4003 	 * queue backs up, in which case the top half of the
4004 	 * kernel may stall due to a lack of tx descriptors.
4005 */
4006 if (sc->sc_isedma)
4007 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE |
4008 HAL_TXQ_TXOKINT_ENABLE;
4009 else
4010 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE |
4011 HAL_TXQ_TXDESCINT_ENABLE;
4012
4013 qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
4014 if (qnum == -1) {
4015 /*
4016 * NB: don't print a message, this happens
4017 * normally on parts with too few tx queues
4018 */
4019 return NULL;
4020 }
4021 if (qnum >= nitems(sc->sc_txq)) {
4022 device_printf(sc->sc_dev,
4023 "hal qnum %u out of range, max %zu!\n",
4024 qnum, nitems(sc->sc_txq));
4025 ath_hal_releasetxqueue(ah, qnum);
4026 return NULL;
4027 }
4028 if (!ATH_TXQ_SETUP(sc, qnum)) {
4029 ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
4030 sc->sc_txqsetup |= 1<<qnum;
4031 }
4032 return &sc->sc_txq[qnum];
4033 }
4034
4035 /*
4036 * Setup a hardware data transmit queue for the specified
4037  * access category (AC). The hal may not support all requested
4038 * queues in which case it will return a reference to a
4039 * previously setup queue. We record the mapping from ac's
4040 * to h/w queues for use by ath_tx_start and also track
4041 * the set of h/w queues being used to optimize work in the
4042 * transmit interrupt handler and related routines.
4043 */
4044 static int
4045 ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
4046 {
4047 struct ath_txq *txq;
4048
4049 if (ac >= nitems(sc->sc_ac2q)) {
4050 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
4051 ac, nitems(sc->sc_ac2q));
4052 return 0;
4053 }
4054 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
4055 if (txq != NULL) {
4056 txq->axq_ac = ac;
4057 sc->sc_ac2q[ac] = txq;
4058 return 1;
4059 } else
4060 return 0;
4061 }
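
/*
 * For illustration, the attach path is expected to establish the
 * AC to hardware queue mapping along these lines (a sketch; the real
 * call site also checks the return values and unwinds on failure):
 *
 *	ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK);
 *	ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE);
 *	ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI);
 *	ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO);
 */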
4062
4063 /*
4064 * Update WME parameters for a transmit queue.
4065 */
4066 static int
4067 ath_txq_update(struct ath_softc *sc, int ac)
4068 {
4069 #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1)
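	/*
	 * ATH_EXPONENT_TO_VALUE turns the WME log2 contention window
	 * encoding into the window itself, e.g. wmep_logcwmin = 4
	 * yields (1 << 4) - 1 = 15 slots and wmep_logcwmax = 10
	 * yields 1023.
	 */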
4070 struct ieee80211com *ic = &sc->sc_ic;
4071 struct ath_txq *txq = sc->sc_ac2q[ac];
4072 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
4073 struct ath_hal *ah = sc->sc_ah;
4074 HAL_TXQ_INFO qi;
4075
4076 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
4077 #ifdef IEEE80211_SUPPORT_TDMA
4078 if (sc->sc_tdma) {
4079 /*
4080 * AIFS is zero so there's no pre-transmit wait. The
4081 * burst time defines the slot duration and is configured
4082 * through net80211. The QCU is setup to not do post-xmit
4083 * back off, lockout all lower-priority QCU's, and fire
4084 * off the DMA beacon alert timer which is setup based
4085 * on the slot configuration.
4086 */
4087 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
4088 | HAL_TXQ_TXERRINT_ENABLE
4089 | HAL_TXQ_TXURNINT_ENABLE
4090 | HAL_TXQ_TXEOLINT_ENABLE
4091 | HAL_TXQ_DBA_GATED
4092 | HAL_TXQ_BACKOFF_DISABLE
4093 | HAL_TXQ_ARB_LOCKOUT_GLOBAL
4094 ;
4095 qi.tqi_aifs = 0;
4096 /* XXX +dbaprep? */
4097 qi.tqi_readyTime = sc->sc_tdmaslotlen;
4098 qi.tqi_burstTime = qi.tqi_readyTime;
4099 } else {
4100 #endif
4101 /*
4102 * XXX shouldn't this just use the default flags
4103 * used in the previous queue setup?
4104 */
4105 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
4106 | HAL_TXQ_TXERRINT_ENABLE
4107 | HAL_TXQ_TXDESCINT_ENABLE
4108 | HAL_TXQ_TXURNINT_ENABLE
4109 | HAL_TXQ_TXEOLINT_ENABLE
4110 ;
4111 qi.tqi_aifs = wmep->wmep_aifsn;
4112 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
4113 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
4114 qi.tqi_readyTime = 0;
4115 qi.tqi_burstTime = IEEE80211_TXOP_TO_US(wmep->wmep_txopLimit);
4116 #ifdef IEEE80211_SUPPORT_TDMA
4117 }
4118 #endif
4119
4120 DPRINTF(sc, ATH_DEBUG_RESET,
4121 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
4122 __func__, txq->axq_qnum, qi.tqi_qflags,
4123 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);
4124
4125 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
4126 device_printf(sc->sc_dev, "unable to update hardware queue "
4127 "parameters for %s traffic!\n", ieee80211_wme_acnames[ac]);
4128 return 0;
4129 } else {
4130 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
4131 return 1;
4132 }
4133 #undef ATH_EXPONENT_TO_VALUE
4134 }
4135
4136 /*
4137 * Callback from the 802.11 layer to update WME parameters.
4138 */
4139 int
4140 ath_wme_update(struct ieee80211com *ic)
4141 {
4142 struct ath_softc *sc = ic->ic_softc;
4143
4144 return !ath_txq_update(sc, WME_AC_BE) ||
4145 !ath_txq_update(sc, WME_AC_BK) ||
4146 !ath_txq_update(sc, WME_AC_VI) ||
4147 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
4148 }
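
/*
 * ath_wme_update() is intended to be plugged into the net80211 WME
 * update hook (ic->ic_wme.wme_update) at attach time, so a parameter
 * change propagates roughly as (sketch):
 *
 *	// net80211 recomputes wme_chanParams, then:
 *	if (ic->ic_wme.wme_update(ic) != 0)
 *		// EIO: at least one AC queue rejected the update
 */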
4149
4150 /*
4151 * Reclaim resources for a setup queue.
4152 */
4153 static void
4154 ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
4155 {
4156
4157 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
4158 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
4159 ATH_TXQ_LOCK_DESTROY(txq);
4160 }
4161
4162 /*
4163 * Reclaim all tx queue resources.
4164 */
4165 static void
4166 ath_tx_cleanup(struct ath_softc *sc)
4167 {
4168 int i;
4169
4170 ATH_TXBUF_LOCK_DESTROY(sc);
4171 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4172 if (ATH_TXQ_SETUP(sc, i))
4173 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
4174 }
4175
4176 /*
4177 * Return h/w rate index for an IEEE rate (w/o basic rate bit)
4178 * using the current rates in sc_rixmap.
4179 */
4180 int
4181 ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
4182 {
4183 int rix = sc->sc_rixmap[rate];
4184 /* NB: return lowest rix for invalid rate */
4185 return (rix == 0xff ? 0 : rix);
4186 }
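
/*
 * Example: IEEE rates are expressed in 500 kbit/s units, so 24 Mbit/s
 * OFDM is rate code 48.  ath_tx_findrix(sc, 48) returns sc_rixmap[48]
 * if the current rate table contains that rate, and falls back to
 * rix 0 (the lowest rate) when sc_rixmap[48] is 0xff.
 */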
4187
4188 static void
4189 ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
4190 struct ath_buf *bf)
4191 {
4192 struct ieee80211_node *ni = bf->bf_node;
4193 struct ieee80211com *ic = &sc->sc_ic;
4194 int sr, lr, pri;
4195
4196 if (ts->ts_status == 0) {
4197 u_int8_t txant = ts->ts_antenna;
4198 sc->sc_stats.ast_ant_tx[txant]++;
4199 sc->sc_ant_tx[txant]++;
4200 if (ts->ts_finaltsi != 0)
4201 sc->sc_stats.ast_tx_altrate++;
4202 pri = M_WME_GETAC(bf->bf_m);
4203 if (pri >= WME_AC_VO)
4204 ic->ic_wme.wme_hipri_traffic++;
4205 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)
4206 ni->ni_inact = ni->ni_inact_reload;
4207 } else {
4208 if (ts->ts_status & HAL_TXERR_XRETRY)
4209 sc->sc_stats.ast_tx_xretries++;
4210 if (ts->ts_status & HAL_TXERR_FIFO)
4211 sc->sc_stats.ast_tx_fifoerr++;
4212 if (ts->ts_status & HAL_TXERR_FILT)
4213 sc->sc_stats.ast_tx_filtered++;
4214 if (ts->ts_status & HAL_TXERR_XTXOP)
4215 sc->sc_stats.ast_tx_xtxop++;
4216 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
4217 sc->sc_stats.ast_tx_timerexpired++;
4218
4219 if (bf->bf_m->m_flags & M_FF)
4220 sc->sc_stats.ast_ff_txerr++;
4221 }
4222 /* XXX when is this valid? */
4223 if (ts->ts_flags & HAL_TX_DESC_CFG_ERR)
4224 sc->sc_stats.ast_tx_desccfgerr++;
4225 /*
4226 * This can be valid for successful frame transmission!
4227 * If there's a TX FIFO underrun during aggregate transmission,
4228 * the MAC will pad the rest of the aggregate with delimiters.
4229 * If a BA is returned, the frame is marked as "OK" and it's up
4230 * to the TX completion code to notice which frames weren't
4231 * successfully transmitted.
4232 */
4233 if (ts->ts_flags & HAL_TX_DATA_UNDERRUN)
4234 sc->sc_stats.ast_tx_data_underrun++;
4235 if (ts->ts_flags & HAL_TX_DELIM_UNDERRUN)
4236 sc->sc_stats.ast_tx_delim_underrun++;
4237
4238 sr = ts->ts_shortretry;
4239 lr = ts->ts_longretry;
4240 sc->sc_stats.ast_tx_shortretry += sr;
4241 sc->sc_stats.ast_tx_longretry += lr;
4242
4243 }
4244
4245 /*
4246  * The default completion handler. If fail is 1, it means
4247  * "please don't retry the frame, and just return -1 status"
4248  * to the net80211 stack.
4249 */
4250 void
4251 ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4252 {
4253 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4254 int st;
4255
4256 if (fail == 1)
4257 st = -1;
4258 else
4259 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ?
4260 ts->ts_status : HAL_TXERR_XRETRY;
4261
4262 #if 0
4263 if (bf->bf_state.bfs_dobaw)
4264 device_printf(sc->sc_dev,
4265 "%s: bf %p: seqno %d: dobaw should've been cleared!\n",
4266 __func__,
4267 bf,
4268 SEQNO(bf->bf_state.bfs_seqno));
4269 #endif
4270 if (bf->bf_next != NULL)
4271 device_printf(sc->sc_dev,
4272 "%s: bf %p: seqno %d: bf_next not NULL!\n",
4273 __func__,
4274 bf,
4275 SEQNO(bf->bf_state.bfs_seqno));
4276
4277 /*
4278 * Check if the node software queue is empty; if so
4279 * then clear the TIM.
4280 *
4281 * This needs to be done before the buffer is freed as
4282 * otherwise the node reference will have been released
4283 * and the node may not actually exist any longer.
4284 *
4285 * XXX I don't like this belonging here, but it's cleaner
4286 	 * to do it here right now than in all the other places
4287 * where ath_tx_default_comp() is called.
4288 *
4289 * XXX TODO: during drain, ensure that the callback is
4290 * being called so we get a chance to update the TIM.
4291 */
4292 if (bf->bf_node) {
4293 ATH_TX_LOCK(sc);
4294 ath_tx_update_tim(sc, bf->bf_node, 0);
4295 ATH_TX_UNLOCK(sc);
4296 }
4297
4298 /*
4299 * Do any tx complete callback. Note this must
4300 * be done before releasing the node reference.
4301 * This will free the mbuf, release the net80211
4302 * node and recycle the ath_buf.
4303 */
4304 ath_tx_freebuf(sc, bf, st);
4305 }
4306
4307 /*
4308 * Update rate control with the given completion status.
4309 */
4310 void
4311 ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
4312 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen,
4313 int nframes, int nbad)
4314 {
4315 struct ath_node *an;
4316
4317 /* Only for unicast frames */
4318 if (ni == NULL)
4319 return;
4320
4321 an = ATH_NODE(ni);
4322 ATH_NODE_UNLOCK_ASSERT(an);
4323
4324 if ((ts->ts_status & HAL_TXERR_FILT) == 0) {
4325 ATH_NODE_LOCK(an);
4326 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad);
4327 ATH_NODE_UNLOCK(an);
4328 }
4329 }
4330
4331 /*
4332 * Process the completion of the given buffer.
4333 *
4334 * This calls the rate control update and then the buffer completion.
4335 * This will either free the buffer or requeue it. In any case, the
4336 * bf pointer should be treated as invalid after this function is called.
4337 */
4338 void
4339 ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq,
4340 struct ath_tx_status *ts, struct ath_buf *bf)
4341 {
4342 struct ieee80211_node *ni = bf->bf_node;
4343
4344 ATH_TX_UNLOCK_ASSERT(sc);
4345 ATH_TXQ_UNLOCK_ASSERT(txq);
4346
4347 /* If unicast frame, update general statistics */
4348 if (ni != NULL) {
4349 /* update statistics */
4350 ath_tx_update_stats(sc, ts, bf);
4351 }
4352
4353 /*
4354 * Call the completion handler.
4355 * The completion handler is responsible for
4356 * calling the rate control code.
4357 *
4358 * Frames with no completion handler get the
4359 * rate control code called here.
4360 */
4361 if (bf->bf_comp == NULL) {
4362 if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
4363 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) {
4364 /*
4365 * XXX assume this isn't an aggregate
4366 * frame.
4367 */
4368 ath_tx_update_ratectrl(sc, ni,
4369 bf->bf_state.bfs_rc, ts,
4370 bf->bf_state.bfs_pktlen, 1,
4371 (ts->ts_status == 0 ? 0 : 1));
4372 }
4373 ath_tx_default_comp(sc, bf, 0);
4374 } else
4375 bf->bf_comp(sc, bf, 0);
4376 }
4377
4378
4379
4380 /*
4381 * Process completed xmit descriptors from the specified queue.
4382 * Kick the packet scheduler if needed. This can occur from this
4383 * particular task.
4384 */
4385 static int
4386 ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
4387 {
4388 struct ath_hal *ah = sc->sc_ah;
4389 struct ath_buf *bf;
4390 struct ath_desc *ds;
4391 struct ath_tx_status *ts;
4392 struct ieee80211_node *ni;
4393 #ifdef IEEE80211_SUPPORT_SUPERG
4394 struct ieee80211com *ic = &sc->sc_ic;
4395 #endif /* IEEE80211_SUPPORT_SUPERG */
4396 int nacked;
4397 HAL_STATUS status;
4398
4399 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
4400 __func__, txq->axq_qnum,
4401 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
4402 txq->axq_link);
4403
4404 ATH_KTR(sc, ATH_KTR_TXCOMP, 4,
4405 "ath_tx_processq: txq=%u head %p link %p depth %p",
4406 txq->axq_qnum,
4407 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
4408 txq->axq_link,
4409 txq->axq_depth);
4410
4411 nacked = 0;
4412 for (;;) {
4413 ATH_TXQ_LOCK(txq);
4414 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
4415 bf = TAILQ_FIRST(&txq->axq_q);
4416 if (bf == NULL) {
4417 ATH_TXQ_UNLOCK(txq);
4418 break;
4419 }
4420 ds = bf->bf_lastds; /* XXX must be setup correctly! */
4421 ts = &bf->bf_status.ds_txstat;
4422
4423 status = ath_hal_txprocdesc(ah, ds, ts);
4424 #ifdef ATH_DEBUG
4425 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
4426 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
4427 status == HAL_OK);
4428 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0))
4429 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
4430 status == HAL_OK);
4431 #endif
4432 #ifdef ATH_DEBUG_ALQ
4433 if (if_ath_alq_checkdebug(&sc->sc_alq,
4434 ATH_ALQ_EDMA_TXSTATUS)) {
4435 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
4436 sc->sc_tx_statuslen,
4437 (char *) ds);
4438 }
4439 #endif
4440
4441 if (status == HAL_EINPROGRESS) {
4442 ATH_KTR(sc, ATH_KTR_TXCOMP, 3,
4443 "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS",
4444 txq->axq_qnum, bf, ds);
4445 ATH_TXQ_UNLOCK(txq);
4446 break;
4447 }
4448 ATH_TXQ_REMOVE(txq, bf, bf_list);
4449
4450 /*
4451 * Sanity check.
4452 */
4453 if (txq->axq_qnum != bf->bf_state.bfs_tx_queue) {
4454 device_printf(sc->sc_dev,
4455 "%s: TXQ=%d: bf=%p, bfs_tx_queue=%d\n",
4456 __func__,
4457 txq->axq_qnum,
4458 bf,
4459 bf->bf_state.bfs_tx_queue);
4460 }
4461 if (txq->axq_qnum != bf->bf_last->bf_state.bfs_tx_queue) {
4462 device_printf(sc->sc_dev,
4463 "%s: TXQ=%d: bf_last=%p, bfs_tx_queue=%d\n",
4464 __func__,
4465 txq->axq_qnum,
4466 bf->bf_last,
4467 bf->bf_last->bf_state.bfs_tx_queue);
4468 }
4469
4470 #if 0
4471 if (txq->axq_depth > 0) {
4472 /*
4473 * More frames follow. Mark the buffer busy
4474 * so it's not re-used while the hardware may
4475 * still re-read the link field in the descriptor.
4476 *
4477 * Use the last buffer in an aggregate as that
4478 * is where the hardware may be - intermediate
4479 * descriptors won't be "busy".
4480 */
4481 bf->bf_last->bf_flags |= ATH_BUF_BUSY;
4482 } else
4483 txq->axq_link = NULL;
4484 #else
4485 bf->bf_last->bf_flags |= ATH_BUF_BUSY;
4486 #endif
4487 if (bf->bf_state.bfs_aggr)
4488 txq->axq_aggr_depth--;
4489
4490 ni = bf->bf_node;
4491
4492 ATH_KTR(sc, ATH_KTR_TXCOMP, 5,
4493 "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x",
4494 txq->axq_qnum, bf, ds, ni, ts->ts_status);
4495 /*
4496 		 * If the unicast frame was ack'd, update the RSSI,
4497 		 * including the last rx time used to work around
4498 		 * phantom bmiss interrupts.
4499 */
4500 if (ni != NULL && ts->ts_status == 0 &&
4501 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
4502 nacked++;
4503 sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
4504 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
4505 ts->ts_rssi);
4506 }
4507 ATH_TXQ_UNLOCK(txq);
4508
4509 /*
4510 * Update statistics and call completion
4511 */
4512 ath_tx_process_buf_completion(sc, txq, ts, bf);
4513
4514 /* XXX at this point, bf and ni may be totally invalid */
4515 }
4516 #ifdef IEEE80211_SUPPORT_SUPERG
4517 /*
4518 * Flush fast-frame staging queue when traffic slows.
4519 */
4520 if (txq->axq_depth <= 1)
4521 ieee80211_ff_flush(ic, txq->axq_ac);
4522 #endif
4523
4524 /* Kick the software TXQ scheduler */
4525 if (dosched) {
4526 ATH_TX_LOCK(sc);
4527 ath_txq_sched(sc, txq);
4528 ATH_TX_UNLOCK(sc);
4529 }
4530
4531 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
4532 "ath_tx_processq: txq=%u: done",
4533 txq->axq_qnum);
4534
4535 return nacked;
4536 }
4537
4538 #define TXQACTIVE(t, q) ( (t) & (1 << (q)))
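
/*
 * For example, once a processing task below has snapshotted
 * sc_txq_active into txqs, TXQACTIVE(txqs, 3) is non-zero iff
 * hardware queue 3 had completion work pending when the snapshot
 * was taken.
 */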
4539
4540 /*
4541 * Deferred processing of transmit interrupt; special-cased
4542 * for a single hardware transmit queue (e.g. 5210 and 5211).
4543 */
4544 static void
4545 ath_tx_proc_q0(void *arg, int npending)
4546 {
4547 struct ath_softc *sc = arg;
4548 uint32_t txqs;
4549
4550 ATH_PCU_LOCK(sc);
4551 sc->sc_txproc_cnt++;
4552 txqs = sc->sc_txq_active;
4553 sc->sc_txq_active &= ~txqs;
4554 ATH_PCU_UNLOCK(sc);
4555
4556 ATH_LOCK(sc);
4557 ath_power_set_power_state(sc, HAL_PM_AWAKE);
4558 ATH_UNLOCK(sc);
4559
4560 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
4561 "ath_tx_proc_q0: txqs=0x%08x", txqs);
4562
4563 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1))
4564 /* XXX why is lastrx updated in tx code? */
4565 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4566 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
4567 ath_tx_processq(sc, sc->sc_cabq, 1);
4568 sc->sc_wd_timer = 0;
4569
4570 if (sc->sc_softled)
4571 ath_led_event(sc, sc->sc_txrix);
4572
4573 ATH_PCU_LOCK(sc);
4574 sc->sc_txproc_cnt--;
4575 ATH_PCU_UNLOCK(sc);
4576
4577 ATH_LOCK(sc);
4578 ath_power_restore_power_state(sc);
4579 ATH_UNLOCK(sc);
4580
4581 ath_tx_kick(sc);
4582 }
4583
4584 /*
4585 * Deferred processing of transmit interrupt; special-cased
4586 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
4587 */
4588 static void
4589 ath_tx_proc_q0123(void *arg, int npending)
4590 {
4591 struct ath_softc *sc = arg;
4592 int nacked;
4593 uint32_t txqs;
4594
4595 ATH_PCU_LOCK(sc);
4596 sc->sc_txproc_cnt++;
4597 txqs = sc->sc_txq_active;
4598 sc->sc_txq_active &= ~txqs;
4599 ATH_PCU_UNLOCK(sc);
4600
4601 ATH_LOCK(sc);
4602 ath_power_set_power_state(sc, HAL_PM_AWAKE);
4603 ATH_UNLOCK(sc);
4604
4605 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
4606 "ath_tx_proc_q0123: txqs=0x%08x", txqs);
4607
4608 /*
4609 * Process each active queue.
4610 */
4611 nacked = 0;
4612 if (TXQACTIVE(txqs, 0))
4613 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1);
4614 if (TXQACTIVE(txqs, 1))
4615 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1);
4616 if (TXQACTIVE(txqs, 2))
4617 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1);
4618 if (TXQACTIVE(txqs, 3))
4619 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1);
4620 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
4621 ath_tx_processq(sc, sc->sc_cabq, 1);
4622 if (nacked)
4623 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4624
4625 sc->sc_wd_timer = 0;
4626
4627 if (sc->sc_softled)
4628 ath_led_event(sc, sc->sc_txrix);
4629
4630 ATH_PCU_LOCK(sc);
4631 sc->sc_txproc_cnt--;
4632 ATH_PCU_UNLOCK(sc);
4633
4634 ATH_LOCK(sc);
4635 ath_power_restore_power_state(sc);
4636 ATH_UNLOCK(sc);
4637
4638 ath_tx_kick(sc);
4639 }
4640
4641 /*
4642 * Deferred processing of transmit interrupt.
4643 */
4644 static void
4645 ath_tx_proc(void *arg, int npending)
4646 {
4647 struct ath_softc *sc = arg;
4648 int i, nacked;
4649 uint32_t txqs;
4650
4651 ATH_PCU_LOCK(sc);
4652 sc->sc_txproc_cnt++;
4653 txqs = sc->sc_txq_active;
4654 sc->sc_txq_active &= ~txqs;
4655 ATH_PCU_UNLOCK(sc);
4656
4657 ATH_LOCK(sc);
4658 ath_power_set_power_state(sc, HAL_PM_AWAKE);
4659 ATH_UNLOCK(sc);
4660
4661 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs);
4662
4663 /*
4664 * Process each active queue.
4665 */
4666 nacked = 0;
4667 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4668 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i))
4669 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1);
4670 if (nacked)
4671 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4672
4673 sc->sc_wd_timer = 0;
4674
4675 if (sc->sc_softled)
4676 ath_led_event(sc, sc->sc_txrix);
4677
4678 ATH_PCU_LOCK(sc);
4679 sc->sc_txproc_cnt--;
4680 ATH_PCU_UNLOCK(sc);
4681
4682 ATH_LOCK(sc);
4683 ath_power_restore_power_state(sc);
4684 ATH_UNLOCK(sc);
4685
4686 ath_tx_kick(sc);
4687 }
4688 #undef TXQACTIVE
4689
4690 /*
4691 * Deferred processing of TXQ rescheduling.
4692 */
4693 static void
4694 ath_txq_sched_tasklet(void *arg, int npending)
4695 {
4696 struct ath_softc *sc = arg;
4697 int i;
4698
4699 /* XXX is skipping ok? */
4700 ATH_PCU_LOCK(sc);
4701 #if 0
4702 if (sc->sc_inreset_cnt > 0) {
4703 device_printf(sc->sc_dev,
4704 "%s: sc_inreset_cnt > 0; skipping\n", __func__);
4705 ATH_PCU_UNLOCK(sc);
4706 return;
4707 }
4708 #endif
4709 sc->sc_txproc_cnt++;
4710 ATH_PCU_UNLOCK(sc);
4711
4712 ATH_LOCK(sc);
4713 ath_power_set_power_state(sc, HAL_PM_AWAKE);
4714 ATH_UNLOCK(sc);
4715
4716 ATH_TX_LOCK(sc);
4717 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
4718 if (ATH_TXQ_SETUP(sc, i)) {
4719 ath_txq_sched(sc, &sc->sc_txq[i]);
4720 }
4721 }
4722 ATH_TX_UNLOCK(sc);
4723
4724 ATH_LOCK(sc);
4725 ath_power_restore_power_state(sc);
4726 ATH_UNLOCK(sc);
4727
4728 ATH_PCU_LOCK(sc);
4729 sc->sc_txproc_cnt--;
4730 ATH_PCU_UNLOCK(sc);
4731 }
4732
4733 void
4734 ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf)
4735 {
4736
4737 ATH_TXBUF_LOCK_ASSERT(sc);
4738
4739 if (bf->bf_flags & ATH_BUF_MGMT)
4740 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list);
4741 else {
4742 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
4743 sc->sc_txbuf_cnt++;
4744 if (sc->sc_txbuf_cnt > ath_txbuf) {
4745 device_printf(sc->sc_dev,
4746 "%s: sc_txbuf_cnt > %d?\n",
4747 __func__,
4748 ath_txbuf);
4749 sc->sc_txbuf_cnt = ath_txbuf;
4750 }
4751 }
4752 }
4753
4754 void
4755 ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf)
4756 {
4757
4758 ATH_TXBUF_LOCK_ASSERT(sc);
4759
4760 if (bf->bf_flags & ATH_BUF_MGMT)
4761 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list);
4762 else {
4763 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
4764 sc->sc_txbuf_cnt++;
4765 		if (sc->sc_txbuf_cnt > ath_txbuf) {
4766 			device_printf(sc->sc_dev,
4767 			    "%s: sc_txbuf_cnt > %d?\n",
4768 			    __func__,
4769 			    ath_txbuf);
4770 			sc->sc_txbuf_cnt = ath_txbuf;
4771 }
4772 }
4773 }
4774
4775 /*
4776 * Free the holding buffer if it exists
4777 */
4778 void
4779 ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq)
4780 {
4781 ATH_TXBUF_UNLOCK_ASSERT(sc);
4782 ATH_TXQ_LOCK_ASSERT(txq);
4783
4784 if (txq->axq_holdingbf == NULL)
4785 return;
4786
4787 txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY;
4788
4789 ATH_TXBUF_LOCK(sc);
4790 ath_returnbuf_tail(sc, txq->axq_holdingbf);
4791 ATH_TXBUF_UNLOCK(sc);
4792
4793 txq->axq_holdingbf = NULL;
4794 }
4795
4796 /*
4797 * Add this buffer to the holding queue, freeing the previous
4798 * one if it exists.
4799 */
4800 static void
4801 ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf)
4802 {
4803 struct ath_txq *txq;
4804
4805 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue];
4806
4807 ATH_TXBUF_UNLOCK_ASSERT(sc);
4808 ATH_TXQ_LOCK_ASSERT(txq);
4809
4810 /* XXX assert ATH_BUF_BUSY is set */
4811
4812 /* XXX assert the tx queue is under the max number */
4813 	if (bf->bf_state.bfs_tx_queue >= HAL_NUM_TX_QUEUES) {
4814 device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n",
4815 __func__,
4816 bf,
4817 bf->bf_state.bfs_tx_queue);
4818 bf->bf_flags &= ~ATH_BUF_BUSY;
4819 ath_returnbuf_tail(sc, bf);
4820 return;
4821 }
4822 ath_txq_freeholdingbuf(sc, txq);
4823 txq->axq_holdingbf = bf;
4824 }
4825
4826 /*
4827 * Return a buffer to the pool and update the 'busy' flag on the
4828 * previous 'tail' entry.
4829 *
4830 * This _must_ only be called when the buffer is involved in a completed
4831 * TX. The logic is that if it was part of an active TX, the previous
4832 * buffer on the list is now not involved in a halted TX DMA queue, waiting
4833 * for restart (eg for TDMA.)
4834 *
4835 * The caller must free the mbuf and recycle the node reference.
4836 *
4837 * XXX This method of handling busy / holding buffers is insanely stupid.
4838 * It requires bf_state.bfs_tx_queue to be correctly assigned. It would
4839 * be much nicer if buffers in the processq() methods would instead be
4840 * always completed there (pushed onto a txq or ath_bufhead) so we knew
4841 * exactly what hardware queue they came from in the first place.
4842 */
4843 void
4844 ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
4845 {
4846 struct ath_txq *txq;
4847
4848 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue];
4849
4850 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
4851 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));
4852
4853 /*
4854 * If this buffer is busy, push it onto the holding queue.
4855 */
4856 if (bf->bf_flags & ATH_BUF_BUSY) {
4857 ATH_TXQ_LOCK(txq);
4858 ath_txq_addholdingbuf(sc, bf);
4859 ATH_TXQ_UNLOCK(txq);
4860 return;
4861 }
4862
4863 /*
4864 * Not a busy buffer, so free normally
4865 */
4866 ATH_TXBUF_LOCK(sc);
4867 ath_returnbuf_tail(sc, bf);
4868 ATH_TXBUF_UNLOCK(sc);
4869 }
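
/*
 * To illustrate the busy/holding flow above (a sketch of the path a
 * completed-but-still-busy buffer takes, not new API):
 *
 *	bf->bf_last->bf_flags |= ATH_BUF_BUSY;	// set at completion time
 *	...
 *	ath_freebuf(sc, bf);	// busy: parked via ath_txq_addholdingbuf()
 *	...
 *	// The next buffer parked on the same queue (or a queue drain)
 *	// calls ath_txq_freeholdingbuf(), which clears ATH_BUF_BUSY and
 *	// finally returns the previous buffer to the free list.
 */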
4870
4871 /*
4872 * This is currently used by ath_tx_draintxq() and
4873 * ath_tx_tid_free_pkts().
4874 *
4875 * It recycles a single ath_buf.
4876 */
4877 void
4878 ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status)
4879 {
4880 struct ieee80211_node *ni = bf->bf_node;
4881 struct mbuf *m0 = bf->bf_m;
4882
4883 /*
4884 * Make sure that we only sync/unload if there's an mbuf.
4885 * If not (eg we cloned a buffer), the unload will have already
4886 * occurred.
4887 */
4888 if (bf->bf_m != NULL) {
4889 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4890 BUS_DMASYNC_POSTWRITE);
4891 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4892 }
4893
4894 bf->bf_node = NULL;
4895 bf->bf_m = NULL;
4896
4897 /* Free the buffer, it's not needed any longer */
4898 ath_freebuf(sc, bf);
4899
4900 /* Pass the buffer back to net80211 - completing it */
4901 ieee80211_tx_complete(ni, m0, status);
4902 }
4903
4904 static struct ath_buf *
4905 ath_tx_draintxq_get_one(struct ath_softc *sc, struct ath_txq *txq)
4906 {
4907 struct ath_buf *bf;
4908
4909 ATH_TXQ_LOCK_ASSERT(txq);
4910
4911 /*
4912 * Drain the FIFO queue first, then if it's
4913 * empty, move to the normal frame queue.
4914 */
4915 bf = TAILQ_FIRST(&txq->fifo.axq_q);
4916 if (bf != NULL) {
4917 /*
4918 * Is it the last buffer in this set?
4919 * Decrement the FIFO counter.
4920 */
4921 if (bf->bf_flags & ATH_BUF_FIFOEND) {
4922 if (txq->axq_fifo_depth == 0) {
4923 device_printf(sc->sc_dev,
4924 "%s: Q%d: fifo_depth=0, fifo.axq_depth=%d?\n",
4925 __func__,
4926 txq->axq_qnum,
4927 txq->fifo.axq_depth);
4928 } else
4929 txq->axq_fifo_depth--;
4930 }
4931 ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
4932 return (bf);
4933 }
4934
4935 /*
4936 * Debugging!
4937 */
4938 if (txq->axq_fifo_depth != 0 || txq->fifo.axq_depth != 0) {
4939 device_printf(sc->sc_dev,
4940 "%s: Q%d: fifo_depth=%d, fifo.axq_depth=%d\n",
4941 __func__,
4942 txq->axq_qnum,
4943 txq->axq_fifo_depth,
4944 txq->fifo.axq_depth);
4945 }
4946
4947 /*
4948 * Now drain the pending queue.
4949 */
4950 bf = TAILQ_FIRST(&txq->axq_q);
4951 if (bf == NULL) {
4952 txq->axq_link = NULL;
4953 return (NULL);
4954 }
4955 ATH_TXQ_REMOVE(txq, bf, bf_list);
4956 return (bf);
4957 }
4958
4959 void
4960 ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
4961 {
4962 #ifdef ATH_DEBUG
4963 struct ath_hal *ah = sc->sc_ah;
4964 #endif
4965 struct ath_buf *bf;
4966 u_int ix;
4967
4968 /*
4969 * NB: this assumes output has been stopped and
4970 * we do not need to block ath_tx_proc
4971 */
4972 for (ix = 0;; ix++) {
4973 ATH_TXQ_LOCK(txq);
4974 bf = ath_tx_draintxq_get_one(sc, txq);
4975 if (bf == NULL) {
4976 ATH_TXQ_UNLOCK(txq);
4977 break;
4978 }
4979 if (bf->bf_state.bfs_aggr)
4980 txq->axq_aggr_depth--;
4981 #ifdef ATH_DEBUG
4982 if (sc->sc_debug & ATH_DEBUG_RESET) {
4983 struct ieee80211com *ic = &sc->sc_ic;
4984 int status = 0;
4985
4986 /*
4987 * EDMA operation has a TX completion FIFO
4988 * separate from the TX descriptor, so this
4989 * method of checking the "completion" status
4990 * is wrong.
4991 */
4992 if (! sc->sc_isedma) {
4993 status = (ath_hal_txprocdesc(ah,
4994 bf->bf_lastds,
4995 &bf->bf_status.ds_txstat) == HAL_OK);
4996 }
4997 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status);
4998 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
4999 bf->bf_m->m_len, 0, -1);
5000 }
5001 #endif /* ATH_DEBUG */
5002 /*
5003 * Since we're now doing magic in the completion
5004 * functions, we -must- call it for aggregation
5005 * destinations or BAW tracking will get upset.
5006 */
5007 /*
5008 * Clear ATH_BUF_BUSY; the completion handler
5009 * will free the buffer.
5010 */
5011 ATH_TXQ_UNLOCK(txq);
5012 bf->bf_flags &= ~ATH_BUF_BUSY;
5013 if (bf->bf_comp)
5014 bf->bf_comp(sc, bf, 1);
5015 else
5016 ath_tx_default_comp(sc, bf, 1);
5017 }
5018
5019 /*
5020 * Free the holding buffer if it exists
5021 */
5022 ATH_TXQ_LOCK(txq);
5023 ath_txq_freeholdingbuf(sc, txq);
5024 ATH_TXQ_UNLOCK(txq);
5025
5026 /*
5027 * Drain software queued frames which are on
5028 * active TIDs.
5029 */
5030 ath_tx_txq_drain(sc, txq);
5031 }
5032
5033 static void
5034 ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
5035 {
5036 struct ath_hal *ah = sc->sc_ah;
5037
5038 ATH_TXQ_LOCK_ASSERT(txq);
5039
5040 DPRINTF(sc, ATH_DEBUG_RESET,
5041 "%s: tx queue [%u] %p, active=%d, hwpending=%d, flags 0x%08x, "
5042 "link %p, holdingbf=%p\n",
5043 __func__,
5044 txq->axq_qnum,
5045 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
5046 (int) (!! ath_hal_txqenabled(ah, txq->axq_qnum)),
5047 (int) ath_hal_numtxpending(ah, txq->axq_qnum),
5048 txq->axq_flags,
5049 txq->axq_link,
5050 txq->axq_holdingbf);
5051
5052 (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
5053 /* We've stopped TX DMA, so mark this as stopped. */
5054 txq->axq_flags &= ~ATH_TXQ_PUTRUNNING;
5055
5056 #ifdef ATH_DEBUG
5057 if ((sc->sc_debug & ATH_DEBUG_RESET)
5058 && (txq->axq_holdingbf != NULL)) {
5059 ath_printtxbuf(sc, txq->axq_holdingbf, txq->axq_qnum, 0, 0);
5060 }
5061 #endif
5062 }
5063
5064 int
5065 ath_stoptxdma(struct ath_softc *sc)
5066 {
5067 struct ath_hal *ah = sc->sc_ah;
5068 int i;
5069
5070 	/* XXX return value */
5071 	if (sc->sc_invalid) {
5072 		/* don't touch the hardware if marked invalid */
5073 		return 0;
5074 	}
5075
5076 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5077 	    __func__, sc->sc_bhalq,
5078 	    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
5079 	    NULL);
5080
5081 	/* stop the beacon queue */
5082 	(void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
5083
5084 	/* Stop the data queues */
5085 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
5086 		if (ATH_TXQ_SETUP(sc, i)) {
5087 			ATH_TXQ_LOCK(&sc->sc_txq[i]);
5088 			ath_tx_stopdma(sc, &sc->sc_txq[i]);
5089 			ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
5090 		}
5091 	}
5092
5093 	return 1;
5095 }
5096
5097 #ifdef ATH_DEBUG
5098 void
5099 ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq)
5100 {
5101 struct ath_hal *ah = sc->sc_ah;
5102 struct ath_buf *bf;
5103 int i = 0;
5104
5105 if (! (sc->sc_debug & ATH_DEBUG_RESET))
5106 return;
5107
5108 device_printf(sc->sc_dev, "%s: Q%d: begin\n",
5109 __func__, txq->axq_qnum);
5110 TAILQ_FOREACH(bf, &txq->axq_q, bf_list) {
5111 ath_printtxbuf(sc, bf, txq->axq_qnum, i,
5112 ath_hal_txprocdesc(ah, bf->bf_lastds,
5113 &bf->bf_status.ds_txstat) == HAL_OK);
5114 i++;
5115 }
5116 device_printf(sc->sc_dev, "%s: Q%d: end\n",
5117 __func__, txq->axq_qnum);
5118 }
5119 #endif /* ATH_DEBUG */
5120
5121 /*
5122 * Drain the transmit queues and reclaim resources.
5123 */
5124 void
5125 ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
5126 {
5127 struct ath_hal *ah = sc->sc_ah;
5128 struct ath_buf *bf_last;
5129 int i;
5130
5131 (void) ath_stoptxdma(sc);
5132
5133 /*
5134 * Dump the queue contents
5135 */
5136 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
5137 /*
5138 * XXX TODO: should we just handle the completed TX frames
5139 		 * here, whether or not the reset is a full one?
5140 */
5141 if (ATH_TXQ_SETUP(sc, i)) {
5142 #ifdef ATH_DEBUG
5143 if (sc->sc_debug & ATH_DEBUG_RESET)
5144 ath_tx_dump(sc, &sc->sc_txq[i]);
5145 #endif /* ATH_DEBUG */
5146 if (reset_type == ATH_RESET_NOLOSS) {
5147 ath_tx_processq(sc, &sc->sc_txq[i], 0);
5148 ATH_TXQ_LOCK(&sc->sc_txq[i]);
5149 /*
5150 * Free the holding buffer; DMA is now
5151 * stopped.
5152 */
5153 ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]);
5154 /*
5155 * Setup the link pointer to be the
5156 * _last_ buffer/descriptor in the list.
5157 * If there's nothing in the list, set it
5158 * to NULL.
5159 */
5160 bf_last = ATH_TXQ_LAST(&sc->sc_txq[i],
5161 axq_q_s);
5162 if (bf_last != NULL) {
5163 ath_hal_gettxdesclinkptr(ah,
5164 bf_last->bf_lastds,
5165 &sc->sc_txq[i].axq_link);
5166 } else {
5167 sc->sc_txq[i].axq_link = NULL;
5168 }
5169 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
5170 } else
5171 ath_tx_draintxq(sc, &sc->sc_txq[i]);
5172 }
5173 }
5174 #ifdef ATH_DEBUG
5175 if (sc->sc_debug & ATH_DEBUG_RESET) {
5176 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
5177 if (bf != NULL && bf->bf_m != NULL) {
5178 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
5179 ath_hal_txprocdesc(ah, bf->bf_lastds,
5180 &bf->bf_status.ds_txstat) == HAL_OK);
5181 ieee80211_dump_pkt(&sc->sc_ic,
5182 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
5183 0, -1);
5184 }
5185 }
5186 #endif /* ATH_DEBUG */
5187 sc->sc_wd_timer = 0;
5188 }
5189
5190 /*
5191 * Update internal state after a channel change.
5192 */
5193 static void
5194 ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
5195 {
5196 enum ieee80211_phymode mode;
5197
5198 /*
5199 * Change channels and update the h/w rate map
5200 * if we're switching; e.g. 11a to 11b/g.
5201 */
5202 mode = ieee80211_chan2mode(chan);
5203 if (mode != sc->sc_curmode)
5204 ath_setcurmode(sc, mode);
5205 sc->sc_curchan = chan;
5206 }
5207
5208 /*
5209 * Set/change channels. If the channel is really being changed,
5210 * it's done by resetting the chip. To accomplish this we must
5211  * first clean up any pending DMA, then restart things again,
5212  * a la ath_init.
5213 */
5214 static int
5215 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
5216 {
5217 struct ieee80211com *ic = &sc->sc_ic;
5218 struct ath_hal *ah = sc->sc_ah;
5219 int ret = 0;
5220
5221 /* Treat this as an interface reset */
5222 ATH_PCU_UNLOCK_ASSERT(sc);
5223 ATH_UNLOCK_ASSERT(sc);
5224
5225 /* (Try to) stop TX/RX from occurring */
5226 taskqueue_block(sc->sc_tq);
5227
5228 ATH_PCU_LOCK(sc);
5229
5230 /* Disable interrupts */
5231 ath_hal_intrset(ah, 0);
5232
5233 /* Stop new RX/TX/interrupt completion */
5234 if (ath_reset_grablock(sc, 1) == 0) {
5235 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
5236 __func__);
5237 }
5238
5239 /* Stop pending RX/TX completion */
5240 ath_txrx_stop_locked(sc);
5241
5242 ATH_PCU_UNLOCK(sc);
5243
5244 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
5245 __func__, ieee80211_chan2ieee(ic, chan),
5246 chan->ic_freq, chan->ic_flags);
5247 if (chan != sc->sc_curchan) {
5248 HAL_STATUS status;
5249 /*
5250 * To switch channels clear any pending DMA operations;
5251 * wait long enough for the RX fifo to drain, reset the
5252 * hardware at the new frequency, and then re-enable
5253 * the relevant bits of the h/w.
5254 */
5255 #if 0
5256 ath_hal_intrset(ah, 0); /* disable interrupts */
5257 #endif
5258 ath_stoprecv(sc, 1); /* turn off frame recv */
5259 /*
5260 * First, handle completed TX/RX frames.
5261 */
5262 ath_rx_flush(sc);
5263 ath_draintxq(sc, ATH_RESET_NOLOSS);
5264 /*
5265 * Next, flush the non-scheduled frames.
5266 */
5267 ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */
5268
5269 ath_update_chainmasks(sc, chan);
5270 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
5271 sc->sc_cur_rxchainmask);
5272 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE,
5273 HAL_RESET_NORMAL, &status)) {
5274 device_printf(sc->sc_dev, "%s: unable to reset "
5275 "channel %u (%u MHz, flags 0x%x), hal status %u\n",
5276 __func__, ieee80211_chan2ieee(ic, chan),
5277 chan->ic_freq, chan->ic_flags, status);
5278 ret = EIO;
5279 goto finish;
5280 }
5281 sc->sc_diversity = ath_hal_getdiversity(ah);
5282
5283 ATH_RX_LOCK(sc);
5284 sc->sc_rx_stopped = 1;
5285 sc->sc_rx_resetted = 1;
5286 ATH_RX_UNLOCK(sc);
5287
5288 /* Let DFS at it in case it's a DFS channel */
5289 ath_dfs_radar_enable(sc, chan);
5290
5291 		/* Let spectral at it in case spectral is enabled */
5292 ath_spectral_enable(sc, chan);
5293
5294 /*
5295 		 * Let bluetooth coexistence at it in case it's needed
5296 		 * for this channel
5297 */
5298 ath_btcoex_enable(sc, ic->ic_curchan);
5299
5300 /*
5301 * If we're doing TDMA, enforce the TXOP limitation for chips
5302 * that support it.
5303 */
5304 if (sc->sc_hasenforcetxop && sc->sc_tdma)
5305 ath_hal_setenforcetxop(sc->sc_ah, 1);
5306 else
5307 ath_hal_setenforcetxop(sc->sc_ah, 0);
5308
5309 /*
5310 * Re-enable rx framework.
5311 */
5312 if (ath_startrecv(sc) != 0) {
5313 device_printf(sc->sc_dev,
5314 "%s: unable to restart recv logic\n", __func__);
5315 ret = EIO;
5316 goto finish;
5317 }
5318
5319 /*
5320 * Change channels and update the h/w rate map
5321 * if we're switching; e.g. 11a to 11b/g.
5322 */
5323 ath_chan_change(sc, chan);
5324
5325 /*
5326 * Reset clears the beacon timers; reset them
5327 * here if needed.
5328 */
5329 if (sc->sc_beacons) { /* restart beacons */
5330 #ifdef IEEE80211_SUPPORT_TDMA
5331 if (sc->sc_tdma)
5332 ath_tdma_config(sc, NULL);
5333 else
5334 #endif
5335 ath_beacon_config(sc, NULL);
5336 }
5337
5338 /*
5339 * Re-enable interrupts.
5340 */
5341 #if 0
5342 ath_hal_intrset(ah, sc->sc_imask);
5343 #endif
5344 }
5345
5346 finish:
5347 ATH_PCU_LOCK(sc);
5348 sc->sc_inreset_cnt--;
5349 /* XXX only do this if sc_inreset_cnt == 0? */
5350 ath_hal_intrset(ah, sc->sc_imask);
5351 ATH_PCU_UNLOCK(sc);
5352
5353 ath_txrx_start(sc);
5354 /* XXX ath_start? */
5355
5356 return ret;
5357 }
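
/*
 * In short, the channel switch above is: block the taskqueue, disable
 * interrupts, grab the reset lock, stop pending TX/RX completion,
 * stop receive, drain TX (no-loss then full), ath_hal_reset() on the
 * new channel, re-enable DFS / spectral / btcoex, restart receive,
 * reprogram beacons if they were running, then restore interrupts and
 * restart TX/RX.
 */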
5358
5359 /*
5360 * Periodically recalibrate the PHY to account
5361 * for temperature/environment changes.
5362 */
5363 static void
5364 ath_calibrate(void *arg)
5365 {
5366 struct ath_softc *sc = arg;
5367 struct ath_hal *ah = sc->sc_ah;
5368 struct ieee80211com *ic = &sc->sc_ic;
5369 HAL_BOOL longCal, isCalDone = AH_TRUE;
5370 HAL_BOOL aniCal, shortCal = AH_FALSE;
5371 int nextcal;
5372
5373 ATH_LOCK_ASSERT(sc);
5374
5375 /*
5376 * Force the hardware awake for ANI work.
5377 */
5378 ath_power_set_power_state(sc, HAL_PM_AWAKE);
5379
5380 /* Skip trying to do this if we're in reset */
5381 if (sc->sc_inreset_cnt)
5382 goto restart;
5383
5384 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */
5385 goto restart;
5386 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
5387 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000);
5388 if (sc->sc_doresetcal)
5389 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000);
5390
5391 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal);
5392 if (aniCal) {
5393 sc->sc_stats.ast_ani_cal++;
5394 sc->sc_lastani = ticks;
5395 ath_hal_ani_poll(ah, sc->sc_curchan);
5396 }
5397
5398 if (longCal) {
5399 sc->sc_stats.ast_per_cal++;
5400 sc->sc_lastlongcal = ticks;
5401 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
5402 /*
5403 * Rfgain is out of bounds, reset the chip
5404 * to load new gain values.
5405 */
5406 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5407 "%s: rfgain change\n", __func__);
5408 sc->sc_stats.ast_per_rfgain++;
5409 sc->sc_resetcal = 0;
5410 sc->sc_doresetcal = AH_TRUE;
5411 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
5412 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
5413 ath_power_restore_power_state(sc);
5414 return;
5415 }
5416 /*
5417 * If this long cal is after an idle period, then
5418 * reset the data collection state so we start fresh.
5419 */
5420 if (sc->sc_resetcal) {
5421 (void) ath_hal_calreset(ah, sc->sc_curchan);
5422 sc->sc_lastcalreset = ticks;
5423 sc->sc_lastshortcal = ticks;
5424 sc->sc_resetcal = 0;
5425 sc->sc_doresetcal = AH_TRUE;
5426 }
5427 }
5428
5429 /* Only call if we're doing a short/long cal, not for ANI calibration */
5430 if (shortCal || longCal) {
5431 isCalDone = AH_FALSE;
5432 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
5433 if (longCal) {
5434 /*
5435 * Calibrate noise floor data again in case of change.
5436 */
5437 ath_hal_process_noisefloor(ah);
5438 }
5439 } else {
5440 DPRINTF(sc, ATH_DEBUG_ANY,
5441 "%s: calibration of channel %u failed\n",
5442 __func__, sc->sc_curchan->ic_freq);
5443 sc->sc_stats.ast_per_calfail++;
5444 }
5445 if (shortCal)
5446 sc->sc_lastshortcal = ticks;
5447 }
5448 if (!isCalDone) {
5449 restart:
5450 /*
5451 * Use a shorter interval to potentially collect multiple
5452 * data samples required to complete calibration. Once
5453 * we're told the work is done we drop back to a longer
5454 * interval between requests. We're more aggressive doing
5455 * work when operating as an AP to improve operation right
5456 * after startup.
5457 */
5458 sc->sc_lastshortcal = ticks;
5459 nextcal = ath_shortcalinterval*hz/1000;
5460 if (sc->sc_opmode != HAL_M_HOSTAP)
5461 nextcal *= 10;
5462 sc->sc_doresetcal = AH_TRUE;
5463 } else {
5464 /* nextcal should be the shortest time for next event */
5465 nextcal = ath_longcalinterval*hz;
5466 if (sc->sc_lastcalreset == 0)
5467 sc->sc_lastcalreset = sc->sc_lastlongcal;
5468 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
5469 sc->sc_resetcal = 1; /* setup reset next trip */
5470 sc->sc_doresetcal = AH_FALSE;
5471 }
5472 /* ANI calibration may occur more often than short/long/resetcal */
5473 if (ath_anicalinterval > 0)
5474 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000);
5475
5476 if (nextcal != 0) {
5477 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
5478 __func__, nextcal, isCalDone ? "" : "!");
5479 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
5480 } else {
5481 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
5482 __func__);
5483 /* NB: don't rearm timer */
5484 }
5485 /*
5486 * Restore power state now that we're done.
5487 */
5488 ath_power_restore_power_state(sc);
5489 }
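
/*
 * A worked example of the rearm arithmetic above, assuming the stock
 * tunables (ath_shortcalinterval = 100 ms, ath_longcalinterval = 30 s,
 * ath_anicalinterval = 100 ms - values assumed here, check the
 * sysctls):
 *
 *	!isCalDone:	nextcal = 100 ms (hostap) or 1 s (x10 otherwise)
 *	isCalDone:	nextcal = 30 s
 *	ANI enabled:	nextcal = MIN(nextcal, 100 ms)
 *
 * i.e. with ANI enabled the callout effectively ticks every 100 ms
 * and the short/long calibration work piggybacks on it whenever its
 * own interval has expired.
 */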
5490
5491 static void
5492 ath_scan_start(struct ieee80211com *ic)
5493 {
5494 struct ath_softc *sc = ic->ic_softc;
5495 struct ath_hal *ah = sc->sc_ah;
5496 u_int32_t rfilt;
5497
5498 /* XXX calibration timer? */
5499 /* XXXGL: is constant ieee80211broadcastaddr a correct choice? */
5500
5501 ATH_LOCK(sc);
5502 sc->sc_scanning = 1;
5503 sc->sc_syncbeacon = 0;
5504 rfilt = ath_calcrxfilter(sc);
5505 ATH_UNLOCK(sc);
5506
5507 ATH_PCU_LOCK(sc);
5508 ath_hal_setrxfilter(ah, rfilt);
5509 ath_hal_setassocid(ah, ieee80211broadcastaddr, 0);
5510 ATH_PCU_UNLOCK(sc);
5511
5512 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
5513 __func__, rfilt, ether_sprintf(ieee80211broadcastaddr));
5514 }
5515
5516 static void
5517 ath_scan_end(struct ieee80211com *ic)
5518 {
5519 struct ath_softc *sc = ic->ic_softc;
5520 struct ath_hal *ah = sc->sc_ah;
5521 u_int32_t rfilt;
5522
5523 ATH_LOCK(sc);
5524 sc->sc_scanning = 0;
5525 rfilt = ath_calcrxfilter(sc);
5526 ATH_UNLOCK(sc);
5527
5528 ATH_PCU_LOCK(sc);
5529 ath_hal_setrxfilter(ah, rfilt);
5530 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
5531
5532 ath_hal_process_noisefloor(ah);
5533 ATH_PCU_UNLOCK(sc);
5534
5535 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5536 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
5537 sc->sc_curaid);
5538 }
5539
5540 #ifdef ATH_ENABLE_11N
5541 /*
5542 * For now, just do a channel change.
5543 *
5544 * Later, we'll go through the hard slog of suspending tx/rx, changing rate
5545 * control state and resetting the hardware without dropping frames out
5546 * of the queue.
5547 *
5548 * The unfortunate trouble here is making absolutely sure that the
5549 * channel width change has propagated enough so the hardware
5550  * absolutely isn't handed bogus frames for its current operating
5551  * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and
5552  * do occur in parallel, we need to make certain we've blocked
5553 * any further ongoing TX (and RX, that can cause raw TX)
5554 * before we do this.
5555 */
5556 static void
5557 ath_update_chw(struct ieee80211com *ic)
5558 {
5559 struct ath_softc *sc = ic->ic_softc;
5560
5561 DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__);
5562 ath_set_channel(ic);
5563 }
5564 #endif /* ATH_ENABLE_11N */
5565
5566 static void
5567 ath_set_channel(struct ieee80211com *ic)
5568 {
5569 struct ath_softc *sc = ic->ic_softc;
5570
5571 ATH_LOCK(sc);
5572 ath_power_set_power_state(sc, HAL_PM_AWAKE);
5573 ATH_UNLOCK(sc);
5574
5575 (void) ath_chan_set(sc, ic->ic_curchan);
5576 /*
5577 * If we are returning to our bss channel then mark state
5578 * so the next recv'd beacon's tsf will be used to sync the
5579 * beacon timers. Note that since we only hear beacons in
5580 * sta/ibss mode this has no effect in other operating modes.
5581 */
5582 ATH_LOCK(sc);
5583 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
5584 sc->sc_syncbeacon = 1;
5585 ath_power_restore_power_state(sc);
5586 ATH_UNLOCK(sc);
5587 }
5588
5589 /*
5590  * Walk the vap list and check if there are any vaps in RUN state.
5591 */
5592 static int
5593 ath_isanyrunningvaps(struct ieee80211vap *this)
5594 {
5595 struct ieee80211com *ic = this->iv_ic;
5596 struct ieee80211vap *vap;
5597
5598 IEEE80211_LOCK_ASSERT(ic);
5599
5600 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
5601 if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
5602 return 1;
5603 }
5604 return 0;
5605 }
5606
5607 static int
5608 ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
5609 {
5610 struct ieee80211com *ic = vap->iv_ic;
5611 struct ath_softc *sc = ic->ic_softc;
5612 struct ath_vap *avp = ATH_VAP(vap);
5613 struct ath_hal *ah = sc->sc_ah;
5614 struct ieee80211_node *ni = NULL;
5615 int i, error, stamode;
5616 u_int32_t rfilt;
5617 int csa_run_transition = 0;
5618 enum ieee80211_state ostate = vap->iv_state;
5619
5620 static const HAL_LED_STATE leds[] = {
5621 HAL_LED_INIT, /* IEEE80211_S_INIT */
5622 HAL_LED_SCAN, /* IEEE80211_S_SCAN */
5623 HAL_LED_AUTH, /* IEEE80211_S_AUTH */
5624 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */
5625 HAL_LED_RUN, /* IEEE80211_S_CAC */
5626 HAL_LED_RUN, /* IEEE80211_S_RUN */
5627 HAL_LED_RUN, /* IEEE80211_S_CSA */
5628 HAL_LED_RUN, /* IEEE80211_S_SLEEP */
5629 };
5630
5631 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
5632 ieee80211_state_name[ostate],
5633 ieee80211_state_name[nstate]);
5634
5635 /*
5636 * net80211 _should_ have the comlock asserted at this point.
5637 * There are some comments around the calls to vap->iv_newstate
5638 * which indicate that it (newstate) may end up dropping the
5639 * lock. This and the subsequent lock assert check after newstate
5640 * are an attempt to catch these and figure out how/why.
5641 */
5642 IEEE80211_LOCK_ASSERT(ic);
5643
5644 /* Before we touch the hardware - wake it up */
5645 ATH_LOCK(sc);
5646 /*
5647 * If the NIC is in anything other than SLEEP state,
5648 * we need to ensure that self-generated frames are
5649 * set for PWRMGT=0. Otherwise we may end up with
5650 * strange situations.
5651 *
5652 * XXX TODO: is this actually the case? :-)
5653 */
5654 if (nstate != IEEE80211_S_SLEEP)
5655 ath_power_setselfgen(sc, HAL_PM_AWAKE);
5656
5657 /*
5658 * Now, wake the thing up.
5659 */
5660 ath_power_set_power_state(sc, HAL_PM_AWAKE);
5661
5662 /*
5663 * And stop the calibration callout whilst we have
5664 * ATH_LOCK held.
5665 */
5666 #if defined(__DragonFly__)
5667 callout_cancel(&sc->sc_cal_ch);
5668 #else
5669 callout_stop(&sc->sc_cal_ch);
5670 #endif
5671 ATH_UNLOCK(sc);
5672
5673 if (ostate == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
5674 csa_run_transition = 1;
5675
5676 ath_hal_setledstate(ah, leds[nstate]); /* set LED */
5677
5678 if (nstate == IEEE80211_S_SCAN) {
5679 /*
5680 * Scanning: turn off beacon miss and don't beacon.
5681 * Mark beacon state so when we reach RUN state we'll
5682 * [re]setup beacons. Unblock the task q thread so
5683 * deferred interrupt processing is done.
5684 */
5685
5686 /* Ensure we stay awake during scan */
5687 ATH_LOCK(sc);
5688 ath_power_setselfgen(sc, HAL_PM_AWAKE);
5689 ath_power_setpower(sc, HAL_PM_AWAKE);
5690 ATH_UNLOCK(sc);
5691
5692 ath_hal_intrset(ah,
5693 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
5694 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
5695 sc->sc_beacons = 0;
5696 taskqueue_unblock(sc->sc_tq);
5697 }
5698
5699 ni = ieee80211_ref_node(vap->iv_bss);
5700 rfilt = ath_calcrxfilter(sc);
5701 stamode = (vap->iv_opmode == IEEE80211_M_STA ||
5702 vap->iv_opmode == IEEE80211_M_AHDEMO ||
5703 vap->iv_opmode == IEEE80211_M_IBSS);
5704
5705 /*
5706 	 * XXX Don't need to do this (and others) if we've transitioned
5707 * from SLEEP->RUN.
5708 */
5709 if (stamode && nstate == IEEE80211_S_RUN) {
5710 sc->sc_curaid = ni->ni_associd;
5711 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
5712 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
5713 }
5714 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5715 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
5716 ath_hal_setrxfilter(ah, rfilt);
5717
5718 /* XXX is this to restore keycache on resume? */
5719 if (vap->iv_opmode != IEEE80211_M_STA &&
5720 (vap->iv_flags & IEEE80211_F_PRIVACY)) {
5721 for (i = 0; i < IEEE80211_WEP_NKID; i++)
5722 if (ath_hal_keyisvalid(ah, i))
5723 ath_hal_keysetmac(ah, i, ni->ni_bssid);
5724 }
5725
5726 /*
5727 * Invoke the parent method to do net80211 work.
5728 */
5729 error = avp->av_newstate(vap, nstate, arg);
5730 if (error != 0)
5731 goto bad;
5732
5733 /*
5734 * See above: ensure av_newstate() doesn't drop the lock
5735 * on us.
5736 */
5737 IEEE80211_LOCK_ASSERT(ic);
5738
5739 if (nstate == IEEE80211_S_RUN) {
5740 /* NB: collect bss node again, it may have changed */
5741 ieee80211_free_node(ni);
5742 ni = ieee80211_ref_node(vap->iv_bss);
5743
5744 DPRINTF(sc, ATH_DEBUG_STATE,
5745 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
5746 "capinfo 0x%04x chan %d\n", __func__,
5747 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
5748 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));
5749
5750 switch (vap->iv_opmode) {
5751 #ifdef IEEE80211_SUPPORT_TDMA
5752 case IEEE80211_M_AHDEMO:
5753 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
5754 break;
5755 /* fall thru... */
5756 #endif
5757 case IEEE80211_M_HOSTAP:
5758 case IEEE80211_M_IBSS:
5759 case IEEE80211_M_MBSS:
5760 /*
5761 * Allocate and setup the beacon frame.
5762 *
5763 * Stop any previous beacon DMA. This may be
5764 * necessary, for example, when an ibss merge
5765 * causes reconfiguration; there will be a state
5766 * transition from RUN->RUN that means we may
5767 * be called with beacon transmission active.
5768 */
5769 ath_hal_stoptxdma(ah, sc->sc_bhalq);
5770
5771 error = ath_beacon_alloc(sc, ni);
5772 if (error != 0)
5773 goto bad;
5774 /*
5775 * If joining an adhoc network defer beacon timer
5776 * configuration to the next beacon frame so we
5777 * have a current TSF to use. Otherwise we're
5778 * starting an ibss/bss so there's no need to delay;
5779 * if this is the first vap moving to RUN state, then
5780 * beacon state needs to be [re]configured.
5781 */
5782 if (vap->iv_opmode == IEEE80211_M_IBSS &&
5783 ni->ni_tstamp.tsf != 0) {
5784 sc->sc_syncbeacon = 1;
5785 } else if (!sc->sc_beacons) {
5786 #ifdef IEEE80211_SUPPORT_TDMA
5787 if (vap->iv_caps & IEEE80211_C_TDMA)
5788 ath_tdma_config(sc, vap);
5789 else
5790 #endif
5791 ath_beacon_config(sc, vap);
5792 sc->sc_beacons = 1;
5793 }
5794 break;
5795 case IEEE80211_M_STA:
5796 /*
5797 * Defer beacon timer configuration to the next
5798 * beacon frame so we have a current TSF to use
5799 * (any TSF collected when scanning is likely old).
			 * However, if it's due to a CSA -> RUN transition,
			 * force a beacon update so we notice the lack of
			 * beacons from an AP still in CAC and thus force a
			 * scan.
5804 *
5805 * And, there's also corner cases here where
5806 * after a scan, the AP may have disappeared.
5807 * In that case, we may not receive an actual
5808 * beacon to update the beacon timer and thus we
5809 * won't get notified of the missing beacons.
5810 */
5811 if (ostate != IEEE80211_S_RUN &&
5812 ostate != IEEE80211_S_SLEEP) {
5813 DPRINTF(sc, ATH_DEBUG_BEACON,
5814 "%s: STA; syncbeacon=1\n", __func__);
5815 sc->sc_syncbeacon = 1;
5816
5817 if (csa_run_transition)
5818 ath_beacon_config(sc, vap);
5819
5820 /*
5821 * PR: kern/175227
5822 *
5823 * Reconfigure beacons during reset; as otherwise
5824 * we won't get the beacon timers reprogrammed
5825 * after a reset and thus we won't pick up a
5826 * beacon miss interrupt.
5827 *
				 * Hopefully we'll see a beacon before the
				 * BMISS timer fires (too often), which would
				 * otherwise lead to a STA disassociation.
5831 */
5832 sc->sc_beacons = 1;
5833 }
5834 break;
5835 case IEEE80211_M_MONITOR:
5836 /*
5837 * Monitor mode vaps have only INIT->RUN and RUN->RUN
5838 * transitions so we must re-enable interrupts here to
5839 * handle the case of a single monitor mode vap.
5840 */
5841 ath_hal_intrset(ah, sc->sc_imask);
5842 break;
5843 case IEEE80211_M_WDS:
5844 break;
5845 default:
5846 break;
5847 }
5848 /*
5849 * Let the hal process statistics collected during a
5850 * scan so it can provide calibrated noise floor data.
5851 */
5852 ath_hal_process_noisefloor(ah);
5853 /*
5854 * Reset rssi stats; maybe not the best place...
5855 */
5856 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
5857 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
5858 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
5859
5860 /*
5861 * Force awake for RUN mode.
5862 */
5863 ATH_LOCK(sc);
5864 ath_power_setselfgen(sc, HAL_PM_AWAKE);
5865 ath_power_setpower(sc, HAL_PM_AWAKE);
5866
5867 /*
5868 * Finally, start any timers and the task q thread
5869 * (in case we didn't go through SCAN state).
5870 */
5871 if (ath_longcalinterval != 0) {
5872 /* start periodic recalibration timer */
5873 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
5874 } else {
5875 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5876 "%s: calibration disabled\n", __func__);
5877 }
5878 ATH_UNLOCK(sc);
5879
5880 taskqueue_unblock(sc->sc_tq);
5881 } else if (nstate == IEEE80211_S_INIT) {
5882 /*
5883 * If there are no vaps left in RUN state then
5884 * shutdown host/driver operation:
5885 * o disable interrupts
5886 * o disable the task queue thread
5887 * o mark beacon processing as stopped
5888 */
5889 if (!ath_isanyrunningvaps(vap)) {
5890 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
5891 /* disable interrupts */
5892 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
5893 taskqueue_block(sc->sc_tq);
5894 sc->sc_beacons = 0;
5895 }
5896 #ifdef IEEE80211_SUPPORT_TDMA
5897 ath_hal_setcca(ah, AH_TRUE);
5898 #endif
5899 } else if (nstate == IEEE80211_S_SLEEP) {
5900 /* We're going to sleep, so transition appropriately */
5901 /* For now, only do this if we're a single STA vap */
5902 if (sc->sc_nvaps == 1 &&
5903 vap->iv_opmode == IEEE80211_M_STA) {
5904 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: syncbeacon=%d\n", __func__, sc->sc_syncbeacon);
5905 ATH_LOCK(sc);
5906 /*
5907 * Always at least set the self-generated
5908 * frame config to set PWRMGT=1.
5909 */
5910 ath_power_setselfgen(sc, HAL_PM_NETWORK_SLEEP);
5911
5912 /*
5913 * If we're not syncing beacons, transition
5914 * to NETWORK_SLEEP.
5915 *
			 * We stay awake if syncbeacon > 0 in case
			 * we need to listen for some beacons; otherwise
			 * our beacon timer config may be wrong.
5919 */
5920 if (sc->sc_syncbeacon == 0) {
5921 ath_power_setpower(sc, HAL_PM_NETWORK_SLEEP);
5922 }
5923 ATH_UNLOCK(sc);
5924 }
5925 }
5926 bad:
5927 ieee80211_free_node(ni);
5928
5929 /*
5930 * Restore the power state - either to what it was, or
5931 * to network_sleep if it's alright.
5932 */
5933 ATH_LOCK(sc);
5934 ath_power_restore_power_state(sc);
5935 ATH_UNLOCK(sc);
5936 return error;
5937 }
5938
/*
 * Allocate a key cache slot to the station so we can
 * set up a mapping from key index to node.  The key cache
 * slot is needed for managing antenna state and for
 * compression when stations do not use crypto.  We do
 * it unilaterally here; if crypto is employed this slot
 * will be reassigned.
 */
5947 static void
5948 ath_setup_stationkey(struct ieee80211_node *ni)
5949 {
5950 struct ieee80211vap *vap = ni->ni_vap;
5951 struct ath_softc *sc = vap->iv_ic->ic_softc;
5952 ieee80211_keyix keyix, rxkeyix;
5953
5954 /* XXX should take a locked ref to vap->iv_bss */
5955 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
5956 /*
5957 * Key cache is full; we'll fall back to doing
5958 * the more expensive lookup in software. Note
5959 * this also means no h/w compression.
5960 */
5961 /* XXX msg+statistic */
5962 } else {
5963 /* XXX locking? */
5964 ni->ni_ucastkey.wk_keyix = keyix;
5965 ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
5966 /* NB: must mark device key to get called back on delete */
5967 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
5968 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
5969 /* NB: this will create a pass-thru key entry */
5970 ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
5971 }
5972 }
5973
5974 /*
 * Set up driver-specific state for a newly associated node.
 * Note that we're also called on a re-associate; the isnew
 * param tells us whether this is the first association or not.
5978 */
5979 static void
5980 ath_newassoc(struct ieee80211_node *ni, int isnew)
5981 {
5982 struct ath_node *an = ATH_NODE(ni);
5983 struct ieee80211vap *vap = ni->ni_vap;
5984 struct ath_softc *sc = vap->iv_ic->ic_softc;
5985 const struct ieee80211_txparam *tp = ni->ni_txparms;
5986
5987 an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
5988 an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
5989
5990 #if defined(__DragonFly__)
5991 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: reassoc; isnew=%d, is_powersave=%d\n",
5992 __func__,
5993 ath_hal_ether_sprintf(ni->ni_macaddr),
5994 isnew,
5995 an->an_is_powersave);
5996 #else
5997 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: reassoc; isnew=%d, is_powersave=%d\n",
5998 __func__,
5999 ni->ni_macaddr,
6000 ":",
6001 isnew,
6002 an->an_is_powersave);
6003 #endif
6004
6005 ATH_NODE_LOCK(an);
6006 ath_rate_newassoc(sc, an, isnew);
6007 ATH_NODE_UNLOCK(an);
6008
6009 if (isnew &&
6010 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
6011 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
6012 ath_setup_stationkey(ni);
6013
6014 /*
6015 * If we're reassociating, make sure that any paused queues
6016 * get unpaused.
6017 *
6018 * Now, we may have frames in the hardware queue for this node.
6019 * So if we are reassociating and there are frames in the queue,
6020 * we need to go through the cleanup path to ensure that they're
6021 * marked as non-aggregate.
6022 */
6023 if (! isnew) {
6024 #if defined(__DragonFly__)
6025 DPRINTF(sc, ATH_DEBUG_NODE,
6026 "%s: %s: reassoc; is_powersave=%d\n",
6027 __func__,
6028 ath_hal_ether_sprintf(ni->ni_macaddr),
6029 an->an_is_powersave);
6030 #else
6031 DPRINTF(sc, ATH_DEBUG_NODE,
6032 "%s: %6D: reassoc; is_powersave=%d\n",
6033 __func__,
6034 ni->ni_macaddr,
6035 ":",
6036 an->an_is_powersave);
6037 #endif
6038
6039 /* XXX for now, we can't hold the lock across assoc */
6040 ath_tx_node_reassoc(sc, an);
6041
6042 /* XXX for now, we can't hold the lock across wakeup */
6043 if (an->an_is_powersave)
6044 ath_tx_node_wakeup(sc, an);
6045 }
6046 }
6047
6048 static int
6049 ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
6050 int nchans, struct ieee80211_channel chans[])
6051 {
6052 struct ath_softc *sc = ic->ic_softc;
6053 struct ath_hal *ah = sc->sc_ah;
6054 HAL_STATUS status;
6055
6056 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
6057 "%s: rd %u cc %u location %c%s\n",
6058 __func__, reg->regdomain, reg->country, reg->location,
6059 reg->ecm ? " ecm" : "");
6060
6061 status = ath_hal_set_channels(ah, chans, nchans,
6062 reg->country, reg->regdomain);
6063 if (status != HAL_OK) {
6064 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
6065 __func__, status);
6066 return EINVAL; /* XXX */
6067 }
6068
6069 return 0;
6070 }
6071
6072 static void
6073 ath_getradiocaps(struct ieee80211com *ic,
6074 int maxchans, int *nchans, struct ieee80211_channel chans[])
6075 {
6076 struct ath_softc *sc = ic->ic_softc;
6077 struct ath_hal *ah = sc->sc_ah;
6078
6079 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
6080 __func__, SKU_DEBUG, CTRY_DEFAULT);
6081
6082 /* XXX check return */
6083 (void) ath_hal_getchannels(ah, chans, maxchans, nchans,
6084 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
6086 }
6087
6088 static int
6089 ath_getchannels(struct ath_softc *sc)
6090 {
6091 struct ieee80211com *ic = &sc->sc_ic;
6092 struct ath_hal *ah = sc->sc_ah;
6093 HAL_STATUS status;
6094
6095 /*
6096 * Collect channel set based on EEPROM contents.
6097 */
6098 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
6099 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
6100 if (status != HAL_OK) {
6101 device_printf(sc->sc_dev,
6102 "%s: unable to collect channel list from hal, status %d\n",
6103 __func__, status);
6104 return EINVAL;
6105 }
6106 (void) ath_hal_getregdomain(ah, &sc->sc_eerd);
6107 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */
6108 /* XXX map Atheros sku's to net80211 SKU's */
6109 /* XXX net80211 types too small */
6110 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
6111 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
6112 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */
6113 ic->ic_regdomain.isocc[1] = ' ';
6114
6115 ic->ic_regdomain.ecm = 1;
6116 ic->ic_regdomain.location = 'I';
6117
6118 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
6119 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
6120 __func__, sc->sc_eerd, sc->sc_eecc,
6121 ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
6122 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
6123 return 0;
6124 }
6125
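/*
 * Resolve and cache the HAL rate table for a phy mode; returns
 * non-zero if the hardware supports the mode.  Typically this is
 * invoked once per supported mode at attach time, e.g.:
 *
 *	(void) ath_rate_setup(sc, IEEE80211_MODE_11A);
 *	(void) ath_rate_setup(sc, IEEE80211_MODE_11B);
 *	(void) ath_rate_setup(sc, IEEE80211_MODE_11G);
 */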
6126 static int
6127 ath_rate_setup(struct ath_softc *sc, u_int mode)
6128 {
6129 struct ath_hal *ah = sc->sc_ah;
6130 const HAL_RATE_TABLE *rt;
6131
6132 switch (mode) {
6133 case IEEE80211_MODE_11A:
6134 rt = ath_hal_getratetable(ah, HAL_MODE_11A);
6135 break;
6136 case IEEE80211_MODE_HALF:
6137 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
6138 break;
6139 case IEEE80211_MODE_QUARTER:
6140 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
6141 break;
6142 case IEEE80211_MODE_11B:
6143 rt = ath_hal_getratetable(ah, HAL_MODE_11B);
6144 break;
6145 case IEEE80211_MODE_11G:
6146 rt = ath_hal_getratetable(ah, HAL_MODE_11G);
6147 break;
6148 case IEEE80211_MODE_TURBO_A:
6149 rt = ath_hal_getratetable(ah, HAL_MODE_108A);
6150 break;
6151 case IEEE80211_MODE_TURBO_G:
6152 rt = ath_hal_getratetable(ah, HAL_MODE_108G);
6153 break;
6154 case IEEE80211_MODE_STURBO_A:
6155 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
6156 break;
6157 case IEEE80211_MODE_11NA:
6158 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
6159 break;
6160 case IEEE80211_MODE_11NG:
6161 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
6162 break;
6163 default:
6164 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
6165 __func__, mode);
6166 return 0;
6167 }
6168 sc->sc_rates[mode] = rt;
6169 return (rt != NULL);
6170 }
6171
6172 static void
6173 ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
6174 {
6175 /* NB: on/off times from the Atheros NDIS driver, w/ permission */
6176 static const struct {
6177 u_int rate; /* tx/rx 802.11 rate */
6178 u_int16_t timeOn; /* LED on time (ms) */
6179 u_int16_t timeOff; /* LED off time (ms) */
6180 } blinkrates[] = {
6181 { 108, 40, 10 },
6182 { 96, 44, 11 },
6183 { 72, 50, 13 },
6184 { 48, 57, 14 },
6185 { 36, 67, 16 },
6186 { 24, 80, 20 },
6187 { 22, 100, 25 },
6188 { 18, 133, 34 },
6189 { 12, 160, 40 },
6190 { 10, 200, 50 },
6191 { 6, 240, 58 },
6192 { 4, 267, 66 },
6193 { 2, 400, 100 },
6194 { 0, 500, 130 },
6195 /* XXX half/quarter rates */
6196 };
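	/* NB: the last { 0, 500, 130 } entry doubles as the catch-all default */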
6197 const HAL_RATE_TABLE *rt;
6198 int i, j;
6199
6200 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
6201 rt = sc->sc_rates[mode];
6202 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
6203 for (i = 0; i < rt->rateCount; i++) {
6204 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
6205 if (rt->info[i].phy != IEEE80211_T_HT)
6206 sc->sc_rixmap[ieeerate] = i;
6207 else
6208 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
6209 }
6210 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
6211 for (i = 0; i < nitems(sc->sc_hwmap); i++) {
6212 if (i >= rt->rateCount) {
6213 sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
6214 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
6215 continue;
6216 }
6217 sc->sc_hwmap[i].ieeerate =
6218 rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
6219 if (rt->info[i].phy == IEEE80211_T_HT)
6220 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
6221 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
6222 if (rt->info[i].shortPreamble ||
6223 rt->info[i].phy == IEEE80211_T_OFDM)
6224 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
6225 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
6226 for (j = 0; j < nitems(blinkrates)-1; j++)
6227 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
6228 break;
6229 /* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
6231 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
6232 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
6233 }
6234 sc->sc_currates = rt;
6235 sc->sc_curmode = mode;
6236 /*
6237 * All protection frames are transmitted at 2Mb/s for
6238 * 11g, otherwise at 1Mb/s.
6239 */
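	/* NB: ath_tx_findrix() takes the rate in 0.5 Mb/s units, hence 2*2 and 2*1 */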
6240 if (mode == IEEE80211_MODE_11G)
6241 sc->sc_protrix = ath_tx_findrix(sc, 2*2);
6242 else
6243 sc->sc_protrix = ath_tx_findrix(sc, 2*1);
6244 /* NB: caller is responsible for resetting rate control state */
6245 }
6246
6247 static void
6248 ath_watchdog(void *arg)
6249 {
6250 struct ath_softc *sc = arg;
6251 struct ieee80211com *ic = &sc->sc_ic;
6252 int do_reset = 0;
6253
6254 ATH_LOCK_ASSERT(sc);
6255
6256 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
6257 uint32_t hangs;
6258
6259 ath_power_set_power_state(sc, HAL_PM_AWAKE);
6260
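		/*
		 * NB: per the message below, a non-zero low byte in the
		 * hang state indicates a baseband hang, otherwise a MAC
		 * hang.
		 */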
6261 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
6262 hangs != 0) {
6263 device_printf(sc->sc_dev, "%s hang detected (0x%x)\n",
6264 hangs & 0xff ? "bb" : "mac", hangs);
6265 } else
6266 device_printf(sc->sc_dev, "device timeout\n");
6267 do_reset = 1;
6268 #if defined(__DragonFly__)
6269 ++ic->ic_oerrors; /* don't care about SMP races */
6270 #else
6271 counter_u64_add(ic->ic_oerrors, 1);
6272 #endif
6273 sc->sc_stats.ast_watchdog++;
6274
6275 ath_power_restore_power_state(sc);
6276 }
6277
6278 /*
6279 * We can't hold the lock across the ath_reset() call.
6280 *
6281 * And since this routine can't hold a lock and sleep,
6282 * do the reset deferred.
6283 */
6284 if (do_reset) {
6285 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
6286 }
6287
6288 #if defined(__DragonFly__)
6289 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
6290 #else
6291 callout_schedule(&sc->sc_wd_ch, hz);
6292 #endif
6293 }
6294
6295 static void
6296 ath_parent(struct ieee80211com *ic)
6297 {
6298 struct ath_softc *sc = ic->ic_softc;
6299 int error = EDOOFUS;
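	/*
	 * NB: error stays EDOOFUS unless we actually (re)initialise
	 * the hardware below; ieee80211_start_all() is only invoked
	 * after a successful ath_init().
	 */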
6300
6301 ATH_LOCK(sc);
6302 if (ic->ic_nrunning > 0) {
6303 /*
6304 * To avoid rescanning another access point,
6305 * do not call ath_init() here. Instead,
6306 * only reflect promisc mode settings.
6307 */
6308 if (sc->sc_running) {
6309 ath_power_set_power_state(sc, HAL_PM_AWAKE);
6310 ath_mode_init(sc);
6311 ath_power_restore_power_state(sc);
6312 } else if (!sc->sc_invalid) {
6313 /*
6314 * Beware of being called during attach/detach
6315 * to reset promiscuous mode. In that case we
6316 * will still be marked UP but not RUNNING.
6317 * However trying to re-init the interface
6318 * is the wrong thing to do as we've already
6319 * torn down much of our state. There's
6320 * probably a better way to deal with this.
6321 */
6322 error = ath_init(sc);
6323 }
6324 } else {
6325 ath_stop(sc);
6326 if (!sc->sc_invalid)
6327 ath_power_setpower(sc, HAL_PM_FULL_SLEEP);
6328 }
6329 ATH_UNLOCK(sc);
6330
6331 if (error == 0) {
6332 #ifdef ATH_TX99_DIAG
6333 if (sc->sc_tx99 != NULL)
6334 sc->sc_tx99->start(sc->sc_tx99);
6335 else
6336 #endif
6337 ieee80211_start_all(ic);
6338 }
6339 }
6340
6341 /*
6342 * Announce various information on device/driver attach.
6343 */
6344 static void
6345 ath_announce(struct ath_softc *sc)
6346 {
6347 struct ath_hal *ah = sc->sc_ah;
6348
6349 device_printf(sc->sc_dev, "%s mac %d.%d RF%s phy %d.%d\n",
6350 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
6351 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
6352 device_printf(sc->sc_dev, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
6353 ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
6354 if (bootverbose) {
6355 int i;
6356 for (i = 0; i <= WME_AC_VO; i++) {
6357 struct ath_txq *txq = sc->sc_ac2q[i];
6358 device_printf(sc->sc_dev,
6359 "Use hw queue %u for %s traffic\n",
6360 txq->axq_qnum, ieee80211_wme_acnames[i]);
6361 }
6362 device_printf(sc->sc_dev, "Use hw queue %u for CAB traffic\n",
6363 sc->sc_cabq->axq_qnum);
6364 device_printf(sc->sc_dev, "Use hw queue %u for beacons\n",
6365 sc->sc_bhalq);
6366 }
6367 if (ath_rxbuf != ATH_RXBUF)
6368 device_printf(sc->sc_dev, "using %u rx buffers\n", ath_rxbuf);
6369 if (ath_txbuf != ATH_TXBUF)
6370 device_printf(sc->sc_dev, "using %u tx buffers\n", ath_txbuf);
6371 if (sc->sc_mcastkey && bootverbose)
6372 device_printf(sc->sc_dev, "using multicast key search\n");
6373 }
6374
6375 static void
6376 ath_dfs_tasklet(void *p, int npending)
6377 {
6378 struct ath_softc *sc = (struct ath_softc *) p;
6379 struct ieee80211com *ic = &sc->sc_ic;
6380
6381 /*
6382 * If previous processing has found a radar event,
6383 * signal this to the net80211 layer to begin DFS
6384 * processing.
6385 */
6386 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
6387 /* DFS event found, initiate channel change */
6388 /*
6389 * XXX doesn't currently tell us whether the event
6390 * XXX was found in the primary or extension
6391 * XXX channel!
6392 */
6393 IEEE80211_LOCK(ic);
6394 ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
6395 IEEE80211_UNLOCK(ic);
6396 }
6397 }
6398
6399 /*
6400 * Enable/disable power save. This must be called with
6401 * no TX driver locks currently held, so it should only
6402 * be called from the RX path (which doesn't hold any
 * TX driver locks).
6404 */
6405 static void
6406 ath_node_powersave(struct ieee80211_node *ni, int enable)
6407 {
6408 #ifdef ATH_SW_PSQ
6409 struct ath_node *an = ATH_NODE(ni);
6410 struct ieee80211com *ic = ni->ni_ic;
6411 struct ath_softc *sc = ic->ic_softc;
6412 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
6413
6414 /* XXX and no TXQ locks should be held here */
6415
6416 #if defined(__DragonFly__)
6417 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6s: enable=%d\n",
6418 __func__,
6419 ath_hal_ether_sprintf(ni->ni_macaddr),
6420 !! enable);
6421 #else
6422 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d\n",
6423 __func__,
6424 ni->ni_macaddr,
6425 ":",
6426 !! enable);
6427 #endif
6428
6429 /* Suspend or resume software queue handling */
6430 if (enable)
6431 ath_tx_node_sleep(sc, an);
6432 else
6433 ath_tx_node_wakeup(sc, an);
6434
6435 /* Update net80211 state */
6436 avp->av_node_ps(ni, enable);
6437 #else
6438 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
6439
6440 /* Update net80211 state */
6441 avp->av_node_ps(ni, enable);
#endif /* ATH_SW_PSQ */
6443 }
6444
6445 /*
6446 * Notification from net80211 that the powersave queue state has
6447 * changed.
6448 *
 * Since the software queue may also have some frames:
 *
 * + if the node software queue has frames and the TID state
 *   is 0, we set the TIM;
 * + if the node and the stack are both empty, we clear the TIM bit;
 * + if the stack tries to set the bit, always set it;
 * + if the stack tries to clear the bit, only clear it if the
 *   software queue in question is also empty.
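 *
 * Summarising the logic implemented in ath_node_set_tim() below
 * (swq == node software queue depth, ps == node in powersave):
 *
 *	request		swq		ps	action
 *	-------		---		--	------
 *	set		any		any	set TIM
 *	clear		empty		any	clear TIM
 *	clear		non-empty	no	clear TIM
 *	clear		non-empty	yes	leave TIM set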
6457 *
6458 * TODO: this is called during node teardown; so let's ensure this
6459 * is all correctly handled and that the TIM bit is cleared.
6460 * It may be that the node flush is called _AFTER_ the net80211
6461 * stack clears the TIM.
6462 *
 * Here is the racy part.  Since it's possible for more than one
 * concurrent, overlapping TX to be completed (with the completions
 * running in different threads), the concurrent TIM update calls
 * may clash.  We can't hold the node lock here because setting the
6467 * TIM grabs the net80211 comlock and this may cause a LOR.
6468 * The solution is either to totally serialise _everything_ at
6469 * this point (ie, all TX, completion and any reset/flush go into
6470 * one taskqueue) or a new "ath TIM lock" needs to be created that
6471 * just wraps the driver state change and this call to avp->av_set_tim().
6472 *
6473 * The same race exists in the net80211 power save queue handling
6474 * as well. Since multiple transmitting threads may queue frames
6475 * into the driver, as well as ps-poll and the driver transmitting
6476 * frames (and thus clearing the psq), it's quite possible that
6477 * a packet entering the PSQ and a ps-poll being handled will
6478 * race, causing the TIM to be cleared and not re-set.
6479 */
6480 static int
6481 ath_node_set_tim(struct ieee80211_node *ni, int enable)
6482 {
6483 #ifdef ATH_SW_PSQ
6484 struct ieee80211com *ic = ni->ni_ic;
6485 struct ath_softc *sc = ic->ic_softc;
6486 struct ath_node *an = ATH_NODE(ni);
6487 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
6488 int changed = 0;
6489
6490 ATH_TX_LOCK(sc);
6491 an->an_stack_psq = enable;
6492
6493 /*
6494 * This will get called for all operating modes,
6495 * even if avp->av_set_tim is unset.
6496 * It's currently set for hostap/ibss modes; but
6497 * the same infrastructure is used for both STA
6498 * and AP/IBSS node power save.
6499 */
6500 if (avp->av_set_tim == NULL) {
6501 ATH_TX_UNLOCK(sc);
6502 return (0);
6503 }
6504
6505 /*
6506 * If setting the bit, always set it here.
6507 * If clearing the bit, only clear it if the
6508 * software queue is also empty.
6509 *
6510 * If the node has left power save, just clear the TIM
6511 * bit regardless of the state of the power save queue.
6512 *
6513 * XXX TODO: although atomics are used, it's quite possible
6514 * that a race will occur between this and setting/clearing
6515 * in another thread. TX completion will occur always in
6516 * one thread, however setting/clearing the TIM bit can come
6517 * from a variety of different process contexts!
6518 */
6519 if (enable && an->an_tim_set == 1) {
6520 #if defined(__DragonFly__)
6521 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6522 "%s: %s: enable=%d, tim_set=1, ignoring\n",
6523 __func__,
6524 ath_hal_ether_sprintf(ni->ni_macaddr),
6525 enable);
6526 #else
6527 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6528 "%s: %6D: enable=%d, tim_set=1, ignoring\n",
6529 __func__,
6530 ni->ni_macaddr,
6531 ":",
6532 enable);
6533 #endif
6534 ATH_TX_UNLOCK(sc);
6535 } else if (enable) {
6536 #if defined(__DragonFly__)
6537 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6538 "%s: %s: enable=%d, enabling TIM\n",
6539 __func__,
6540 ath_hal_ether_sprintf(ni->ni_macaddr),
6541 enable);
6542 #else
6543 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6544 "%s: %6D: enable=%d, enabling TIM\n",
6545 __func__,
6546 ni->ni_macaddr,
6547 ":",
6548 enable);
6549 #endif
6550 an->an_tim_set = 1;
6551 ATH_TX_UNLOCK(sc);
6552 changed = avp->av_set_tim(ni, enable);
6553 } else if (an->an_swq_depth == 0) {
6554 /* disable */
6555 #if defined(__DragonFly__)
6556 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6557 "%s: %s: enable=%d, an_swq_depth == 0, disabling\n",
6558 __func__,
6559 ath_hal_ether_sprintf(ni->ni_macaddr),
6560 enable);
6561 #else
6562 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6563 "%s: %6D: enable=%d, an_swq_depth == 0, disabling\n",
6564 __func__,
6565 ni->ni_macaddr,
6566 ":",
6567 enable);
6568 #endif
6569 an->an_tim_set = 0;
6570 ATH_TX_UNLOCK(sc);
6571 changed = avp->av_set_tim(ni, enable);
6572 } else if (! an->an_is_powersave) {
6573 /*
6574 * disable regardless; the node isn't in powersave now
6575 */
6576 #if defined(__DragonFly__)
6577 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6578 "%s: %s: enable=%d, an_pwrsave=0, disabling\n",
6579 __func__,
6580 ath_hal_ether_sprintf(ni->ni_macaddr),
6581 enable);
6582 #else
6583 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6584 "%s: %6D: enable=%d, an_pwrsave=0, disabling\n",
6585 __func__,
6586 ni->ni_macaddr,
6587 ":",
6588 enable);
6589 #endif
6590 an->an_tim_set = 0;
6591 ATH_TX_UNLOCK(sc);
6592 changed = avp->av_set_tim(ni, enable);
6593 } else {
6594 /*
6595 * psq disable, node is currently in powersave, node
6596 * software queue isn't empty, so don't clear the TIM bit
6597 * for now.
6598 */
6599 ATH_TX_UNLOCK(sc);
6600 #if defined(__DragonFly__)
6601 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6602 "%s: %s: enable=%d, an_swq_depth > 0, ignoring\n",
6603 __func__,
6604 ath_hal_ether_sprintf(ni->ni_macaddr),
6605 enable);
6606 #else
6607 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6608 "%s: %6D: enable=%d, an_swq_depth > 0, ignoring\n",
6609 __func__,
6610 ni->ni_macaddr,
6611 ":",
6612 enable);
6613 #endif
6614 changed = 0;
6615 }
6616
6617 return (changed);
6618 #else
6619 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
6620
6621 /*
6622 * Some operating modes don't set av_set_tim(), so don't
6623 * update it here.
6624 */
6625 if (avp->av_set_tim == NULL)
6626 return (0);
6627
6628 return (avp->av_set_tim(ni, enable));
6629 #endif /* ATH_SW_PSQ */
6630 }
6631
6632 /*
6633 * Set or update the TIM from the software queue.
6634 *
 * Check the software queue depth before attempting to acquire
 * any locks; that avoids needlessly grabbing the lock.  Then,
6637 * re-check afterwards to ensure nothing has changed in the
6638 * meantime.
6639 *
 * set: This is designed to be called from the TX path, after
 *   a frame has been queued, to see if the swq depth is > 0.
6642 *
6643 * clear: This is designed to be called from the buffer completion point
6644 * (right now it's ath_tx_default_comp()) where the state of
6645 * a software queue has changed.
6646 *
6647 * It makes sense to place it at buffer free / completion rather
6648 * than after each software queue operation, as there's no real
6649 * point in churning the TIM bit as the last frames in the software
6650 * queue are transmitted. If they fail and we retry them, we'd
6651 * just be setting the TIM bit again anyway.
6652 */
6653 void
6654 ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni,
6655 int enable)
6656 {
6657 #ifdef ATH_SW_PSQ
6658 struct ath_node *an;
6659 struct ath_vap *avp;
6660
6661 /* Don't do this for broadcast/etc frames */
6662 if (ni == NULL)
6663 return;
6664
6665 an = ATH_NODE(ni);
6666 avp = ATH_VAP(ni->ni_vap);
6667
6668 /*
6669 * And for operating modes without the TIM handler set, let's
6670 * just skip those.
6671 */
6672 if (avp->av_set_tim == NULL)
6673 return;
6674
6675 ATH_TX_LOCK_ASSERT(sc);
6676
6677 if (enable) {
6678 if (an->an_is_powersave &&
6679 an->an_tim_set == 0 &&
6680 an->an_swq_depth != 0) {
6681 #if defined(__DragonFly__)
6682 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6683 "%s: %s: swq_depth>0, tim_set=0, set!\n",
6684 __func__,
6685 ath_hal_ether_sprintf(ni->ni_macaddr));
6686 #else
6687 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6688 "%s: %6D: swq_depth>0, tim_set=0, set!\n",
6689 __func__,
6690 ni->ni_macaddr,
6691 ":");
6692 #endif
6693 an->an_tim_set = 1;
6694 (void) avp->av_set_tim(ni, 1);
6695 }
6696 } else {
6697 /*
6698 * Don't bother grabbing the lock unless the queue is empty.
6699 */
6700 if (an->an_swq_depth != 0)
6701 return;
6702
6703 if (an->an_is_powersave &&
6704 an->an_stack_psq == 0 &&
6705 an->an_tim_set == 1 &&
6706 an->an_swq_depth == 0) {
6707 #if defined(__DragonFly__)
6708 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6709 "%s: %s: swq_depth=0, tim_set=1, psq_set=0,"
6710 " clear!\n",
6711 __func__,
6712 ath_hal_ether_sprintf(ni->ni_macaddr));
6713 #else
6714 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6715 "%s: %6D: swq_depth=0, tim_set=1, psq_set=0,"
6716 " clear!\n",
6717 __func__,
6718 ni->ni_macaddr,
6719 ":");
6720 #endif
6721 an->an_tim_set = 0;
6722 (void) avp->av_set_tim(ni, 0);
6723 }
6724 }
6725 #else
6726 return;
6727 #endif /* ATH_SW_PSQ */
6728 }
6729
6730 /*
6731 * Received a ps-poll frame from net80211.
6732 *
6733 * Here we get a chance to serve out a software-queued frame ourselves
6734 * before we punt it to net80211 to transmit us one itself - either
6735 * because there's traffic in the net80211 psq, or a NULL frame to
6736 * indicate there's nothing else.
6737 */
6738 static void
6739 ath_node_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m)
6740 {
6741 #ifdef ATH_SW_PSQ
	struct ath_node *an;
	struct ath_vap *avp;
	struct ieee80211com *ic;
	struct ath_softc *sc;
	int tid;

	/* Just paranoia; check ni before it is dereferenced */
	if (ni == NULL)
		return;

	ic = ni->ni_ic;
	sc = ic->ic_softc;
6751
6752 /*
6753 * Unassociated (temporary node) station.
6754 */
6755 if (ni->ni_associd == 0)
6756 return;
6757
6758 /*
6759 * We do have an active node, so let's begin looking into it.
6760 */
6761 an = ATH_NODE(ni);
6762 avp = ATH_VAP(ni->ni_vap);
6763
6764 /*
6765 * For now, we just call the original ps-poll method.
6766 * Once we're ready to flip this on:
6767 *
6768 * + Set leak to 1, as no matter what we're going to have
6769 * to send a frame;
6770 * + Check the software queue and if there's something in it,
	 * schedule the highest TID that has traffic from this node.
6772 * Then make sure we schedule the software scheduler to
6773 * run so it picks up said frame.
6774 *
6775 * That way whatever happens, we'll at least send _a_ frame
6776 * to the given node.
6777 *
6778 * Again, yes, it's crappy QoS if the node has multiple
6779 * TIDs worth of traffic - but let's get it working first
6780 * before we optimise it.
6781 *
6782 * Also yes, there's definitely latency here - we're not
6783 * direct dispatching to the hardware in this path (and
6784 * we're likely being called from the packet receive path,
6785 * so going back into TX may be a little hairy!) but again
6786 * I'd like to get this working first before optimising
6787 * turn-around time.
6788 */
6789
6790 ATH_TX_LOCK(sc);
6791
6792 /*
6793 * Legacy - we're called and the node isn't asleep.
6794 * Immediately punt.
6795 */
6796 if (! an->an_is_powersave) {
6797 #if defined(__DragonFly__)
6798 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6799 "%s: %s: not in powersave?\n",
6800 __func__,
6801 ath_hal_ether_sprintf(ni->ni_macaddr));
6802 #else
6803 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6804 "%s: %6D: not in powersave?\n",
6805 __func__,
6806 ni->ni_macaddr,
6807 ":");
6808 #endif
6809 ATH_TX_UNLOCK(sc);
6810 avp->av_recv_pspoll(ni, m);
6811 return;
6812 }
6813
6814 /*
6815 * We're in powersave.
6816 *
6817 * Leak a frame.
6818 */
6819 an->an_leak_count = 1;
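	/*
	 * NB: a leak count of 1 tells the TX path to let exactly one
	 * queued frame out despite the node being in powersave, so
	 * the ps-poll gets a response; the leak accounting itself is
	 * handled in the TX queue code.
	 */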
6820
6821 /*
6822 * Now, if there's no frames in the node, just punt to
6823 * recv_pspoll.
6824 *
6825 * Don't bother checking if the TIM bit is set, we really
6826 * only care if there are any frames here!
6827 */
6828 if (an->an_swq_depth == 0) {
6829 ATH_TX_UNLOCK(sc);
6830 #if defined(__DragonFly__)
6831 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6832 "%s: %s: SWQ empty; punting to net80211\n",
6833 __func__,
6834 ath_hal_ether_sprintf(ni->ni_macaddr));
6835 #else
6836 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6837 "%s: %6D: SWQ empty; punting to net80211\n",
6838 __func__,
6839 ni->ni_macaddr,
6840 ":");
6841 #endif
6842 avp->av_recv_pspoll(ni, m);
6843 return;
6844 }
6845
6846 /*
6847 * Ok, let's schedule the highest TID that has traffic
6848 * and then schedule something.
6849 */
6850 for (tid = IEEE80211_TID_SIZE - 1; tid >= 0; tid--) {
6851 struct ath_tid *atid = &an->an_tid[tid];
6852 /*
6853 * No frames? Skip.
6854 */
6855 if (atid->axq_depth == 0)
6856 continue;
6857 ath_tx_tid_sched(sc, atid);
6858 /*
6859 * XXX we could do a direct call to the TXQ
6860 * scheduler code here to optimise latency
6861 * at the expense of a REALLY deep callstack.
6862 */
6863 ATH_TX_UNLOCK(sc);
6864 taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask);
6865 #if defined(__DragonFly__)
6866 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6867 "%s: %s: leaking frame to TID %d\n",
6868 __func__,
6869 ath_hal_ether_sprintf(ni->ni_macaddr),
6870 tid);
6871 #else
6872 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6873 "%s: %6D: leaking frame to TID %d\n",
6874 __func__,
6875 ni->ni_macaddr,
6876 ":",
6877 tid);
6878 #endif
6879 return;
6880 }
6881
6882 ATH_TX_UNLOCK(sc);
6883
6884 /*
6885 * XXX nothing in the TIDs at this point? Eek.
6886 */
6887 #if defined(__DragonFly__)
6888 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6889 "%s: %s: TIDs empty, but ath_node showed traffic?!\n",
6890 __func__,
6891 ath_hal_ether_sprintf(ni->ni_macaddr));
6892 #else
6893 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6894 "%s: %6D: TIDs empty, but ath_node showed traffic?!\n",
6895 __func__,
6896 ni->ni_macaddr,
6897 ":");
6898 #endif
6899 avp->av_recv_pspoll(ni, m);
6900 #else
6901 avp->av_recv_pspoll(ni, m);
6902 #endif /* ATH_SW_PSQ */
6903 }
6904
6905 MODULE_VERSION(if_ath, 1);
6906 MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */
6907 #if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) || defined(ATH_DEBUG_ALQ)
6908 MODULE_DEPEND(if_ath, alq, 1, 1, 1);
6909 #endif
6910